]> jfr.im git - yt-dlp.git/blame - yt_dlp/extractor/common.py
Improved progress reporting (See desc) (#1125)
[yt-dlp.git] / yt_dlp / extractor / common.py
CommitLineData
fd475508 1# coding: utf-8
6a3828fd 2from __future__ import unicode_literals
f1a9d64e 3
d6983cb4 4import base64
f4b1c7ad 5import datetime
3ec05685 6import hashlib
3d3538e4 7import json
4094b6e3 8import netrc
d6983cb4 9import os
773f291d 10import random
d6983cb4 11import re
d6983cb4 12import sys
4094b6e3 13import time
1bac3455 14import math
d6983cb4 15
8c25f81b 16from ..compat import (
6c22cee6 17 compat_cookiejar_Cookie,
f7ad7160 18 compat_cookies_SimpleCookie,
ee0ba927 19 compat_etree_Element,
e9c0cdd3 20 compat_etree_fromstring,
0001fcb5 21 compat_expanduser,
e64b7569 22 compat_getpass,
d6983cb4 23 compat_http_client,
e9c0cdd3
YCH
24 compat_os_name,
25 compat_str,
d6983cb4 26 compat_urllib_error,
98763ee3 27 compat_urllib_parse_unquote,
15707c7e 28 compat_urllib_parse_urlencode,
41d06b04 29 compat_urllib_request,
f0b5d6af 30 compat_urlparse,
e01c3d2e 31 compat_xml_parse_error,
8c25f81b 32)
eb8a4433 33from ..downloader import FileDownloader
48107c19
S
34from ..downloader.f4m import (
35 get_base_url,
36 remove_encrypted_media,
37)
8c25f81b 38from ..utils import (
05900629 39 age_restricted,
02dc0a36 40 base_url,
08f2a92c 41 bug_reports_message,
d6983cb4
PH
42 clean_html,
43 compiled_regex_type,
70f0f5a8 44 determine_ext,
46b18f23 45 determine_protocol,
d493f15c 46 dict_get,
9b9c5355 47 error_to_compat_str,
46b18f23 48 extract_attributes,
b868936c 49 ExtractorError,
97f4aecf 50 fix_xml_ampersands,
b14f3a4c 51 float_or_none,
b868936c 52 format_field,
773f291d
S
53 GeoRestrictedError,
54 GeoUtils,
31bb8d3f 55 int_or_none,
a4a554a7 56 js_to_json,
0685d972 57 JSON_LD_RE,
46b18f23 58 mimetype2ext,
3158150c 59 network_exceptions,
b868936c 60 NO_DEFAULT,
46b18f23 61 orderedSet,
d493f15c 62 parse_bitrate,
46b18f23
JH
63 parse_codecs,
64 parse_duration,
4ca2a3cf 65 parse_iso8601,
46b18f23 66 parse_m3u8_attributes,
d493f15c 67 parse_resolution,
55b3e45b 68 RegexNotFoundError,
46b18f23 69 sanitize_filename,
b868936c 70 sanitized_Request,
d493f15c 71 str_or_none,
ce5b9040 72 str_to_int,
f856816b 73 strip_or_none,
5d3a0e79 74 traverse_obj,
f38de77f 75 unescapeHTML,
647eab45 76 unified_strdate,
6b3a3098 77 unified_timestamp,
46b18f23
JH
78 update_Request,
79 update_url_query,
a107193e 80 url_basename,
bebef109 81 url_or_none,
b868936c 82 urljoin,
6606817a 83 variadic,
a6571f10 84 xpath_element,
8d6765cf
S
85 xpath_text,
86 xpath_with_ns,
d6983cb4 87)
c342041f 88
d6983cb4
PH
89
90class InfoExtractor(object):
91 """Information Extractor class.
92
93 Information extractors are the classes that, given a URL, extract
94 information about the video (or videos) the URL refers to. This
95 information includes the real video URL, the video title, author and
96 others. The information is stored in a dictionary which is then
5d380852 97 passed to the YoutubeDL. The YoutubeDL processes this
d6983cb4
PH
98 information possibly downloading the video to the file system, among
99 other possible outcomes.
100
cf0649f8 101 The type field determines the type of the result.
fed5d032
PH
102 By far the most common value (and the default if _type is missing) is
103 "video", which indicates a single video.
104
105 For a video, the dictionaries must include the following fields:
d6983cb4
PH
106
107 id: Video identifier.
d6983cb4 108 title: Video title, unescaped.
d67b0b15 109
f49d89ee 110 Additionally, it must contain either a formats entry or a url one:
d67b0b15 111
f49d89ee
PH
112 formats: A list of dictionaries for each format available, ordered
113 from worst to best quality.
114
115 Potential fields:
c790e93a
S
116 * url The mandatory URL representing the media:
117 for plain file media - HTTP URL of this file,
118 for RTMP - RTMP URL,
119 for HLS - URL of the M3U8 media playlist,
120 for HDS - URL of the F4M manifest,
79d2077e
S
121 for DASH
122 - HTTP URL to plain file media (in case of
123 unfragmented media)
124 - URL of the MPD manifest or base URL
125 representing the media if MPD manifest
8ed7a233 126 is parsed from a string (in case of
79d2077e 127 fragmented media)
c790e93a 128 for MSS - URL of the ISM manifest.
86f4d14f
S
129 * manifest_url
130 The URL of the manifest file in case of
c790e93a
S
131 fragmented media:
132 for HLS - URL of the M3U8 master playlist,
133 for HDS - URL of the F4M manifest,
134 for DASH - URL of the MPD manifest,
135 for MSS - URL of the ISM manifest.
10952eb2 136 * ext Will be calculated from URL if missing
d67b0b15
PH
137 * format A human-readable description of the format
138 ("mp4 container with h264/opus").
139 Calculated from the format_id, width, height.
140 and format_note fields if missing.
141 * format_id A short description of the format
5d4f3985
PH
142 ("mp4_h264_opus" or "19").
143 Technically optional, but strongly recommended.
d67b0b15
PH
144 * format_note Additional info about the format
145 ("3D" or "DASH video")
146 * width Width of the video, if known
147 * height Height of the video, if known
f49d89ee 148 * resolution Textual description of width and height
7217e148 149 * tbr Average bitrate of audio and video in KBit/s
d67b0b15
PH
150 * abr Average audio bitrate in KBit/s
151 * acodec Name of the audio codec in use
dd27fd17 152 * asr Audio sampling rate in Hertz
d67b0b15 153 * vbr Average video bitrate in KBit/s
fbb21cf5 154 * fps Frame rate
d67b0b15 155 * vcodec Name of the video codec in use
1394ce65 156 * container Name of the container format
d67b0b15 157 * filesize The number of bytes, if known in advance
9732d77e 158 * filesize_approx An estimate for the number of bytes
d67b0b15 159 * player_url SWF Player URL (used for rtmpdump).
c7deaa4c
PH
160 * protocol The protocol that will be used for the actual
161 download, lower-case.
0fa9a1e2 162 "http", "https", "rtsp", "rtmp", "rtmp_ffmpeg", "rtmpe",
af7d5a63 163 "m3u8", "m3u8_native" or "http_dash_segments".
c58c2d63
S
164 * fragment_base_url
165 Base URL for fragments. Each fragment's path
166 value (if present) will be relative to
167 this URL.
168 * fragments A list of fragments of a fragmented media.
169 Each fragment entry must contain either an url
170 or a path. If an url is present it should be
171 considered by a client. Otherwise both path and
172 fragment_base_url must be present. Here is
173 the list of all potential fields:
174 * "url" - fragment's URL
175 * "path" - fragment's path relative to
176 fragment_base_url
a0d5077c
S
177 * "duration" (optional, int or float)
178 * "filesize" (optional, int)
f49d89ee 179 * preference Order number of this format. If this field is
08d13955 180 present and not None, the formats get sorted
38d63d84 181 by this field, regardless of all other values.
f49d89ee
PH
182 -1 for default (order by other properties),
183 -2 or smaller for less than default.
e65566a9
PH
184 < -1000 to hide the format (if there is
185 another one which is strictly better)
32f90364
PH
186 * language Language code, e.g. "de" or "en-US".
187 * language_preference Is this in the language mentioned in
188 the URL?
aff2f4f4
PH
189 10 if it's what the URL is about,
190 -1 for default (don't know),
191 -10 otherwise, other values reserved for now.
5d73273f
PH
192 * quality Order number of the video quality of this
193 format, irrespective of the file format.
194 -1 for default (order by other properties),
195 -2 or smaller for less than default.
c64ed2a3
PH
196 * source_preference Order number for this video source
197 (quality takes higher priority)
198 -1 for default (order by other properties),
199 -2 or smaller for less than default.
d769be6c
PH
200 * http_headers A dictionary of additional HTTP headers
201 to add to the request.
6271f1ca 202 * stretched_ratio If given and not 1, indicates that the
3dee7826
PH
203 video's pixels are not square.
204 width : height ratio as float.
205 * no_resume The server does not support resuming the
206 (HTTP or RTMP) download. Boolean.
88acdbc2 207 * has_drm The format has DRM and cannot be downloaded. Boolean
00c97e3e
S
208 * downloader_options A dictionary of downloader options as
209 described in FileDownloader
3b1fe47d 210 RTMP formats can also have the additional fields: page_url,
211 app, play_path, tc_url, flash_version, rtmp_live, rtmp_conn,
212 rtmp_protocol, rtmp_real_time
3dee7826 213
c0ba0f48 214 url: Final video URL.
d6983cb4 215 ext: Video filename extension.
d67b0b15
PH
216 format: The video format, defaults to ext (used for --get-format)
217 player_url: SWF Player URL (used for rtmpdump).
2f5865cc 218
d6983cb4
PH
219 The following fields are optional:
220
f5e43bc6 221 alt_title: A secondary title of the video.
0afef30b
PH
222 display_id An alternative identifier for the video, not necessarily
223 unique, but available before title. Typically, id is
224 something like "4234987", title "Dancing naked mole rats",
225 and display_id "dancing-naked-mole-rats"
d5519808 226 thumbnails: A list of dictionaries, with the following entries:
cfb56d1a 227 * "id" (optional, string) - Thumbnail format ID
d5519808 228 * "url"
cfb56d1a 229 * "preference" (optional, int) - quality of the image
d5519808
PH
230 * "width" (optional, int)
231 * "height" (optional, int)
5e1c39ac 232 * "resolution" (optional, string "{width}x{height}",
d5519808 233 deprecated)
2de624fd 234 * "filesize" (optional, int)
0ba692ac 235 * "_test_url" (optional, bool) - If true, test the URL
d6983cb4 236 thumbnail: Full URL to a video thumbnail image.
f5e43bc6 237 description: Full video description.
d6983cb4 238 uploader: Full name of the video uploader.
2bc0c46f 239 license: License name the video is licensed under.
8a92e51c 240 creator: The creator of the video.
10db0d2f 241 release_timestamp: UNIX timestamp of the moment the video was released.
8aab976b 242 release_date: The date (YYYYMMDD) when the video was released.
10db0d2f 243 timestamp: UNIX timestamp of the moment the video was uploaded
d6983cb4 244 upload_date: Video upload date (YYYYMMDD).
955c4514 245 If not explicitly set, calculated from timestamp.
d6983cb4 246 uploader_id: Nickname or id of the video uploader.
7bcd2830 247 uploader_url: Full URL to a personal webpage of the video uploader.
6f1f59f3 248 channel: Full name of the channel the video is uploaded on.
0e7b8d3e 249 Note that channel fields may or may not repeat uploader
6f1f59f3
S
250 fields. This depends on a particular extractor.
251 channel_id: Id of the channel.
252 channel_url: Full URL to a channel webpage.
da9ec3b9 253 location: Physical location where the video was filmed.
a504ced0 254 subtitles: The available subtitles as a dictionary in the format
4606c34e
YCH
255 {tag: subformats}. "tag" is usually a language code, and
256 "subformats" is a list sorted from lower to higher
257 preference, each element is a dictionary with the "ext"
258 entry and one of:
a504ced0 259 * "data": The subtitles file contents
10952eb2 260 * "url": A URL pointing to the subtitles file
2412044c 261 It can optionally also have:
262 * "name": Name or description of the subtitles
4bba3716 263 "ext" will be calculated from URL if missing
e167860c 264 automatic_captions: Like 'subtitles'; contains automatically generated
265 captions instead of normal subtitles
62d231c0 266 duration: Length of the video in seconds, as an integer or float.
f3d29461 267 view_count: How many users have watched the video on the platform.
19e3dfc9
PH
268 like_count: Number of positive ratings of the video
269 dislike_count: Number of negative ratings of the video
02835c6b 270 repost_count: Number of reposts of the video
2d30521a 271 average_rating: Average rating give by users, the scale used depends on the webpage
19e3dfc9 272 comment_count: Number of comments on the video
dd622d7c
PH
273 comments: A list of comments, each with one or more of the following
274 properties (all but one of text or html optional):
275 * "author" - human-readable name of the comment author
276 * "author_id" - user ID of the comment author
a1c5d2ca 277 * "author_thumbnail" - The thumbnail of the comment author
dd622d7c
PH
278 * "id" - Comment ID
279 * "html" - Comment as HTML
280 * "text" - Plain text of the comment
281 * "timestamp" - UNIX timestamp of comment
282 * "parent" - ID of the comment this one is replying to.
283 Set to "root" to indicate that this is a
284 comment to the original video.
a1c5d2ca
M
285 * "like_count" - Number of positive ratings of the comment
286 * "dislike_count" - Number of negative ratings of the comment
287 * "is_favorited" - Whether the comment is marked as
288 favorite by the video uploader
289 * "author_is_uploader" - Whether the comment is made by
290 the video uploader
8dbe9899 291 age_limit: Age restriction for the video, as an integer (years)
7a5c1cfe 292 webpage_url: The URL to the video webpage, if given to yt-dlp it
9103bbc5
JMF
293 should allow to get the same result again. (It will be set
294 by YoutubeDL if it's missing)
ad3bc6ac
PH
295 categories: A list of categories that the video falls in, for example
296 ["Sports", "Berlin"]
864f24bd 297 tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
d0fb4bd1 298 cast: A list of the video cast
7267bd53
PH
299 is_live: True, False, or None (=unknown). Whether this video is a
300 live stream that goes on instead of a fixed-length video.
f76ede8e 301 was_live: True, False, or None (=unknown). Whether this video was
302 originally a live stream.
3dbb2a9d 303 live_status: 'is_live', 'is_upcoming', 'was_live', 'not_live' or None (=unknown)
ae30b840 304 If absent, automatically set from is_live, was_live
7c80519c 305 start_time: Time in seconds where the reproduction should start, as
10952eb2 306 specified in the URL.
297a564b 307 end_time: Time in seconds where the reproduction should end, as
10952eb2 308 specified in the URL.
55949fed 309 chapters: A list of dictionaries, with the following entries:
310 * "start_time" - The start time of the chapter in seconds
311 * "end_time" - The end time of the chapter in seconds
312 * "title" (optional, string)
6cfda058 313 playable_in_embed: Whether this video is allowed to play in embedded
314 players on other sites. Can be True (=always allowed),
315 False (=never allowed), None (=unknown), or a string
c224251a
M
316 specifying the criteria for embedability (Eg: 'whitelist')
317 availability: Under what condition the video is available. One of
318 'private', 'premium_only', 'subscriber_only', 'needs_auth',
319 'unlisted' or 'public'. Use 'InfoExtractor._availability'
320 to set it
277d6ff5 321 __post_extractor: A function to be called just before the metadata is
322 written to either disk, logger or console. The function
323 must return a dict which will be added to the info_dict.
324 This is usefull for additional information that is
325 time-consuming to extract. Note that the fields thus
326 extracted will not be available to output template and
327 match_filter. So, only "comments" and "comment_count" are
328 currently allowed to be extracted via this method.
d6983cb4 329
7109903e
S
330 The following fields should only be used when the video belongs to some logical
331 chapter or section:
332
333 chapter: Name or title of the chapter the video belongs to.
27bfd4e5
S
334 chapter_number: Number of the chapter the video belongs to, as an integer.
335 chapter_id: Id of the chapter the video belongs to, as a unicode string.
7109903e
S
336
337 The following fields should only be used when the video is an episode of some
8d76bdf1 338 series, programme or podcast:
7109903e
S
339
340 series: Title of the series or programme the video episode belongs to.
341 season: Title of the season the video episode belongs to.
27bfd4e5
S
342 season_number: Number of the season the video episode belongs to, as an integer.
343 season_id: Id of the season the video episode belongs to, as a unicode string.
7109903e
S
344 episode: Title of the video episode. Unlike mandatory video title field,
345 this field should denote the exact title of the video episode
346 without any kind of decoration.
27bfd4e5
S
347 episode_number: Number of the video episode within a season, as an integer.
348 episode_id: Id of the video episode, as a unicode string.
7109903e 349
7a93ab5f
S
350 The following fields should only be used when the media is a track or a part of
351 a music album:
352
353 track: Title of the track.
354 track_number: Number of the track within an album or a disc, as an integer.
355 track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
356 as a unicode string.
357 artist: Artist(s) of the track.
358 genre: Genre(s) of the track.
359 album: Title of the album the track belongs to.
360 album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
361 album_artist: List of all artists appeared on the album (e.g.
362 "Ash Borer / Fell Voices" or "Various Artists", useful for splits
363 and compilations).
364 disc_number: Number of the disc or other physical medium the track belongs to,
365 as an integer.
366 release_year: Year (YYYY) when the album was released.
367
deefc05b 368 Unless mentioned otherwise, the fields should be Unicode strings.
d6983cb4 369
d838b1bd
PH
370 Unless mentioned otherwise, None is equivalent to absence of information.
371
fed5d032
PH
372
373 _type "playlist" indicates multiple videos.
b82f815f
PH
374 There must be a key "entries", which is a list, an iterable, or a PagedList
375 object, each element of which is a valid dictionary by this specification.
fed5d032 376
b60419c5 377 Additionally, playlists can have "id", "title", and any other relevent
378 attributes with the same semantics as videos (see above).
fed5d032
PH
379
380
381 _type "multi_video" indicates that there are multiple videos that
382 form a single show, for examples multiple acts of an opera or TV episode.
383 It must have an entries key like a playlist and contain all the keys
384 required for a video at the same time.
385
386
387 _type "url" indicates that the video must be extracted from another
388 location, possibly by a different extractor. Its only required key is:
389 "url" - the next URL to extract.
f58766ce
PH
390 The key "ie_key" can be set to the class name (minus the trailing "IE",
391 e.g. "Youtube") if the extractor class is known in advance.
392 Additionally, the dictionary may have any properties of the resolved entity
393 known in advance, for example "title" if the title of the referred video is
fed5d032
PH
394 known ahead of time.
395
396
397 _type "url_transparent" entities have the same specification as "url", but
398 indicate that the given additional information is more precise than the one
399 associated with the resolved URL.
400 This is useful when a site employs a video service that hosts the video and
401 its technical metadata, but that video service does not embed a useful
402 title, description etc.
403
404
d6983cb4
PH
405 Subclasses of this one should re-define the _real_initialize() and
406 _real_extract() methods and define a _VALID_URL regexp.
407 Probably, they should also be added to the list of extractors.
408
e6f21b3d 409 Subclasses may also override suitable() if necessary, but ensure the function
410 signature is preserved and that this function imports everything it needs
411 (except other extractors), so that lazy_extractors works correctly
412
4248dad9 413 _GEO_BYPASS attribute may be set to False in order to disable
773f291d
S
414 geo restriction bypass mechanisms for a particular extractor.
415 Though it won't disable explicit geo restriction bypass based on
504f20dd 416 country code provided with geo_bypass_country.
4248dad9
S
417
418 _GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
419 countries for this extractor. One of these countries will be used by
420 geo restriction bypass mechanism right away in order to bypass
504f20dd 421 geo restriction, of course, if the mechanism is not disabled.
773f291d 422
5f95927a
S
423 _GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
424 IP blocks in CIDR notation for this extractor. One of these IP blocks
425 will be used by geo restriction bypass mechanism similarly
504f20dd 426 to _GEO_COUNTRIES.
3ccdde8c 427
e6f21b3d 428 The _WORKING attribute should be set to False for broken IEs
d6983cb4
PH
429 in order to warn the users and skip the tests.
430 """
431
432 _ready = False
433 _downloader = None
773f291d 434 _x_forwarded_for_ip = None
4248dad9
S
435 _GEO_BYPASS = True
436 _GEO_COUNTRIES = None
5f95927a 437 _GEO_IP_BLOCKS = None
d6983cb4
PH
438 _WORKING = True
439
9d5d4d64 440 _LOGIN_HINTS = {
441 'any': 'Use --cookies, --username and --password or --netrc to provide account credentials',
442 'cookies': (
443 'Use --cookies for the authentication. '
444 'See https://github.com/ytdl-org/youtube-dl#how-do-i-pass-cookies-to-youtube-dl for how to pass cookies'),
445 'password': 'Use --username and --password or --netrc to provide account credentials',
446 }
447
d6983cb4
PH
448 def __init__(self, downloader=None):
449 """Constructor. Receives an optional downloader."""
450 self._ready = False
773f291d 451 self._x_forwarded_for_ip = None
28f436ba 452 self._printed_messages = set()
d6983cb4
PH
453 self.set_downloader(downloader)
454
455 @classmethod
5ad28e7f 456 def _match_valid_url(cls, url):
79cb2577
PH
457 # This does not use has/getattr intentionally - we want to know whether
458 # we have cached the regexp for *this* class, whereas getattr would also
459 # match the superclass
460 if '_VALID_URL_RE' not in cls.__dict__:
461 cls._VALID_URL_RE = re.compile(cls._VALID_URL)
5ad28e7f 462 return cls._VALID_URL_RE.match(url)
463
464 @classmethod
465 def suitable(cls, url):
466 """Receives a URL and returns True if suitable for this IE."""
3fb4e21b 467 # This function must import everything it needs (except other extractors),
468 # so that lazy_extractors works correctly
5ad28e7f 469 return cls._match_valid_url(url) is not None
d6983cb4 470
ed9266db
PH
471 @classmethod
472 def _match_id(cls, url):
5ad28e7f 473 return cls._match_valid_url(url).group('id')
ed9266db 474
1151c407 475 @classmethod
476 def get_temp_id(cls, url):
477 try:
478 return cls._match_id(url)
479 except (IndexError, AttributeError):
480 return None
481
d6983cb4
PH
482 @classmethod
483 def working(cls):
484 """Getter method for _WORKING."""
485 return cls._WORKING
486
487 def initialize(self):
488 """Initializes an instance (authentication, etc)."""
28f436ba 489 self._printed_messages = set()
5f95927a
S
490 self._initialize_geo_bypass({
491 'countries': self._GEO_COUNTRIES,
492 'ip_blocks': self._GEO_IP_BLOCKS,
493 })
4248dad9
S
494 if not self._ready:
495 self._real_initialize()
496 self._ready = True
497
5f95927a 498 def _initialize_geo_bypass(self, geo_bypass_context):
e39b5d4a
S
499 """
500 Initialize geo restriction bypass mechanism.
501
502 This method is used to initialize geo bypass mechanism based on faking
503 X-Forwarded-For HTTP header. A random country from provided country list
dc0a869e 504 is selected and a random IP belonging to this country is generated. This
e39b5d4a
S
505 IP will be passed as X-Forwarded-For HTTP header in all subsequent
506 HTTP requests.
e39b5d4a
S
507
508 This method will be used for initial geo bypass mechanism initialization
5f95927a
S
509 during the instance initialization with _GEO_COUNTRIES and
510 _GEO_IP_BLOCKS.
e39b5d4a 511
5f95927a 512 You may also manually call it from extractor's code if geo bypass
e39b5d4a 513 information is not available beforehand (e.g. obtained during
5f95927a
S
514 extraction) or due to some other reason. In this case you should pass
515 this information in geo bypass context passed as first argument. It may
516 contain following fields:
517
518 countries: List of geo unrestricted countries (similar
519 to _GEO_COUNTRIES)
520 ip_blocks: List of geo unrestricted IP blocks in CIDR notation
521 (similar to _GEO_IP_BLOCKS)
522
e39b5d4a 523 """
773f291d 524 if not self._x_forwarded_for_ip:
5f95927a
S
525
526 # Geo bypass mechanism is explicitly disabled by user
a06916d9 527 if not self.get_param('geo_bypass', True):
5f95927a
S
528 return
529
530 if not geo_bypass_context:
531 geo_bypass_context = {}
532
533 # Backward compatibility: previously _initialize_geo_bypass
534 # expected a list of countries, some 3rd party code may still use
535 # it this way
536 if isinstance(geo_bypass_context, (list, tuple)):
537 geo_bypass_context = {
538 'countries': geo_bypass_context,
539 }
540
541 # The whole point of geo bypass mechanism is to fake IP
542 # as X-Forwarded-For HTTP header based on some IP block or
543 # country code.
544
545 # Path 1: bypassing based on IP block in CIDR notation
546
547 # Explicit IP block specified by user, use it right away
548 # regardless of whether extractor is geo bypassable or not
a06916d9 549 ip_block = self.get_param('geo_bypass_ip_block', None)
5f95927a
S
550
551 # Otherwise use random IP block from geo bypass context but only
552 # if extractor is known as geo bypassable
553 if not ip_block:
554 ip_blocks = geo_bypass_context.get('ip_blocks')
555 if self._GEO_BYPASS and ip_blocks:
556 ip_block = random.choice(ip_blocks)
557
558 if ip_block:
559 self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
0760b0a7 560 self._downloader.write_debug(
561 '[debug] Using fake IP %s as X-Forwarded-For' % self._x_forwarded_for_ip)
5f95927a
S
562 return
563
564 # Path 2: bypassing based on country code
565
566 # Explicit country code specified by user, use it right away
567 # regardless of whether extractor is geo bypassable or not
a06916d9 568 country = self.get_param('geo_bypass_country', None)
5f95927a
S
569
570 # Otherwise use random country code from geo bypass context but
571 # only if extractor is known as geo bypassable
572 if not country:
573 countries = geo_bypass_context.get('countries')
574 if self._GEO_BYPASS and countries:
575 country = random.choice(countries)
576
577 if country:
578 self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
0760b0a7 579 self._downloader.write_debug(
580 'Using fake IP %s (%s) as X-Forwarded-For' % (self._x_forwarded_for_ip, country.upper()))
d6983cb4
PH
581
582 def extract(self, url):
583 """Extracts URL information and returns it in list of dicts."""
3a5bcd03 584 try:
773f291d
S
585 for _ in range(2):
586 try:
587 self.initialize()
a06916d9 588 self.write_debug('Extracting URL: %s' % url)
0016b84e 589 ie_result = self._real_extract(url)
07cce701 590 if ie_result is None:
591 return None
0016b84e
S
592 if self._x_forwarded_for_ip:
593 ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
53ed7066 594 subtitles = ie_result.get('subtitles')
595 if (subtitles and 'live_chat' in subtitles
a06916d9 596 and 'no-live-chat' in self.get_param('compat_opts', [])):
53ed7066 597 del subtitles['live_chat']
0016b84e 598 return ie_result
773f291d 599 except GeoRestrictedError as e:
4248dad9
S
600 if self.__maybe_fake_ip_and_retry(e.countries):
601 continue
773f291d 602 raise
1151c407 603 except ExtractorError as e:
604 video_id = e.video_id or self.get_temp_id(url)
605 raise ExtractorError(
606 e.msg, video_id=video_id, ie=self.IE_NAME, tb=e.traceback, expected=e.expected, cause=e.cause)
3a5bcd03 607 except compat_http_client.IncompleteRead as e:
1151c407 608 raise ExtractorError('A network error has occurred.', cause=e, expected=True, video_id=self.get_temp_id(url))
9650885b 609 except (KeyError, StopIteration) as e:
1151c407 610 raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url))
d6983cb4 611
4248dad9 612 def __maybe_fake_ip_and_retry(self, countries):
a06916d9 613 if (not self.get_param('geo_bypass_country', None)
3089bc74 614 and self._GEO_BYPASS
a06916d9 615 and self.get_param('geo_bypass', True)
3089bc74
S
616 and not self._x_forwarded_for_ip
617 and countries):
eea0716c
S
618 country_code = random.choice(countries)
619 self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
4248dad9
S
620 if self._x_forwarded_for_ip:
621 self.report_warning(
eea0716c
S
622 'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
623 % (self._x_forwarded_for_ip, country_code.upper()))
4248dad9
S
624 return True
625 return False
626
d6983cb4
PH
627 def set_downloader(self, downloader):
628 """Sets the downloader for this IE."""
629 self._downloader = downloader
630
631 def _real_initialize(self):
632 """Real initialization process. Redefine in subclasses."""
633 pass
634
635 def _real_extract(self, url):
636 """Real extraction process. Redefine in subclasses."""
637 pass
638
56c73665
JMF
639 @classmethod
640 def ie_key(cls):
641 """A string for getting the InfoExtractor with get_info_extractor"""
3fb4e21b 642 return cls.__name__[:-2]
56c73665 643
d6983cb4
PH
644 @property
645 def IE_NAME(self):
dc519b54 646 return compat_str(type(self).__name__[:-2])
d6983cb4 647
d391b7e2
S
648 @staticmethod
649 def __can_accept_status_code(err, expected_status):
650 assert isinstance(err, compat_urllib_error.HTTPError)
651 if expected_status is None:
652 return False
d391b7e2
S
653 elif callable(expected_status):
654 return expected_status(err.code) is True
655 else:
6606817a 656 return err.code in variadic(expected_status)
d391b7e2
S
657
658 def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}, expected_status=None):
659 """
660 Return the response handle.
661
662 See _download_webpage docstring for arguments specification.
663 """
1cf376f5 664 if not self._downloader._first_webpage_request:
a06916d9 665 sleep_interval = float_or_none(self.get_param('sleep_interval_requests')) or 0
1cf376f5 666 if sleep_interval > 0:
5ef7d9bd 667 self.to_screen('Sleeping %s seconds ...' % sleep_interval)
1cf376f5 668 time.sleep(sleep_interval)
669 else:
670 self._downloader._first_webpage_request = False
671
d6983cb4
PH
672 if note is None:
673 self.report_download_webpage(video_id)
674 elif note is not False:
7cc3570e 675 if video_id is None:
f1a9d64e 676 self.to_screen('%s' % (note,))
7cc3570e 677 else:
f1a9d64e 678 self.to_screen('%s: %s' % (video_id, note))
2132edaa
S
679
680 # Some sites check X-Forwarded-For HTTP header in order to figure out
681 # the origin of the client behind proxy. This allows bypassing geo
682 # restriction by faking this header's value to IP that belongs to some
683 # geo unrestricted country. We will do so once we encounter any
684 # geo restriction error.
685 if self._x_forwarded_for_ip:
686 if 'X-Forwarded-For' not in headers:
687 headers['X-Forwarded-For'] = self._x_forwarded_for_ip
688
41d06b04
S
689 if isinstance(url_or_request, compat_urllib_request.Request):
690 url_or_request = update_Request(
691 url_or_request, data=data, headers=headers, query=query)
692 else:
cdfee168 693 if query:
694 url_or_request = update_url_query(url_or_request, query)
2c0d9c62 695 if data is not None or headers:
41d06b04 696 url_or_request = sanitized_Request(url_or_request, data, headers)
d6983cb4 697 try:
dca08720 698 return self._downloader.urlopen(url_or_request)
3158150c 699 except network_exceptions as err:
d391b7e2
S
700 if isinstance(err, compat_urllib_error.HTTPError):
701 if self.__can_accept_status_code(err, expected_status):
95e42d73
XDG
702 # Retain reference to error to prevent file object from
703 # being closed before it can be read. Works around the
704 # effects of <https://bugs.python.org/issue15002>
705 # introduced in Python 3.4.1.
706 err.fp._error = err
d391b7e2
S
707 return err.fp
708
aa94a6d3
PH
709 if errnote is False:
710 return False
d6983cb4 711 if errnote is None:
f1a9d64e 712 errnote = 'Unable to download webpage'
7f8b2714 713
9b9c5355 714 errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
7cc3570e
PH
715 if fatal:
716 raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
717 else:
6a39ee13 718 self.report_warning(errmsg)
7cc3570e 719 return False
d6983cb4 720
d391b7e2
S
721 def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
722 """
723 Return a tuple (page content as string, URL handle).
724
725 See _download_webpage docstring for arguments specification.
726 """
b9d3e163
PH
727 # Strip hashes from the URL (#1038)
728 if isinstance(url_or_request, (compat_str, str)):
729 url_or_request = url_or_request.partition('#')[0]
730
d391b7e2 731 urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
7cc3570e
PH
732 if urlh is False:
733 assert not fatal
734 return False
c9a77969 735 content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
23be51d8
PH
736 return (content, urlh)
737
c9a77969
YCH
738 @staticmethod
739 def _guess_encoding_from_content(content_type, webpage_bytes):
d6983cb4
PH
740 m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
741 if m:
742 encoding = m.group(1)
743 else:
0d75ae2c 744 m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
f143d86a
PH
745 webpage_bytes[:1024])
746 if m:
747 encoding = m.group(1).decode('ascii')
b60016e8
PH
748 elif webpage_bytes.startswith(b'\xff\xfe'):
749 encoding = 'utf-16'
f143d86a
PH
750 else:
751 encoding = 'utf-8'
c9a77969
YCH
752
753 return encoding
754
4457823d
S
    def __check_blocked(self, content):
        """
        Raise ExtractorError when the downloaded page is a known censorship or
        filtering block page (Websense, Indian censorship, Russian RKN).
        """
        # Block-page markers usually appear very early in the document
        first_block = content[:512]
        if ('<title>Access to this site is blocked</title>' in content
                and 'Websense' in first_block):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            # Try to surface the info URL from the block page's iframe
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in first_block:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)
        if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
                and 'blocklist.rkn.gov.ru' in content):
            raise ExtractorError(
                'Access to this webpage has been blocked by decision of the Russian government. '
                'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
                expected=True)
782
c9a77969
YCH
    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        """
        Read and decode the body of an already-open URL handle.

        Honors the dump_intermediate_pages and write_pages options, guesses
        the encoding when not given, and raises on known block pages.
        """
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if not encoding:
            encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
        if self.get_param('dump_intermediate_pages', False):
            self.to_screen('Dumping request to ' + urlh.geturl())
            # base64 keeps the dump printable regardless of page encoding
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self.get_param('write_pages', False):
            basen = '%s_%s' % (video_id, urlh.geturl())
            trim_length = self.get_param('trim_file_name') or 240
            if len(basen) > trim_length:
                # Keep the filename unique after trimming by appending a hash
                h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
                basen = basen[:trim_length - len(h)] + h
            raw_filename = basen + '.dump'
            filename = sanitize_filename(raw_filename, restricted=True)
            self.to_screen('Saving request to ' + filename)
            # Working around MAX_PATH limitation on Windows (see
            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
            if compat_os_name == 'nt':
                absfilepath = os.path.abspath(filename)
                if len(absfilepath) > 259:
                    filename = '\\\\?\\' + absfilepath
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)

        # Fall back to utf-8 if the guessed/supplied encoding is unknown
        try:
            content = webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            content = webpage_bytes.decode('utf-8', 'replace')

        self.__check_blocked(content)

        return content
d6983cb4 820
d391b7e2
S
    def _download_webpage(
            self, url_or_request, video_id, note=None, errnote=None,
            fatal=True, tries=1, timeout=5, encoding=None, data=None,
            headers={}, query={}, expected_status=None):
        """
        Return the data of the page as a string.

        Arguments:
        url_or_request -- plain text URL as a string or
            a compat_urllib_request.Requestobject
        video_id -- Video/playlist/item identifier (string)

        Keyword arguments:
        note -- note printed before downloading (string)
        errnote -- note printed in case of an error (string)
        fatal -- flag denoting whether error should be considered fatal,
            i.e. whether it should cause ExtractionError to be raised,
            otherwise a warning will be reported and extraction continued
        tries -- number of tries
        timeout -- sleep interval between tries
        encoding -- encoding for a page content decoding, guessed automatically
            when not explicitly specified
        data -- POST data (bytes)
        headers -- HTTP headers (dict)
        query -- URL query (dict)
        expected_status -- allows to accept failed HTTP requests (non 2xx
            status code) by explicitly specifying a set of accepted status
            codes. Can be any of the following entities:
            - an integer type specifying an exact failed status code to
              accept
            - a list or a tuple of integer types specifying a list of
              failed status codes to accept
            - a callable accepting an actual failed status code and
              returning True if it should be accepted
            Note that this argument does not affect success status codes (2xx)
            which are always accepted.
        """

        # Retry only on IncompleteRead; any other error propagates immediately.
        success = False
        try_count = 0
        while success is False:
            try:
                res = self._download_webpage_handle(
                    url_or_request, video_id, note, errnote, fatal,
                    encoding=encoding, data=data, headers=headers, query=query,
                    expected_status=expected_status)
                success = True
            except compat_http_client.IncompleteRead as e:
                try_count += 1
                if try_count >= tries:
                    # Out of retries: re-raise the last IncompleteRead
                    raise e
                self._sleep(timeout, video_id)
        if res is False:
            # Non-fatal download failure
            return res
        else:
            content, _ = res
            return content
d6983cb4 878
e0d198c1
S
879 def _download_xml_handle(
880 self, url_or_request, video_id, note='Downloading XML',
881 errnote='Unable to download XML', transform_source=None,
d391b7e2
S
882 fatal=True, encoding=None, data=None, headers={}, query={},
883 expected_status=None):
884 """
ee0ba927 885 Return a tuple (xml as an compat_etree_Element, URL handle).
d391b7e2
S
886
887 See _download_webpage docstring for arguments specification.
888 """
e0d198c1
S
889 res = self._download_webpage_handle(
890 url_or_request, video_id, note, errnote, fatal=fatal,
d391b7e2
S
891 encoding=encoding, data=data, headers=headers, query=query,
892 expected_status=expected_status)
e0d198c1
S
893 if res is False:
894 return res
895 xml_string, urlh = res
896 return self._parse_xml(
897 xml_string, video_id, transform_source=transform_source,
898 fatal=fatal), urlh
899
d391b7e2
S
900 def _download_xml(
901 self, url_or_request, video_id,
902 note='Downloading XML', errnote='Unable to download XML',
903 transform_source=None, fatal=True, encoding=None,
904 data=None, headers={}, query={}, expected_status=None):
905 """
ee0ba927 906 Return the xml as an compat_etree_Element.
d391b7e2
S
907
908 See _download_webpage docstring for arguments specification.
909 """
e0d198c1
S
910 res = self._download_xml_handle(
911 url_or_request, video_id, note=note, errnote=errnote,
912 transform_source=transform_source, fatal=fatal, encoding=encoding,
d391b7e2
S
913 data=data, headers=headers, query=query,
914 expected_status=expected_status)
e0d198c1 915 return res if res is False else res[0]
e01c3d2e
S
916
917 def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True):
e2b38da9
PH
918 if transform_source:
919 xml_string = transform_source(xml_string)
e01c3d2e
S
920 try:
921 return compat_etree_fromstring(xml_string.encode('utf-8'))
922 except compat_xml_parse_error as ve:
923 errmsg = '%s: Failed to parse XML ' % video_id
924 if fatal:
925 raise ExtractorError(errmsg, cause=ve)
926 else:
927 self.report_warning(errmsg + str(ve))
267ed0c5 928
0fe7783e
S
929 def _download_json_handle(
930 self, url_or_request, video_id, note='Downloading JSON metadata',
931 errnote='Unable to download JSON metadata', transform_source=None,
d391b7e2
S
932 fatal=True, encoding=None, data=None, headers={}, query={},
933 expected_status=None):
934 """
935 Return a tuple (JSON object, URL handle).
936
937 See _download_webpage docstring for arguments specification.
938 """
0fe7783e 939 res = self._download_webpage_handle(
c9a77969 940 url_or_request, video_id, note, errnote, fatal=fatal,
d391b7e2
S
941 encoding=encoding, data=data, headers=headers, query=query,
942 expected_status=expected_status)
0fe7783e
S
943 if res is False:
944 return res
945 json_string, urlh = res
ebb64199 946 return self._parse_json(
0fe7783e
S
947 json_string, video_id, transform_source=transform_source,
948 fatal=fatal), urlh
949
950 def _download_json(
951 self, url_or_request, video_id, note='Downloading JSON metadata',
952 errnote='Unable to download JSON metadata', transform_source=None,
d391b7e2
S
953 fatal=True, encoding=None, data=None, headers={}, query={},
954 expected_status=None):
955 """
956 Return the JSON object as a dict.
957
958 See _download_webpage docstring for arguments specification.
959 """
0fe7783e
S
960 res = self._download_json_handle(
961 url_or_request, video_id, note=note, errnote=errnote,
962 transform_source=transform_source, fatal=fatal, encoding=encoding,
d391b7e2
S
963 data=data, headers=headers, query=query,
964 expected_status=expected_status)
0fe7783e 965 return res if res is False else res[0]
ebb64199
TF
966
967 def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
81c2f20b
PH
968 if transform_source:
969 json_string = transform_source(json_string)
3d3538e4
PH
970 try:
971 return json.loads(json_string)
972 except ValueError as ve:
e7b6d122
PH
973 errmsg = '%s: Failed to parse JSON ' % video_id
974 if fatal:
975 raise ExtractorError(errmsg, cause=ve)
976 else:
977 self.report_warning(errmsg + str(ve))
3d3538e4 978
adddc50c 979 def _parse_socket_response_as_json(self, data, video_id, transform_source=None, fatal=True):
980 return self._parse_json(
981 data[data.find('{'):data.rfind('}') + 1],
982 video_id, transform_source, fatal)
983
984 def _download_socket_json_handle(
985 self, url_or_request, video_id, note='Polling socket',
986 errnote='Unable to poll socket', transform_source=None,
987 fatal=True, encoding=None, data=None, headers={}, query={},
988 expected_status=None):
989 """
990 Return a tuple (JSON object, URL handle).
991
992 See _download_webpage docstring for arguments specification.
993 """
994 res = self._download_webpage_handle(
995 url_or_request, video_id, note, errnote, fatal=fatal,
996 encoding=encoding, data=data, headers=headers, query=query,
997 expected_status=expected_status)
998 if res is False:
999 return res
1000 webpage, urlh = res
1001 return self._parse_socket_response_as_json(
1002 webpage, video_id, transform_source=transform_source,
1003 fatal=fatal), urlh
1004
1005 def _download_socket_json(
1006 self, url_or_request, video_id, note='Polling socket',
1007 errnote='Unable to poll socket', transform_source=None,
1008 fatal=True, encoding=None, data=None, headers={}, query={},
1009 expected_status=None):
1010 """
1011 Return the JSON object as a dict.
1012
1013 See _download_webpage docstring for arguments specification.
1014 """
1015 res = self._download_socket_json_handle(
1016 url_or_request, video_id, note=note, errnote=errnote,
1017 transform_source=transform_source, fatal=fatal, encoding=encoding,
1018 data=data, headers=headers, query=query,
1019 expected_status=expected_status)
1020 return res if res is False else res[0]
1021
28f436ba 1022 def report_warning(self, msg, video_id=None, *args, only_once=False, **kwargs):
b868936c 1023 idstr = format_field(video_id, template='%s: ')
28f436ba 1024 msg = f'[{self.IE_NAME}] {idstr}{msg}'
1025 if only_once:
1026 if f'WARNING: {msg}' in self._printed_messages:
1027 return
1028 self._printed_messages.add(f'WARNING: {msg}')
1029 self._downloader.report_warning(msg, *args, **kwargs)
f45f96f8 1030
a06916d9 1031 def to_screen(self, msg, *args, **kwargs):
d6983cb4 1032 """Print msg to screen, prefixing it with '[ie_name]'"""
a06916d9 1033 self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg), *args, **kwargs)
1034
    def write_debug(self, msg, *args, **kwargs):
        """Write a debug message via the downloader, prefixed with '[ie_name]'."""
        self._downloader.write_debug('[%s] %s' % (self.IE_NAME, msg), *args, **kwargs)
1037
1038 def get_param(self, name, default=None, *args, **kwargs):
1039 if self._downloader:
1040 return self._downloader.params.get(name, default, *args, **kwargs)
1041 return default
d6983cb4 1042
88acdbc2 1043 def report_drm(self, video_id, partial=False):
1044 self.raise_no_formats('This video is DRM protected', expected=True, video_id=video_id)
1045
d6983cb4
PH
    def report_extraction(self, id_or_name):
        """Report information extraction."""
        self.to_screen('%s: Extracting information' % id_or_name)
d6983cb4
PH
1049
    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self.to_screen('%s: Downloading webpage' % video_id)
d6983cb4
PH
1053
    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen('Confirming age')
d6983cb4 1057
fc79158d
JMF
    def report_login(self):
        """Report attempt to log in."""
        self.to_screen('Logging in')
fc79158d 1061
b7da73eb 1062 def raise_login_required(
9d5d4d64 1063 self, msg='This video is only available for registered users',
1064 metadata_available=False, method='any'):
a06916d9 1065 if metadata_available and self.get_param('ignore_no_formats_error'):
b7da73eb 1066 self.report_warning(msg)
46890374 1067 if method is not None:
1068 msg = '%s. %s' % (msg, self._LOGIN_HINTS[method])
1069 raise ExtractorError(msg, expected=True)
43e7d3c9 1070
b7da73eb 1071 def raise_geo_restricted(
1072 self, msg='This video is not available from your location due to geo restriction',
1073 countries=None, metadata_available=False):
a06916d9 1074 if metadata_available and self.get_param('ignore_no_formats_error'):
b7da73eb 1075 self.report_warning(msg)
1076 else:
1077 raise GeoRestrictedError(msg, countries=countries)
1078
1079 def raise_no_formats(self, msg, expected=False, video_id=None):
a06916d9 1080 if expected and self.get_param('ignore_no_formats_error'):
b7da73eb 1081 self.report_warning(msg, video_id)
68f5867c
L
1082 elif isinstance(msg, ExtractorError):
1083 raise msg
b7da73eb 1084 else:
1085 raise ExtractorError(msg, expected=expected, video_id=video_id)
c430802e 1086
5f6a1245 1087 # Methods for following #608
c0d0b01f 1088 @staticmethod
830d53bf 1089 def url_result(url, ie=None, video_id=None, video_title=None):
10952eb2 1090 """Returns a URL that points to a page that should be processed"""
5f6a1245 1091 # TODO: ie should be the class used for getting the info
d6983cb4
PH
1092 video_info = {'_type': 'url',
1093 'url': url,
1094 'ie_key': ie}
7012b23c
PH
1095 if video_id is not None:
1096 video_info['id'] = video_id
830d53bf
S
1097 if video_title is not None:
1098 video_info['title'] = video_title
d6983cb4 1099 return video_info
5f6a1245 1100
749ca5ec
S
1101 def playlist_from_matches(self, matches, playlist_id=None, playlist_title=None, getter=None, ie=None):
1102 urls = orderedSet(
46b18f23
JH
1103 self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
1104 for m in matches)
1105 return self.playlist_result(
749ca5ec 1106 urls, playlist_id=playlist_id, playlist_title=playlist_title)
46b18f23 1107
c0d0b01f 1108 @staticmethod
b60419c5 1109 def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None, **kwargs):
d6983cb4
PH
1110 """Returns a playlist"""
1111 video_info = {'_type': 'playlist',
1112 'entries': entries}
b60419c5 1113 video_info.update(kwargs)
d6983cb4
PH
1114 if playlist_id:
1115 video_info['id'] = playlist_id
1116 if playlist_title:
1117 video_info['title'] = playlist_title
ecc97af3 1118 if playlist_description is not None:
acf5cbfe 1119 video_info['description'] = playlist_description
d6983cb4
PH
1120 return video_info
1121
c342041f 1122 def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
d6983cb4
PH
1123 """
1124 Perform a regex search on the given string, using a single or a list of
1125 patterns returning the first matching group.
1126 In case of failure return a default value or raise a WARNING or a
55b3e45b 1127 RegexNotFoundError, depending on fatal, specifying the field name.
d6983cb4
PH
1128 """
1129 if isinstance(pattern, (str, compat_str, compiled_regex_type)):
1130 mobj = re.search(pattern, string, flags)
1131 else:
1132 for p in pattern:
1133 mobj = re.search(p, string, flags)
c3415d1b
PH
1134 if mobj:
1135 break
d6983cb4 1136
819e0531 1137 _name = self._downloader._color_text(name, 'blue')
d6983cb4
PH
1138
1139 if mobj:
711ede6e
PH
1140 if group is None:
1141 # return the first matching group
1142 return next(g for g in mobj.groups() if g is not None)
198f7ea8 1143 elif isinstance(group, (list, tuple)):
1144 return tuple(mobj.group(g) for g in group)
711ede6e
PH
1145 else:
1146 return mobj.group(group)
c342041f 1147 elif default is not NO_DEFAULT:
d6983cb4
PH
1148 return default
1149 elif fatal:
f1a9d64e 1150 raise RegexNotFoundError('Unable to extract %s' % _name)
d6983cb4 1151 else:
6a39ee13 1152 self.report_warning('unable to extract %s' % _name + bug_reports_message())
d6983cb4
PH
1153 return None
1154
c342041f 1155 def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
d6983cb4
PH
1156 """
1157 Like _search_regex, but strips HTML tags and unescapes entities.
1158 """
711ede6e 1159 res = self._search_regex(pattern, string, name, default, fatal, flags, group)
d6983cb4
PH
1160 if res:
1161 return clean_html(res).strip()
1162 else:
1163 return res
1164
2118fdd1
RA
1165 def _get_netrc_login_info(self, netrc_machine=None):
1166 username = None
1167 password = None
1168 netrc_machine = netrc_machine or self._NETRC_MACHINE
1169
a06916d9 1170 if self.get_param('usenetrc', False):
2118fdd1 1171 try:
0001fcb5 1172 netrc_file = compat_expanduser(self.get_param('netrc_location') or '~')
1173 if os.path.isdir(netrc_file):
1174 netrc_file = os.path.join(netrc_file, '.netrc')
1175 info = netrc.netrc(file=netrc_file).authenticators(netrc_machine)
2118fdd1
RA
1176 if info is not None:
1177 username = info[0]
1178 password = info[2]
1179 else:
dcce092e
S
1180 raise netrc.NetrcParseError(
1181 'No authenticators for %s' % netrc_machine)
2118fdd1 1182 except (IOError, netrc.NetrcParseError) as err:
6a39ee13 1183 self.report_warning(
dcce092e 1184 'parsing .netrc: %s' % error_to_compat_str(err))
2118fdd1 1185
dcce092e 1186 return username, password
2118fdd1 1187
1b6712ab 1188 def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
fc79158d 1189 """
cf0649f8 1190 Get the login info as (username, password)
32443dd3
S
1191 First look for the manually specified credentials using username_option
1192 and password_option as keys in params dictionary. If no such credentials
1193 available look in the netrc file using the netrc_machine or _NETRC_MACHINE
1194 value.
fc79158d
JMF
1195 If there's no info available, return (None, None)
1196 """
fc79158d
JMF
1197
1198 # Attempt to use provided username and password or .netrc data
a06916d9 1199 username = self.get_param(username_option)
1200 if username is not None:
1201 password = self.get_param(password_option)
2118fdd1 1202 else:
1b6712ab 1203 username, password = self._get_netrc_login_info(netrc_machine)
5f6a1245 1204
2133565c 1205 return username, password
fc79158d 1206
e64b7569 1207 def _get_tfa_info(self, note='two-factor verification code'):
83317f69 1208 """
1209 Get the two-factor authentication info
1210 TODO - asking the user will be required for sms/phone verify
1211 currently just uses the command line option
1212 If there's no info available, return None
1213 """
83317f69 1214
a06916d9 1215 tfa = self.get_param('twofactor')
1216 if tfa is not None:
1217 return tfa
83317f69 1218
e64b7569 1219 return compat_getpass('Type %s and press [Return]: ' % note)
83317f69 1220
46720279
JMF
1221 # Helper functions for extracting OpenGraph info
1222 @staticmethod
ab2d5247 1223 def _og_regexes(prop):
448ef1f3 1224 content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
22f5f5c6 1225 property_re = (r'(?:name|property)=(?:\'og[:-]%(prop)s\'|"og[:-]%(prop)s"|\s*og[:-]%(prop)s\b)'
7a6d76a6 1226 % {'prop': re.escape(prop)})
78fb87b2 1227 template = r'<meta[^>]+?%s[^>]+?%s'
ab2d5247 1228 return [
78fb87b2
JMF
1229 template % (property_re, content_re),
1230 template % (content_re, property_re),
ab2d5247 1231 ]
46720279 1232
864f24bd
S
    @staticmethod
    def _meta_regex(prop):
        # Verbose (x) regex: lookahead matches any of the name-ish attributes,
        # then group 'content' captures the content attribute's value
        return r'''(?isx)<meta
                    (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
1238
3c4e6d83 1239 def _og_search_property(self, prop, html, name=None, **kargs):
6606817a 1240 prop = variadic(prop)
46720279 1241 if name is None:
b070564e
S
1242 name = 'OpenGraph %s' % prop[0]
1243 og_regexes = []
1244 for p in prop:
1245 og_regexes.extend(self._og_regexes(p))
1246 escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
eb0a8398
PH
1247 if escaped is None:
1248 return None
1249 return unescapeHTML(escaped)
46720279
JMF
1250
    def _og_search_thumbnail(self, html, **kargs):
        """Return the og:image URL, or None (non-fatal)."""
        return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
46720279
JMF
1253
    def _og_search_description(self, html, **kargs):
        """Return the og:description value, or None (non-fatal)."""
        return self._og_search_property('description', html, fatal=False, **kargs)
1256
    def _og_search_title(self, html, **kargs):
        """Return the og:title value (fatal by default)."""
        return self._og_search_property('title', html, **kargs)
1259
8ffa13e0 1260 def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
a3681973
PH
1261 regexes = self._og_regexes('video') + self._og_regexes('video:url')
1262 if secure:
1263 regexes = self._og_regexes('video:secure_url') + regexes
8ffa13e0 1264 return self._html_search_regex(regexes, html, name, **kargs)
46720279 1265
78338f71
JMF
    def _og_search_url(self, html, **kargs):
        """Return the og:url value (fatal by default)."""
        return self._og_search_property('url', html, **kargs)
1268
40c696e5 1269 def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
6606817a 1270 name = variadic(name)
59040888 1271 if display_name is None:
88d9f6c0 1272 display_name = name[0]
59040888 1273 return self._html_search_regex(
88d9f6c0 1274 [self._meta_regex(n) for n in name],
711ede6e 1275 html, display_name, fatal=fatal, group='content', **kwargs)
59040888
PH
1276
    def _dc_search_uploader(self, html):
        """Return the Dublin Core creator as the uploader, or None."""
        return self._html_search_meta('dc.creator', html, 'uploader')
1279
8dbe9899
PH
1280 def _rta_search(self, html):
1281 # See http://www.rtalabel.org/index.php?content=howtofaq#single
1282 if re.search(r'(?ix)<meta\s+name="rating"\s+'
1283 r' content="RTA-5042-1996-1400-1577-RTA"',
1284 html):
1285 return 18
1286 return 0
1287
59040888
PH
1288 def _media_rating_search(self, html):
1289 # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
1290 rating = self._html_search_meta('rating', html)
1291
1292 if not rating:
1293 return None
1294
1295 RATING_TABLE = {
1296 'safe for kids': 0,
1297 'general': 8,
1298 '14 years': 14,
1299 'mature': 17,
1300 'restricted': 19,
1301 }
d800609c 1302 return RATING_TABLE.get(rating.lower())
59040888 1303
69319969 1304 def _family_friendly_search(self, html):
6ca7732d 1305 # See http://schema.org/VideoObject
ac8491fc
S
1306 family_friendly = self._html_search_meta(
1307 'isFamilyFriendly', html, default=None)
69319969
NJ
1308
1309 if not family_friendly:
1310 return None
1311
1312 RATING_TABLE = {
1313 '1': 0,
1314 'true': 0,
1315 '0': 18,
1316 'false': 18,
1317 }
d800609c 1318 return RATING_TABLE.get(family_friendly.lower())
69319969 1319
0c708f11
JMF
1320 def _twitter_search_player(self, html):
1321 return self._html_search_meta('twitter:player', html,
9e1a5b84 1322 'twitter card player')
0c708f11 1323
95b31e26 1324 def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
4433bb02 1325 json_ld_list = list(re.finditer(JSON_LD_RE, html))
321b5e08 1326 default = kwargs.get('default', NO_DEFAULT)
321b5e08
S
1327 # JSON-LD may be malformed and thus `fatal` should be respected.
1328 # At the same time `default` may be passed that assumes `fatal=False`
1329 # for _search_regex. Let's simulate the same behavior here as well.
dbf5416a 1330 fatal = kwargs.get('fatal', True) if default is NO_DEFAULT else False
4433bb02
S
1331 json_ld = []
1332 for mobj in json_ld_list:
1333 json_ld_item = self._parse_json(
1334 mobj.group('json_ld'), video_id, fatal=fatal)
1335 if not json_ld_item:
1336 continue
1337 if isinstance(json_ld_item, dict):
1338 json_ld.append(json_ld_item)
1339 elif isinstance(json_ld_item, (list, tuple)):
1340 json_ld.extend(json_ld_item)
1341 if json_ld:
1342 json_ld = self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
1343 if json_ld:
1344 return json_ld
1345 if default is not NO_DEFAULT:
1346 return default
1347 elif fatal:
1348 raise RegexNotFoundError('Unable to extract JSON-LD')
1349 else:
6a39ee13 1350 self.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
4433bb02 1351 return {}
4ca2a3cf 1352
95b31e26 1353 def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
4ca2a3cf
S
1354 if isinstance(json_ld, compat_str):
1355 json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
1356 if not json_ld:
1357 return {}
1358 info = {}
46933a15
S
1359 if not isinstance(json_ld, (list, tuple, dict)):
1360 return info
1361 if isinstance(json_ld, dict):
1362 json_ld = [json_ld]
bae14048 1363
e7e4a6e0
S
1364 INTERACTION_TYPE_MAP = {
1365 'CommentAction': 'comment',
1366 'AgreeAction': 'like',
1367 'DisagreeAction': 'dislike',
1368 'LikeAction': 'like',
1369 'DislikeAction': 'dislike',
1370 'ListenAction': 'view',
1371 'WatchAction': 'view',
1372 'ViewAction': 'view',
1373 }
1374
29f7c58a 1375 def extract_interaction_type(e):
1376 interaction_type = e.get('interactionType')
1377 if isinstance(interaction_type, dict):
1378 interaction_type = interaction_type.get('@type')
1379 return str_or_none(interaction_type)
1380
e7e4a6e0
S
1381 def extract_interaction_statistic(e):
1382 interaction_statistic = e.get('interactionStatistic')
29f7c58a 1383 if isinstance(interaction_statistic, dict):
1384 interaction_statistic = [interaction_statistic]
e7e4a6e0
S
1385 if not isinstance(interaction_statistic, list):
1386 return
1387 for is_e in interaction_statistic:
1388 if not isinstance(is_e, dict):
1389 continue
1390 if is_e.get('@type') != 'InteractionCounter':
1391 continue
29f7c58a 1392 interaction_type = extract_interaction_type(is_e)
1393 if not interaction_type:
e7e4a6e0 1394 continue
ce5b9040
S
1395 # For interaction count some sites provide string instead of
1396 # an integer (as per spec) with non digit characters (e.g. ",")
1397 # so extracting count with more relaxed str_to_int
1398 interaction_count = str_to_int(is_e.get('userInteractionCount'))
e7e4a6e0
S
1399 if interaction_count is None:
1400 continue
1401 count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
1402 if not count_kind:
1403 continue
1404 count_key = '%s_count' % count_kind
1405 if info.get(count_key) is not None:
1406 continue
1407 info[count_key] = interaction_count
1408
bae14048
S
1409 def extract_video_object(e):
1410 assert e['@type'] == 'VideoObject'
f7ad7160 1411 author = e.get('author')
bae14048 1412 info.update({
bebef109 1413 'url': url_or_none(e.get('contentUrl')),
bae14048
S
1414 'title': unescapeHTML(e.get('name')),
1415 'description': unescapeHTML(e.get('description')),
bebef109 1416 'thumbnail': url_or_none(e.get('thumbnailUrl') or e.get('thumbnailURL')),
bae14048
S
1417 'duration': parse_duration(e.get('duration')),
1418 'timestamp': unified_timestamp(e.get('uploadDate')),
f7ad7160 1419 # author can be an instance of 'Organization' or 'Person' types.
1420 # both types can have 'name' property(inherited from 'Thing' type). [1]
1421 # however some websites are using 'Text' type instead.
1422 # 1. https://schema.org/VideoObject
1423 'uploader': author.get('name') if isinstance(author, dict) else author if isinstance(author, compat_str) else None,
bae14048
S
1424 'filesize': float_or_none(e.get('contentSize')),
1425 'tbr': int_or_none(e.get('bitrate')),
1426 'width': int_or_none(e.get('width')),
1427 'height': int_or_none(e.get('height')),
33a81c2c 1428 'view_count': int_or_none(e.get('interactionCount')),
bae14048 1429 })
e7e4a6e0 1430 extract_interaction_statistic(e)
bae14048 1431
46933a15 1432 for e in json_ld:
4433bb02 1433 if '@context' in e:
46933a15
S
1434 item_type = e.get('@type')
1435 if expected_type is not None and expected_type != item_type:
4433bb02 1436 continue
c69701c6 1437 if item_type in ('TVEpisode', 'Episode'):
440863ad 1438 episode_name = unescapeHTML(e.get('name'))
46933a15 1439 info.update({
440863ad 1440 'episode': episode_name,
46933a15
S
1441 'episode_number': int_or_none(e.get('episodeNumber')),
1442 'description': unescapeHTML(e.get('description')),
1443 })
440863ad
S
1444 if not info.get('title') and episode_name:
1445 info['title'] = episode_name
46933a15 1446 part_of_season = e.get('partOfSeason')
c69701c6 1447 if isinstance(part_of_season, dict) and part_of_season.get('@type') in ('TVSeason', 'Season', 'CreativeWorkSeason'):
458fd30f
S
1448 info.update({
1449 'season': unescapeHTML(part_of_season.get('name')),
1450 'season_number': int_or_none(part_of_season.get('seasonNumber')),
1451 })
d16b3c66 1452 part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
c69701c6 1453 if isinstance(part_of_series, dict) and part_of_series.get('@type') in ('TVSeries', 'Series', 'CreativeWorkSeries'):
46933a15 1454 info['series'] = unescapeHTML(part_of_series.get('name'))
391256dc
S
1455 elif item_type == 'Movie':
1456 info.update({
1457 'title': unescapeHTML(e.get('name')),
1458 'description': unescapeHTML(e.get('description')),
1459 'duration': parse_duration(e.get('duration')),
1460 'timestamp': unified_timestamp(e.get('dateCreated')),
1461 })
3931b845 1462 elif item_type in ('Article', 'NewsArticle'):
46933a15
S
1463 info.update({
1464 'timestamp': parse_iso8601(e.get('datePublished')),
1465 'title': unescapeHTML(e.get('headline')),
1466 'description': unescapeHTML(e.get('articleBody')),
1467 })
1468 elif item_type == 'VideoObject':
bae14048 1469 extract_video_object(e)
4433bb02
S
1470 if expected_type is None:
1471 continue
1472 else:
1473 break
c69701c6
S
1474 video = e.get('video')
1475 if isinstance(video, dict) and video.get('@type') == 'VideoObject':
1476 extract_video_object(video)
4433bb02
S
1477 if expected_type is None:
1478 continue
1479 else:
1480 break
4ca2a3cf
S
1481 return dict((k, v) for k, v in info.items() if v is not None)
1482
@staticmethod
def _hidden_inputs(html):
    """Return a dict mapping name (or id) to value for every
    hidden/submit <input> element in *html*.

    HTML comments are stripped first so that commented-out forms
    are not picked up.
    """
    html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
    hidden_inputs = {}
    # `input_el` renamed from `input` (shadowed the builtin)
    for input_el in re.findall(r'(?i)(<input[^>]+>)', html):
        attrs = extract_attributes(input_el)
        if not attrs:
            # The original checked `if not input` here, but the findall
            # result is always non-empty; the intent was to skip
            # attribute-less inputs
            continue
        if attrs.get('type') not in ('hidden', 'submit'):
            continue
        name = attrs.get('name') or attrs.get('id')
        value = attrs.get('value')
        if name and value is not None:
            hidden_inputs[name] = value
    return hidden_inputs
27713812 1498
cf61d96d
S
def _form_hidden_inputs(self, form_id, html):
    """Return the hidden inputs of the <form> whose id attribute equals *form_id*."""
    form_html = self._search_regex(
        r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
        html, '%s form' % form_id, group='form')
    return self._hidden_inputs(form_html)
1504
class FormatSort:
    """Compute sort keys for format dicts according to the field-ordering
    mini-language used by --format-sort.

    Usage (see InfoExtractor._sort_formats):
        sorter = FormatSort()
        sorter.evaluate_params(ydl_params, extractor_sort_order)
        formats.sort(key=sorter.calculate_preference)
    """

    # Grammar of one sort item: [+]field[~limit | :limit]
    regex = r' *((?P<reverse>\+)?(?P<field>[a-zA-Z0-9_]+)((?P<separator>[~:])(?P<limit>.*?))?)? *$'

    default = ('hidden', 'aud_or_vid', 'hasvid', 'ie_pref', 'lang', 'quality',
               'res', 'fps', 'codec:vp9.2', 'size', 'br', 'asr',
               'proto', 'ext', 'hasaud', 'source', 'format_id')  # These must not be aliases
    ytdl_default = ('hasaud', 'lang', 'quality', 'tbr', 'filesize', 'vbr',
                    'height', 'width', 'proto', 'vext', 'abr', 'aext',
                    'fps', 'fs_approx', 'source', 'format_id')

    settings = {
        'vcodec': {'type': 'ordered', 'regex': True,
                   'order': ['av0?1', 'vp0?9.2', 'vp0?9', '[hx]265|he?vc?', '[hx]264|avc', 'vp0?8', 'mp4v|h263', 'theora', '', None, 'none']},
        'acodec': {'type': 'ordered', 'regex': True,
                   'order': ['opus', 'vorbis', 'aac', 'mp?4a?', 'mp3', 'e?a?c-?3', 'dts', '', None, 'none']},
        'proto': {'type': 'ordered', 'regex': True, 'field': 'protocol',
                  'order': ['(ht|f)tps', '(ht|f)tp$', 'm3u8.+', '.*dash', 'ws|websocket', '', 'mms|rtsp', 'none', 'f4']},
        'vext': {'type': 'ordered', 'field': 'video_ext',
                 'order': ('mp4', 'webm', 'flv', '', 'none'),
                 'order_free': ('webm', 'mp4', 'flv', '', 'none')},
        'aext': {'type': 'ordered', 'field': 'audio_ext',
                 'order': ('m4a', 'aac', 'mp3', 'ogg', 'opus', 'webm', '', 'none'),
                 'order_free': ('opus', 'ogg', 'webm', 'm4a', 'mp3', 'aac', '', 'none')},
        'hidden': {'visible': False, 'forced': True, 'type': 'extractor', 'max': -1000},
        'aud_or_vid': {'visible': False, 'forced': True, 'type': 'multiple',
                       'field': ('vcodec', 'acodec'),
                       'function': lambda it: int(any(v != 'none' for v in it))},
        'ie_pref': {'priority': True, 'type': 'extractor'},
        'hasvid': {'priority': True, 'field': 'vcodec', 'type': 'boolean', 'not_in_list': ('none',)},
        'hasaud': {'field': 'acodec', 'type': 'boolean', 'not_in_list': ('none',)},
        'lang': {'convert': 'ignore', 'field': 'language_preference'},
        'quality': {'convert': 'float_none', 'default': -1},
        'filesize': {'convert': 'bytes'},
        'fs_approx': {'convert': 'bytes', 'field': 'filesize_approx'},
        'id': {'convert': 'string', 'field': 'format_id'},
        'height': {'convert': 'float_none'},
        'width': {'convert': 'float_none'},
        'fps': {'convert': 'float_none'},
        'tbr': {'convert': 'float_none'},
        'vbr': {'convert': 'float_none'},
        'abr': {'convert': 'float_none'},
        'asr': {'convert': 'float_none'},
        'source': {'convert': 'ignore', 'field': 'source_preference'},

        'codec': {'type': 'combined', 'field': ('vcodec', 'acodec')},
        'br': {'type': 'combined', 'field': ('tbr', 'vbr', 'abr'), 'same_limit': True},
        'size': {'type': 'combined', 'same_limit': True, 'field': ('filesize', 'fs_approx')},
        'ext': {'type': 'combined', 'field': ('vext', 'aext')},
        'res': {'type': 'multiple', 'field': ('height', 'width'),
                'function': lambda it: (lambda l: min(l) if l else 0)(tuple(filter(None, it)))},

        # Most of these exist only for compatibility reasons
        'dimension': {'type': 'alias', 'field': 'res'},
        'resolution': {'type': 'alias', 'field': 'res'},
        'extension': {'type': 'alias', 'field': 'ext'},
        'bitrate': {'type': 'alias', 'field': 'br'},
        'total_bitrate': {'type': 'alias', 'field': 'tbr'},
        'video_bitrate': {'type': 'alias', 'field': 'vbr'},
        'audio_bitrate': {'type': 'alias', 'field': 'abr'},
        'framerate': {'type': 'alias', 'field': 'fps'},
        'language_preference': {'type': 'alias', 'field': 'lang'},  # not named as 'language' because such a field exists
        'protocol': {'type': 'alias', 'field': 'proto'},
        'source_preference': {'type': 'alias', 'field': 'source'},
        'filesize_approx': {'type': 'alias', 'field': 'fs_approx'},
        'filesize_estimate': {'type': 'alias', 'field': 'size'},
        'samplerate': {'type': 'alias', 'field': 'asr'},
        'video_ext': {'type': 'alias', 'field': 'vext'},
        'audio_ext': {'type': 'alias', 'field': 'aext'},
        'video_codec': {'type': 'alias', 'field': 'vcodec'},
        'audio_codec': {'type': 'alias', 'field': 'acodec'},
        'video': {'type': 'alias', 'field': 'hasvid'},
        'has_video': {'type': 'alias', 'field': 'hasvid'},
        'audio': {'type': 'alias', 'field': 'hasaud'},
        'has_audio': {'type': 'alias', 'field': 'hasaud'},
        'extractor': {'type': 'alias', 'field': 'ie_pref'},
        'preference': {'type': 'alias', 'field': 'ie_pref'},
        'extractor_preference': {'type': 'alias', 'field': 'ie_pref'},
        'format_id': {'type': 'alias', 'field': 'id'},
    }

    # Class-level default only; evaluate_params() installs a fresh
    # per-instance list (see the bug-fix note there)
    _order = []

    def _get_field_setting(self, field, key):
        """Return setting *key* for *field*, lazily filling in defaults."""
        if field not in self.settings:
            self.settings[field] = {}
        propObj = self.settings[field]
        if key not in propObj:
            type = propObj.get('type')
            if key == 'field':
                default = 'preference' if type == 'extractor' else (field,) if type in ('combined', 'multiple') else field
            elif key == 'convert':
                default = 'order' if type == 'ordered' else 'float_string' if field else 'ignore'
            else:
                default = {'type': 'field', 'visible': True, 'order': [], 'not_in_list': (None,)}.get(key, None)
            propObj[key] = default
        return propObj[key]

    def _resolve_field_value(self, field, value, convertNone=False):
        """Convert a raw field value/limit according to the field's 'convert' setting."""
        if value is None:
            if not convertNone:
                return None
        else:
            value = value.lower()
        conversion = self._get_field_setting(field, 'convert')
        if conversion == 'ignore':
            return None
        if conversion == 'string':
            return value
        elif conversion == 'float_none':
            return float_or_none(value)
        elif conversion == 'bytes':
            return FileDownloader.parse_bytes(value)
        elif conversion == 'order':
            order_list = (self._use_free_order and self._get_field_setting(field, 'order_free')) or self._get_field_setting(field, 'order')
            use_regex = self._get_field_setting(field, 'regex')
            list_length = len(order_list)
            empty_pos = order_list.index('') if '' in order_list else list_length + 1
            if use_regex and value is not None:
                for i, regex in enumerate(order_list):
                    if regex and re.match(regex, value):
                        return list_length - i
                return list_length - empty_pos  # not in list
            else:  # not regex or value = None
                return list_length - (order_list.index(value) if value in order_list else empty_pos)
        else:
            if value.isnumeric():
                return float(value)
            else:
                self.settings[field]['convert'] = 'string'
                return value

    def evaluate_params(self, params, sort_extractor):
        """Build self._order from forced/priority defaults, the user's
        format_sort, the extractor-given order and the defaults."""
        self._use_free_order = params.get('prefer_free_formats', False)
        self._sort_user = params.get('format_sort', [])
        self._sort_extractor = sort_extractor
        # BUG FIX: `_order` used to be appended to the shared class
        # attribute, so the field order (and each field's reverse/limit
        # data) from one video leaked into every later FormatSort
        # instance and a different extractor-given sort order was
        # silently ignored. Resetting it here also makes the stale
        # per-call data in the shared `settings` dict harmless, since
        # add_item() now re-runs (and overwrites it) for every field.
        self._order = []

        def add_item(field, reverse, closest, limit_text):
            field = field.lower()
            if field in self._order:
                return
            self._order.append(field)
            limit = self._resolve_field_value(field, limit_text)
            data = {
                'reverse': reverse,
                'closest': False if limit is None else closest,
                'limit_text': limit_text,
                'limit': limit}
            if field in self.settings:
                self.settings[field].update(data)
            else:
                self.settings[field] = data

        sort_list = (
            tuple(field for field in self.default if self._get_field_setting(field, 'forced'))
            + (tuple() if params.get('format_sort_force', False)
               else tuple(field for field in self.default if self._get_field_setting(field, 'priority')))
            + tuple(self._sort_user) + tuple(sort_extractor) + self.default)

        for item in sort_list:
            match = re.match(self.regex, item)
            if match is None:
                raise ExtractorError('Invalid format sort string "%s" given by extractor' % item)
            field = match.group('field')
            if field is None:
                continue
            if self._get_field_setting(field, 'type') == 'alias':
                field = self._get_field_setting(field, 'field')
            reverse = match.group('reverse') is not None
            closest = match.group('separator') == '~'
            limit_text = match.group('limit')

            has_limit = limit_text is not None
            has_multiple_fields = self._get_field_setting(field, 'type') == 'combined'
            has_multiple_limits = has_limit and has_multiple_fields and not self._get_field_setting(field, 'same_limit')

            fields = self._get_field_setting(field, 'field') if has_multiple_fields else (field,)
            limits = limit_text.split(":") if has_multiple_limits else (limit_text,) if has_limit else tuple()
            limit_count = len(limits)
            for (i, f) in enumerate(fields):
                add_item(f, reverse, closest,
                         limits[i] if i < limit_count
                         else limits[0] if has_limit and not has_multiple_limits
                         else None)

    def print_verbose_info(self, write_debug):
        """Log the effective sort order via *write_debug*."""
        if self._sort_user:
            write_debug('Sort order given by user: %s' % ', '.join(self._sort_user))
        if self._sort_extractor:
            write_debug('Sort order given by extractor: %s' % ', '.join(self._sort_extractor))
        write_debug('Formats sorted by: %s' % ', '.join(['%s%s%s' % (
            '+' if self._get_field_setting(field, 'reverse') else '', field,
            '%s%s(%s)' % ('~' if self._get_field_setting(field, 'closest') else ':',
                          self._get_field_setting(field, 'limit_text'),
                          self._get_field_setting(field, 'limit'))
            if self._get_field_setting(field, 'limit_text') is not None else '')
            for field in self._order if self._get_field_setting(field, 'visible')]))

    def _calculate_field_preference_from_value(self, format, field, type, value):
        """Turn one field value into a comparable tuple (higher sorts first)."""
        reverse = self._get_field_setting(field, 'reverse')
        closest = self._get_field_setting(field, 'closest')
        limit = self._get_field_setting(field, 'limit')

        if type == 'extractor':
            maximum = self._get_field_setting(field, 'max')
            if value is None or (maximum is not None and value >= maximum):
                value = -1
        elif type == 'boolean':
            in_list = self._get_field_setting(field, 'in_list')
            not_in_list = self._get_field_setting(field, 'not_in_list')
            value = 0 if ((in_list is None or value in in_list) and (not_in_list is None or value not in not_in_list)) else -1
        elif type == 'ordered':
            value = self._resolve_field_value(field, value, True)

        # try to convert to number
        val_num = float_or_none(value, default=self._get_field_setting(field, 'default'))
        is_num = self._get_field_setting(field, 'convert') != 'string' and val_num is not None
        if is_num:
            value = val_num

        return ((-10, 0) if value is None
                else (1, value, 0) if not is_num  # if a field has mixed strings and numbers, strings are sorted higher
                else (0, -abs(value - limit), value - limit if reverse else limit - value) if closest
                else (0, value, 0) if not reverse and (limit is None or value <= limit)
                else (0, -value, 0) if limit is None or (reverse and value == limit) or value > limit
                else (-1, value, 0))

    def _calculate_field_preference(self, format, field):
        type = self._get_field_setting(field, 'type')  # extractor, boolean, ordered, field, multiple
        get_value = lambda f: format.get(self._get_field_setting(f, 'field'))
        if type == 'multiple':
            type = 'field'  # Only 'field' is allowed in multiple for now
            actual_fields = self._get_field_setting(field, 'field')

            value = self._get_field_setting(field, 'function')(get_value(f) for f in actual_fields)
        else:
            value = get_value(field)
        return self._calculate_field_preference_from_value(format, field, type, value)

    def calculate_preference(self, format):
        """Return the full sort-key tuple for one format dict.

        Also fills in missing protocol/ext/bitrate fields in-place."""
        # Determine missing protocol
        if not format.get('protocol'):
            format['protocol'] = determine_protocol(format)

        # Determine missing ext
        if not format.get('ext') and 'url' in format:
            format['ext'] = determine_ext(format['url'])
        if format.get('vcodec') == 'none':
            format['audio_ext'] = format['ext'] if format.get('acodec') != 'none' else 'none'
            format['video_ext'] = 'none'
        else:
            format['video_ext'] = format['ext']
            format['audio_ext'] = 'none'
        # if format.get('preference') is None and format.get('ext') in ('f4f', 'f4m'):  # Not supported?
        #    format['preference'] = -1000

        # Determine missing bitrates
        if format.get('tbr') is None:
            if format.get('vbr') is not None and format.get('abr') is not None:
                format['tbr'] = format.get('vbr', 0) + format.get('abr', 0)
        else:
            if format.get('vcodec') != "none" and format.get('vbr') is None:
                format['vbr'] = format.get('tbr') - format.get('abr', 0)
            if format.get('acodec') != "none" and format.get('abr') is None:
                format['abr'] = format.get('tbr') - format.get('vbr', 0)

        return tuple(self._calculate_field_preference(format, field) for field in self._order)
1771
1772 def _sort_formats(self, formats, field_preference=[]):
1773 if not formats:
88acdbc2 1774 return
eb8a4433 1775 format_sort = self.FormatSort() # params and to_screen are taken from the downloader
1776 format_sort.evaluate_params(self._downloader.params, field_preference)
a06916d9 1777 if self.get_param('verbose', False):
0760b0a7 1778 format_sort.print_verbose_info(self._downloader.write_debug)
eb8a4433 1779 formats.sort(key=lambda f: format_sort.calculate_preference(f))
59040888 1780
96a53167
S
1781 def _check_formats(self, formats, video_id):
1782 if formats:
1783 formats[:] = filter(
1784 lambda f: self._is_valid_url(
1785 f['url'], video_id,
1786 item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
1787 formats)
1788
f5bdb444
S
1789 @staticmethod
1790 def _remove_duplicate_formats(formats):
1791 format_urls = set()
1792 unique_formats = []
1793 for f in formats:
1794 if f['url'] not in format_urls:
1795 format_urls.add(f['url'])
1796 unique_formats.append(f)
1797 formats[:] = unique_formats
1798
45024183 1799 def _is_valid_url(self, url, video_id, item='video', headers={}):
2f0f6578
S
1800 url = self._proto_relative_url(url, scheme='http:')
1801 # For now assume non HTTP(S) URLs always valid
1802 if not (url.startswith('http://') or url.startswith('https://')):
1803 return True
96a53167 1804 try:
45024183 1805 self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
96a53167 1806 return True
8bdd16b4 1807 except ExtractorError as e:
25e911a9 1808 self.to_screen(
8bdd16b4 1809 '%s: %s URL is invalid, skipping: %s'
1810 % (video_id, item, error_to_compat_str(e.cause)))
25e911a9 1811 return False
96a53167 1812
def http_scheme(self):
    """ Either "http:" or "https:", depending on the user's preferences """
    return 'http:' if self.get_param('prefer_insecure', False) else 'https:'
1819
57c7411f
PH
1820 def _proto_relative_url(self, url, scheme=None):
1821 if url is None:
1822 return url
1823 if url.startswith('//'):
1824 if scheme is None:
1825 scheme = self.http_scheme()
1826 return scheme + url
1827 else:
1828 return url
1829
4094b6e3
PH
1830 def _sleep(self, timeout, video_id, msg_template=None):
1831 if msg_template is None:
f1a9d64e 1832 msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
4094b6e3
PH
1833 msg = msg_template % {'video_id': video_id, 'timeout': timeout}
1834 self.to_screen(msg)
1835 time.sleep(timeout)
1836
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, quality=None, f4m_id=None,
                         transform_source=lambda s: fix_xml_ampersands(s).strip(),
                         fatal=True, m3u8_id=None, data=None, headers={}, query={}):
    """Download an f4m manifest and return the format list it describes
    (empty on a non-fatal download failure)."""
    manifest = self._download_xml(
        manifest_url, video_id, 'Downloading f4m manifest',
        'Unable to download f4m manifest',
        # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
        # (see https://github.com/ytdl-org/youtube-dl/issues/6215#issuecomment-121704244)
        transform_source=transform_source,
        fatal=fatal, data=data, headers=headers, query=query)
    if manifest is not False:
        return self._parse_f4m_formats(
            manifest, manifest_url, video_id, preference=preference, quality=quality, f4m_id=f4m_id,
            transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
    return []
0fdbb332 1854
def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, quality=None, f4m_id=None,
                       transform_source=lambda s: fix_xml_ampersands(s).strip(),
                       fatal=True, m3u8_id=None):
    """Parse an already-downloaded f4m manifest (an ElementTree element)
    into a list of format dicts.

    Set-level manifests are followed recursively (via
    _extract_f4m_formats / _extract_m3u8_formats); DRM-protected and
    Akamai-player-verified media are skipped.
    """
    if not isinstance(manifest, compat_etree_Element) and not fatal:
        return []

    # currently yt-dlp cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
    akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
    if akamai_pv is not None and ';' in akamai_pv.text:
        playerVerificationChallenge = akamai_pv.text.split(';')[0]
        if playerVerificationChallenge.strip() != '':
            return []

    formats = []
    manifest_version = '1.0'
    media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
    if not media_nodes:
        # No 1.0 media nodes - retry with the 2.0 namespace
        manifest_version = '2.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
    # Remove unsupported DRM protected media from final formats
    # rendition (see https://github.com/ytdl-org/youtube-dl/issues/8573).
    media_nodes = remove_encrypted_media(media_nodes)
    if not media_nodes:
        return formats

    manifest_base_url = get_base_url(manifest)

    bootstrap_info = xpath_element(
        manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
        'bootstrap info', default=None)

    vcodec = None
    mime_type = xpath_text(
        manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
        'base URL', default=None)
    if mime_type and mime_type.startswith('audio/'):
        vcodec = 'none'

    for i, media_el in enumerate(media_nodes):
        tbr = int_or_none(media_el.attrib.get('bitrate'))
        width = int_or_none(media_el.attrib.get('width'))
        height = int_or_none(media_el.attrib.get('height'))
        # Fall back to the node index when no bitrate is advertised
        format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
        # If <bootstrapInfo> is present, the specified f4m is a
        # stream-level manifest, and only set-level manifests may refer to
        # external resources. See section 11.4 and section 4 of F4M spec
        if bootstrap_info is None:
            media_url = None
            # @href is introduced in 2.0, see section 11.6 of F4M spec
            if manifest_version == '2.0':
                media_url = media_el.attrib.get('href')
            if media_url is None:
                media_url = media_el.attrib.get('url')
            if not media_url:
                continue
            manifest_url = (
                media_url if media_url.startswith('http://') or media_url.startswith('https://')
                else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
            # If media_url is itself a f4m manifest do the recursive extraction
            # since bitrates in parent manifest (this one) and media_url manifest
            # may differ leading to inability to resolve the format by requested
            # bitrate in f4m downloader
            ext = determine_ext(manifest_url)
            if ext == 'f4m':
                f4m_formats = self._extract_f4m_formats(
                    manifest_url, video_id, preference=preference, quality=quality, f4m_id=f4m_id,
                    transform_source=transform_source, fatal=fatal)
                # Sometimes stream-level manifest contains single media entry that
                # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
                # At the same time parent's media entry in set-level manifest may
                # contain it. We will copy it from parent in such cases.
                if len(f4m_formats) == 1:
                    f = f4m_formats[0]
                    f.update({
                        'tbr': f.get('tbr') or tbr,
                        'width': f.get('width') or width,
                        'height': f.get('height') or height,
                        'format_id': f.get('format_id') if not tbr else format_id,
                        'vcodec': vcodec,
                    })
                formats.extend(f4m_formats)
                continue
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    manifest_url, video_id, 'mp4', preference=preference,
                    quality=quality, m3u8_id=m3u8_id, fatal=fatal))
                continue
        formats.append({
            'format_id': format_id,
            'url': manifest_url,
            'manifest_url': manifest_url,
            'ext': 'flv' if bootstrap_info is not None else None,
            'protocol': 'f4m',
            'tbr': tbr,
            'width': width,
            'height': height,
            'vcodec': vcodec,
            'preference': preference,
            'quality': quality,
        })
    return formats
1956
f983b875 1957 def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, quality=None, m3u8_id=None):
16da9bbc 1958 return {
f207019c 1959 'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
704df56d
PH
1960 'url': m3u8_url,
1961 'ext': ext,
1962 'protocol': 'm3u8',
37768f92 1963 'preference': preference - 100 if preference else -100,
f983b875 1964 'quality': quality,
704df56d
PH
1965 'resolution': 'multiple',
1966 'format_note': 'Quality selection URL',
16da9bbc
YCH
1967 }
1968
a0c3b2d5
F
def _extract_m3u8_formats(self, *args, **kwargs):
    """Same as _extract_m3u8_formats_and_subtitles(), but returns only
    the formats; warns (once) if subtitle tracks were discarded."""
    formats, subtitles = self._extract_m3u8_formats_and_subtitles(*args, **kwargs)
    if subtitles:
        self.report_warning(bug_reports_message(
            "Ignoring subtitle tracks found in the HLS manifest; "
            "if any subtitle tracks are missing,"
        ), only_once=True)
    return formats
1977
def _extract_m3u8_formats_and_subtitles(
        self, m3u8_url, video_id, ext=None, entry_protocol='m3u8_native',
        preference=None, quality=None, m3u8_id=None, note=None,
        errnote=None, fatal=True, live=False, data=None, headers={},
        query={}):
    """Download an m3u8 playlist and parse it into (formats, subtitles).

    Returns ([], {}) when the download fails and fatal is False."""
    res = self._download_webpage_handle(
        m3u8_url, video_id,
        note='Downloading m3u8 information' if note is None else note,
        errnote='Failed to download m3u8 information' if errnote is None else errnote,
        fatal=fatal, data=data, headers=headers, query=query)
    if res is False:
        return [], {}
    m3u8_doc, urlh = res
    # Use the final (post-redirect) URL so relative references resolve correctly
    return self._parse_m3u8_formats_and_subtitles(
        m3u8_doc, urlh.geturl(), ext=ext, entry_protocol=entry_protocol,
        preference=preference, quality=quality, m3u8_id=m3u8_id,
        note=note, errnote=errnote, fatal=fatal, live=live, data=data,
        headers=headers, query=query, video_id=video_id)
cb252080 2001
def _parse_m3u8_formats_and_subtitles(
        self, m3u8_doc, m3u8_url, ext=None, entry_protocol='m3u8_native',
        preference=None, quality=None, m3u8_id=None, live=False, note=None,
        errnote=None, fatal=True, data=None, headers={}, query={},
        video_id=None):
    """Parse an m3u8 document into (formats, subtitles).

    *formats* is a list of format dicts; *subtitles* maps language code
    to a list of subtitle-info dicts. A media playlist yields a single
    format (per discontinuity index, if hls_split_discontinuity is set);
    a master playlist is expanded into one format per variant stream and
    per audio/video rendition.
    """
    formats, subtitles = [], {}

    if '#EXT-X-FAXS-CM:' in m3u8_doc:  # Adobe Flash Access
        return formats, subtitles

    # FairPlay session keys indicate DRM; recorded per-format below
    has_drm = re.search(r'#EXT-X-SESSION-KEY:.*?URI="skd://', m3u8_doc)

    def format_url(url):
        # Resolve relative references against the playlist URL
        return url if re.match(r'^https?://', url) else compat_urlparse.urljoin(m3u8_url, url)

    if self.get_param('hls_split_discontinuity', False):
        def _extract_m3u8_playlist_indices(manifest_url=None, m3u8_doc=None):
            # One sub-format index per discontinuity section of the media
            # playlist; downloads the playlist if only its URL is known
            if not m3u8_doc:
                if not manifest_url:
                    return []
                m3u8_doc = self._download_webpage(
                    manifest_url, video_id, fatal=fatal, data=data, headers=headers,
                    note=False, errnote='Failed to download m3u8 playlist information')
                if m3u8_doc is False:
                    return []
            return range(1 + sum(line.startswith('#EXT-X-DISCONTINUITY') for line in m3u8_doc.splitlines()))

    else:
        def _extract_m3u8_playlist_indices(*args, **kwargs):
            return [None]

    # References:
    # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
    # 2. https://github.com/ytdl-org/youtube-dl/issues/12211
    # 3. https://github.com/ytdl-org/youtube-dl/issues/18923

    # We should try extracting formats only from master playlists [1, 4.3.4],
    # i.e. playlists that describe available qualities. On the other hand
    # media playlists [1, 4.3.3] should be returned as is since they contain
    # just the media without qualities renditions.
    # Fortunately, master playlist can be easily distinguished from media
    # playlist based on particular tags availability. As of [1, 4.3.3, 4.3.4]
    # master playlist tags MUST NOT appear in a media playlist and vice versa.
    # As of [1, 4.3.3.1] #EXT-X-TARGETDURATION tag is REQUIRED for every
    # media playlist and MUST NOT appear in master playlist thus we can
    # clearly detect media playlist with this criterion.

    if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
        formats = [{
            'format_id': '-'.join(map(str, filter(None, [m3u8_id, idx]))),
            'format_index': idx,
            'url': m3u8_url,
            'ext': ext,
            'protocol': entry_protocol,
            'preference': preference,
            'quality': quality,
            'has_drm': has_drm,
        } for idx in _extract_m3u8_playlist_indices(m3u8_doc=m3u8_doc)]

        return formats, subtitles

    groups = {}
    last_stream_inf = {}

    def extract_media(x_media_line):
        # Handle one #EXT-X-MEDIA line: record the rendition in `groups`
        # and, for SUBTITLES/AUDIO/VIDEO renditions with a URI, emit a
        # subtitle entry or format directly
        media = parse_m3u8_attributes(x_media_line)
        # As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
        media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
        if not (media_type and group_id and name):
            return
        groups.setdefault(group_id, []).append(media)
        # <https://tools.ietf.org/html/rfc8216#section-4.3.4.1>
        if media_type == 'SUBTITLES':
            # According to RFC 8216 §4.3.4.2.1, URI is REQUIRED in the
            # EXT-X-MEDIA tag if the media type is SUBTITLES.
            # However, lack of URI has been spotted in the wild.
            # e.g. NebulaIE; see https://github.com/yt-dlp/yt-dlp/issues/339
            if not media.get('URI'):
                return
            url = format_url(media['URI'])
            sub_info = {
                'url': url,
                'ext': determine_ext(url),
            }
            if sub_info['ext'] == 'm3u8':
                # Per RFC 8216 §3.1, the only possible subtitle format m3u8
                # files may contain is WebVTT:
                # <https://tools.ietf.org/html/rfc8216#section-3.1>
                sub_info['ext'] = 'vtt'
                sub_info['protocol'] = 'm3u8_native'
            lang = media.get('LANGUAGE') or 'und'
            subtitles.setdefault(lang, []).append(sub_info)
        if media_type not in ('VIDEO', 'AUDIO'):
            return
        media_url = media.get('URI')
        if media_url:
            manifest_url = format_url(media_url)
            formats.extend({
                'format_id': '-'.join(map(str, filter(None, (m3u8_id, group_id, name, idx)))),
                'format_note': name,
                'format_index': idx,
                'url': manifest_url,
                'manifest_url': m3u8_url,
                'language': media.get('LANGUAGE'),
                'ext': ext,
                'protocol': entry_protocol,
                'preference': preference,
                'quality': quality,
                'vcodec': 'none' if media_type == 'AUDIO' else None,
            } for idx in _extract_m3u8_playlist_indices(manifest_url))

    def build_stream_name():
        # Despite specification does not mention NAME attribute for
        # EXT-X-STREAM-INF tag it still sometimes may be present (see [1]
        # or vidio test in TestInfoExtractor.test_parse_m3u8_formats)
        # 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
        stream_name = last_stream_inf.get('NAME')
        if stream_name:
            return stream_name
        # If there is no NAME in EXT-X-STREAM-INF it will be obtained
        # from corresponding rendition group
        stream_group_id = last_stream_inf.get('VIDEO')
        if not stream_group_id:
            return
        stream_group = groups.get(stream_group_id)
        if not stream_group:
            return stream_group_id
        rendition = stream_group[0]
        return rendition.get('NAME') or stream_group_id

    # parse EXT-X-MEDIA tags before EXT-X-STREAM-INF in order to have the
    # chance to detect video only formats when EXT-X-STREAM-INF tags
    # precede EXT-X-MEDIA tags in HLS manifest such as [3].
    for line in m3u8_doc.splitlines():
        if line.startswith('#EXT-X-MEDIA:'):
            extract_media(line)

    for line in m3u8_doc.splitlines():
        if line.startswith('#EXT-X-STREAM-INF:'):
            last_stream_inf = parse_m3u8_attributes(line)
        elif line.startswith('#') or not line.strip():
            continue
        else:
            # A non-comment line is the variant URI belonging to the
            # preceding #EXT-X-STREAM-INF tag
            tbr = float_or_none(
                last_stream_inf.get('AVERAGE-BANDWIDTH')
                or last_stream_inf.get('BANDWIDTH'), scale=1000)
            manifest_url = format_url(line.strip())

            for idx in _extract_m3u8_playlist_indices(manifest_url):
                format_id = [m3u8_id, None, idx]
                # Bandwidth of live streams may differ over time thus making
                # format_id unpredictable. So it's better to keep provided
                # format_id intact.
                if not live:
                    stream_name = build_stream_name()
                    format_id[1] = stream_name if stream_name else '%d' % (tbr if tbr else len(formats))
                f = {
                    'format_id': '-'.join(map(str, filter(None, format_id))),
                    'format_index': idx,
                    'url': manifest_url,
                    'manifest_url': m3u8_url,
                    'tbr': tbr,
                    'ext': ext,
                    'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
                    'protocol': entry_protocol,
                    'preference': preference,
                    'quality': quality,
                }
                resolution = last_stream_inf.get('RESOLUTION')
                if resolution:
                    mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
                    if mobj:
                        f['width'] = int(mobj.group('width'))
                        f['height'] = int(mobj.group('height'))
                # Unified Streaming Platform
                mobj = re.search(
                    r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
                if mobj:
                    abr, vbr = mobj.groups()
                    abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
                    f.update({
                        'vbr': vbr,
                        'abr': abr,
                    })
                codecs = parse_codecs(last_stream_inf.get('CODECS'))
                f.update(codecs)
                audio_group_id = last_stream_inf.get('AUDIO')
                # As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
                # references a rendition group MUST have a CODECS attribute.
                # However, this is not always respected, for example, [2]
                # contains EXT-X-STREAM-INF tag which references AUDIO
                # rendition group but does not have CODECS and despite
                # referencing an audio group it represents a complete
                # (with audio and video) format. So, for such cases we will
                # ignore references to rendition groups and treat them
                # as complete formats.
                if audio_group_id and codecs and f.get('vcodec') != 'none':
                    audio_group = groups.get(audio_group_id)
                    if audio_group and audio_group[0].get('URI'):
                        # TODO: update acodec for audio only formats with
                        # the same GROUP-ID
                        f['acodec'] = 'none'
                if not f.get('ext'):
                    f['ext'] = 'm4a' if f.get('vcodec') == 'none' else 'mp4'
                formats.append(f)

                # for DailyMotion
                progressive_uri = last_stream_inf.get('PROGRESSIVE-URI')
                if progressive_uri:
                    http_f = f.copy()
                    del http_f['manifest_url']
                    http_f.update({
                        'format_id': f['format_id'].replace('hls-', 'http-'),
                        'protocol': 'http',
                        'url': progressive_uri,
                    })
                    formats.append(http_f)

            # Reset so a stray URI line without its own tag is not
            # attributed to the previous stream's attributes
            last_stream_inf = {}
    return formats, subtitles
704df56d 2222
3cf4b91d
C
2223 def _extract_m3u8_vod_duration(
2224 self, m3u8_vod_url, video_id, note=None, errnote=None, data=None, headers={}, query={}):
2225
2226 m3u8_vod = self._download_webpage(
2227 m3u8_vod_url, video_id,
2228 note='Downloading m3u8 VOD manifest' if note is None else note,
2229 errnote='Failed to download VOD manifest' if errnote is None else errnote,
2230 fatal=False, data=data, headers=headers, query=query)
2231
2232 return self._parse_m3u8_vod_duration(m3u8_vod or '', video_id)
2233
2234 def _parse_m3u8_vod_duration(self, m3u8_vod, video_id):
2235 if '#EXT-X-PLAYLIST-TYPE:VOD' not in m3u8_vod:
2236 return None
2237
2238 return int(sum(
2239 float(line[len('#EXTINF:'):].split(',')[0])
2240 for line in m3u8_vod.splitlines() if line.startswith('#EXTINF:'))) or None
2241
a107193e
S
2242 @staticmethod
2243 def _xpath_ns(path, namespace=None):
2244 if not namespace:
2245 return path
2246 out = []
2247 for c in path.split('/'):
2248 if not c or c == '.':
2249 out.append(c)
2250 else:
2251 out.append('{%s}%s' % (namespace, c))
2252 return '/'.join(out)
2253
da1c94ee 2254 def _extract_smil_formats_and_subtitles(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
09f572fb 2255 smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
a107193e 2256
995029a1
PH
2257 if smil is False:
2258 assert not fatal
2259 return []
e89a2aab 2260
17712eeb 2261 namespace = self._parse_smil_namespace(smil)
a107193e 2262
da1c94ee 2263 fmts = self._parse_smil_formats(
a107193e 2264 smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
da1c94ee
F
2265 subs = self._parse_smil_subtitles(
2266 smil, namespace=namespace)
2267
2268 return fmts, subs
2269
2270 def _extract_smil_formats(self, *args, **kwargs):
2271 fmts, subs = self._extract_smil_formats_and_subtitles(*args, **kwargs)
2272 if subs:
2273 self.report_warning(bug_reports_message(
2274 "Ignoring subtitle tracks found in the SMIL manifest; "
2275 "if any subtitle tracks are missing,"
28f436ba 2276 ), only_once=True)
da1c94ee 2277 return fmts
a107193e
S
2278
2279 def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
2280 smil = self._download_smil(smil_url, video_id, fatal=fatal)
2281 if smil is False:
2282 return {}
2283 return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
2284
09f572fb 2285 def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
a107193e
S
2286 return self._download_xml(
2287 smil_url, video_id, 'Downloading SMIL file',
09f572fb 2288 'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
a107193e
S
2289
2290 def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
17712eeb 2291 namespace = self._parse_smil_namespace(smil)
a107193e
S
2292
2293 formats = self._parse_smil_formats(
2294 smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
2295 subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
2296
2297 video_id = os.path.splitext(url_basename(smil_url))[0]
2298 title = None
2299 description = None
647eab45 2300 upload_date = None
a107193e
S
2301 for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
2302 name = meta.attrib.get('name')
2303 content = meta.attrib.get('content')
2304 if not name or not content:
2305 continue
2306 if not title and name == 'title':
2307 title = content
2308 elif not description and name in ('description', 'abstract'):
2309 description = content
647eab45
S
2310 elif not upload_date and name == 'date':
2311 upload_date = unified_strdate(content)
a107193e 2312
1e5bcdec
S
2313 thumbnails = [{
2314 'id': image.get('type'),
2315 'url': image.get('src'),
2316 'width': int_or_none(image.get('width')),
2317 'height': int_or_none(image.get('height')),
2318 } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
2319
a107193e
S
2320 return {
2321 'id': video_id,
2322 'title': title or video_id,
2323 'description': description,
647eab45 2324 'upload_date': upload_date,
1e5bcdec 2325 'thumbnails': thumbnails,
a107193e
S
2326 'formats': formats,
2327 'subtitles': subtitles,
2328 }
2329
17712eeb
S
2330 def _parse_smil_namespace(self, smil):
2331 return self._search_regex(
2332 r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
2333
    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        """Extract format dicts from the <video>/<audio>/<imagestream> elements of a SMIL document."""
        base = smil_url
        # <meta base=...> (or httpBase) overrides the manifest URL as the base for relative srcs
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            b = meta.get('base') or meta.get('httpBase')
            if b:
                base = b
                break

        formats = []
        rtmp_count = 0
        http_count = 0
        m3u8_count = 0
        imgs_count = 0

        # Deduplicate media elements by their src URL
        srcs = set()
        media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
        for medium in media:
            src = medium.get('src')
            if not src or src in srcs:
                continue
            srcs.add(src)

            bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
            filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
            width = int_or_none(medium.get('width'))
            height = int_or_none(medium.get('height'))
            proto = medium.get('proto')
            ext = medium.get('ext')
            src_ext = determine_ext(src)
            streamer = medium.get('streamer') or base

            if proto == 'rtmp' or streamer.startswith('rtmp'):
                rtmp_count += 1
                formats.append({
                    'url': streamer,
                    'play_path': src,
                    'ext': 'flv',
                    'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                if transform_rtmp_url:
                    # Caller-supplied hook to rewrite streamer URL / play path
                    streamer, src = transform_rtmp_url(streamer, src)
                    formats[-1].update({
                        'url': streamer,
                        'play_path': src,
                    })
                continue

            src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
            src_url = src_url.strip()

            if proto == 'm3u8' or src_ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
                # A single-rendition playlist inherits this element's metadata
                if len(m3u8_formats) == 1:
                    m3u8_count += 1
                    m3u8_formats[0].update({
                        'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
                        'tbr': bitrate,
                        'width': width,
                        'height': height,
                    })
                formats.extend(m3u8_formats)
            elif src_ext == 'f4m':
                f4m_url = src_url
                if not f4m_params:
                    f4m_params = {
                        'hdcore': '3.2.0',
                        'plugin': 'flowplayer-3.2.0.1',
                    }
                f4m_url += '&' if '?' in f4m_url else '?'
                f4m_url += compat_urllib_parse_urlencode(f4m_params)
                formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
            elif src_ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    src_url, video_id, mpd_id='dash', fatal=False))
            elif re.search(r'\.ism/[Mm]anifest', src_url):
                formats.extend(self._extract_ism_formats(
                    src_url, video_id, ism_id='mss', fatal=False))
            elif src_url.startswith('http') and self._is_valid_url(src, video_id):
                http_count += 1
                formats.append({
                    'url': src_url,
                    'ext': ext or src_ext or 'flv',
                    'format_id': 'http-%d' % (bitrate or http_count),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })

        # <imagestream> elements describe storyboard (thumbnail sheet) tracks
        for medium in smil.findall(self._xpath_ns('.//imagestream', namespace)):
            src = medium.get('src')
            if not src or src in srcs:
                continue
            srcs.add(src)

            imgs_count += 1
            formats.append({
                'format_id': 'imagestream-%d' % (imgs_count),
                'url': src,
                'ext': mimetype2ext(medium.get('type')),
                'acodec': 'none',
                'vcodec': 'none',
                'width': int_or_none(medium.get('width')),
                'height': int_or_none(medium.get('height')),
                'format_note': 'SMIL storyboards',
            })

        return formats
2447
ce00af87 2448 def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
d413095f 2449 urls = []
a107193e
S
2450 subtitles = {}
2451 for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
2452 src = textstream.get('src')
d413095f 2453 if not src or src in urls:
a107193e 2454 continue
d413095f 2455 urls.append(src)
df634be2 2456 ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
03bc7237 2457 lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
a107193e
S
2458 subtitles.setdefault(lang, []).append({
2459 'url': src,
2460 'ext': ext,
2461 })
2462 return subtitles
63757032 2463
47a5cb77 2464 def _extract_xspf_playlist(self, xspf_url, playlist_id, fatal=True):
942acef5 2465 xspf = self._download_xml(
47a5cb77 2466 xspf_url, playlist_id, 'Downloading xpsf playlist',
942acef5
S
2467 'Unable to download xspf manifest', fatal=fatal)
2468 if xspf is False:
2469 return []
47a5cb77
S
2470 return self._parse_xspf(
2471 xspf, playlist_id, xspf_url=xspf_url,
2472 xspf_base_url=base_url(xspf_url))
8d6765cf 2473
47a5cb77 2474 def _parse_xspf(self, xspf_doc, playlist_id, xspf_url=None, xspf_base_url=None):
8d6765cf
S
2475 NS_MAP = {
2476 'xspf': 'http://xspf.org/ns/0/',
2477 's1': 'http://static.streamone.nl/player/ns/0',
2478 }
2479
2480 entries = []
47a5cb77 2481 for track in xspf_doc.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
8d6765cf 2482 title = xpath_text(
98044462 2483 track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
8d6765cf
S
2484 description = xpath_text(
2485 track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
2486 thumbnail = xpath_text(
2487 track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
2488 duration = float_or_none(
2489 xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
2490
47a5cb77
S
2491 formats = []
2492 for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP)):
2493 format_url = urljoin(xspf_base_url, location.text)
2494 if not format_url:
2495 continue
2496 formats.append({
2497 'url': format_url,
2498 'manifest_url': xspf_url,
2499 'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
2500 'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
2501 'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
2502 })
8d6765cf
S
2503 self._sort_formats(formats)
2504
2505 entries.append({
2506 'id': playlist_id,
2507 'title': title,
2508 'description': description,
2509 'thumbnail': thumbnail,
2510 'duration': duration,
2511 'formats': formats,
2512 })
2513 return entries
2514
171e59ed
F
2515 def _extract_mpd_formats(self, *args, **kwargs):
2516 fmts, subs = self._extract_mpd_formats_and_subtitles(*args, **kwargs)
2517 if subs:
2518 self.report_warning(bug_reports_message(
2519 "Ignoring subtitle tracks found in the DASH manifest; "
2520 "if any subtitle tracks are missing,"
28f436ba 2521 ), only_once=True)
171e59ed
F
2522 return fmts
2523
2524 def _extract_mpd_formats_and_subtitles(
2525 self, mpd_url, video_id, mpd_id=None, note=None, errnote=None,
2526 fatal=True, data=None, headers={}, query={}):
47a5cb77 2527 res = self._download_xml_handle(
1bac3455 2528 mpd_url, video_id,
37a3bb66 2529 note='Downloading MPD manifest' if note is None else note,
2530 errnote='Failed to download MPD manifest' if errnote is None else errnote,
7360c06f 2531 fatal=fatal, data=data, headers=headers, query=query)
1bac3455 2532 if res is False:
171e59ed 2533 return [], {}
47a5cb77 2534 mpd_doc, urlh = res
c25720ef 2535 if mpd_doc is None:
171e59ed 2536 return [], {}
02dc0a36 2537 mpd_base_url = base_url(urlh.geturl())
1bac3455 2538
171e59ed 2539 return self._parse_mpd_formats_and_subtitles(
545cc85d 2540 mpd_doc, mpd_id, mpd_base_url, mpd_url)
2d2fa82d 2541
171e59ed
F
2542 def _parse_mpd_formats(self, *args, **kwargs):
2543 fmts, subs = self._parse_mpd_formats_and_subtitles(*args, **kwargs)
2544 if subs:
2545 self.report_warning(bug_reports_message(
2546 "Ignoring subtitle tracks found in the DASH manifest; "
2547 "if any subtitle tracks are missing,"
28f436ba 2548 ), only_once=True)
171e59ed
F
2549 return fmts
2550
    def _parse_mpd_formats_and_subtitles(
            self, mpd_doc, mpd_id=None, mpd_base_url='', mpd_url=None):
        """
        Parse formats from MPD manifest.
        References:
         1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
            http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
         2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
        """
        # Live ("dynamic") manifests are skipped unless the dynamic_mpd option allows them
        if not self.get_param('dynamic_mpd', True):
            if mpd_doc.get('type') == 'dynamic':
                return [], {}

        namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)

        def _add_ns(path):
            return self._xpath_ns(path, namespace)

        def is_drm_protected(element):
            # Presence of a ContentProtection child marks the element DRM-protected
            return element.find(_add_ns('ContentProtection')) is not None

        def extract_multisegment_info(element, ms_parent_info):
            # Child elements inherit and may override the parent's segment info
            ms_info = ms_parent_info.copy()

            # As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
            # common attributes and elements. We will only extract relevant
            # for us.
            def extract_common(source):
                segment_timeline = source.find(_add_ns('SegmentTimeline'))
                if segment_timeline is not None:
                    s_e = segment_timeline.findall(_add_ns('S'))
                    if s_e:
                        ms_info['total_number'] = 0
                        ms_info['s'] = []
                        for s in s_e:
                            r = int(s.get('r', 0))
                            # @r is a repeat count: each S entry stands for 1 + r segments
                            ms_info['total_number'] += 1 + r
                            ms_info['s'].append({
                                't': int(s.get('t', 0)),
                                # @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
                                'd': int(s.attrib['d']),
                                'r': r,
                            })
                start_number = source.get('startNumber')
                if start_number:
                    ms_info['start_number'] = int(start_number)
                timescale = source.get('timescale')
                if timescale:
                    ms_info['timescale'] = int(timescale)
                segment_duration = source.get('duration')
                if segment_duration:
                    ms_info['segment_duration'] = float(segment_duration)

            def extract_Initialization(source):
                initialization = source.find(_add_ns('Initialization'))
                if initialization is not None:
                    ms_info['initialization_url'] = initialization.attrib['sourceURL']

            segment_list = element.find(_add_ns('SegmentList'))
            if segment_list is not None:
                extract_common(segment_list)
                extract_Initialization(segment_list)
                segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
                if segment_urls_e:
                    ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
            else:
                segment_template = element.find(_add_ns('SegmentTemplate'))
                if segment_template is not None:
                    extract_common(segment_template)
                    media = segment_template.get('media')
                    if media:
                        ms_info['media'] = media
                    initialization = segment_template.get('initialization')
                    if initialization:
                        ms_info['initialization'] = initialization
                    else:
                        extract_Initialization(segment_template)
            return ms_info

        mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
        formats, subtitles = [], {}
        # Per-content-type counter used to number streams within the manifest
        stream_numbers = {'audio': 0, 'video': 0}
        for period in mpd_doc.findall(_add_ns('Period')):
            period_duration = parse_duration(period.get('duration')) or mpd_duration
            period_ms_info = extract_multisegment_info(period, {
                'start_number': 1,
                'timescale': 1,
            })
            for adaptation_set in period.findall(_add_ns('AdaptationSet')):
                adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
                for representation in adaptation_set.findall(_add_ns('Representation')):
                    # Representation attributes override those of the AdaptationSet
                    representation_attrib = adaptation_set.attrib.copy()
                    representation_attrib.update(representation.attrib)
                    # According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
                    mime_type = representation_attrib['mimeType']
                    content_type = representation_attrib.get('contentType', mime_type.split('/')[0])

                    codecs = representation_attrib.get('codecs', '')
                    if content_type not in ('video', 'audio', 'text'):
                        if mime_type == 'image/jpeg':
                            # image/jpeg renditions are DASH storyboards
                            content_type = mime_type
                        elif codecs.split('.')[0] == 'stpp':
                            # stpp codec denotes TTML subtitles carried as media
                            content_type = 'text'
                        else:
                            self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
                            continue

                    # Resolve BaseURL by walking up the element hierarchy until absolute
                    base_url = ''
                    for element in (representation, adaptation_set, period, mpd_doc):
                        base_url_e = element.find(_add_ns('BaseURL'))
                        if base_url_e is not None:
                            base_url = base_url_e.text + base_url
                            if re.match(r'^https?://', base_url):
                                break
                    if mpd_base_url and base_url.startswith('/'):
                        base_url = compat_urlparse.urljoin(mpd_base_url, base_url)
                    elif mpd_base_url and not re.match(r'^https?://', base_url):
                        if not mpd_base_url.endswith('/'):
                            mpd_base_url += '/'
                        base_url = mpd_base_url + base_url
                    representation_id = representation_attrib.get('id')
                    lang = representation_attrib.get('lang')
                    url_el = representation.find(_add_ns('BaseURL'))
                    filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
                    bandwidth = int_or_none(representation_attrib.get('bandwidth'))
                    if representation_id is not None:
                        format_id = representation_id
                    else:
                        format_id = content_type
                    if mpd_id:
                        format_id = mpd_id + '-' + format_id
                    if content_type in ('video', 'audio'):
                        f = {
                            'format_id': format_id,
                            'manifest_url': mpd_url,
                            'ext': mimetype2ext(mime_type),
                            'width': int_or_none(representation_attrib.get('width')),
                            'height': int_or_none(representation_attrib.get('height')),
                            'tbr': float_or_none(bandwidth, 1000),
                            'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
                            'fps': int_or_none(representation_attrib.get('frameRate')),
                            # Suppress non-informative language tags
                            'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
                            'format_note': 'DASH %s' % content_type,
                            'filesize': filesize,
                            'container': mimetype2ext(mime_type) + '_dash',
                            'manifest_stream_number': stream_numbers[content_type]
                        }
                        f.update(parse_codecs(codecs))
                        stream_numbers[content_type] += 1
                    elif content_type == 'text':
                        f = {
                            'ext': mimetype2ext(mime_type),
                            'manifest_url': mpd_url,
                            'filesize': filesize,
                        }
                    elif content_type == 'image/jpeg':
                        # See test case in VikiIE
                        # https://www.viki.com/videos/1175236v-choosing-spouse-by-lottery-episode-1
                        f = {
                            'format_id': format_id,
                            'ext': 'mhtml',
                            'manifest_url': mpd_url,
                            'format_note': 'DASH storyboards (jpeg)',
                            'acodec': 'none',
                            'vcodec': 'none',
                        }
                    if is_drm_protected(adaptation_set) or is_drm_protected(representation):
                        f['has_drm'] = True
                    representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)

                    def prepare_template(template_name, identifiers):
                        tmpl = representation_ms_info[template_name]
                        # First of, % characters outside $...$ templates
                        # must be escaped by doubling for proper processing
                        # by % operator string formatting used further (see
                        # https://github.com/ytdl-org/youtube-dl/issues/16867).
                        t = ''
                        in_template = False
                        for c in tmpl:
                            t += c
                            if c == '$':
                                in_template = not in_template
                            elif c == '%' and not in_template:
                                t += c
                        # Next, $...$ templates are translated to their
                        # %(...) counterparts to be used with % operator
                        if representation_id is not None:
                            t = t.replace('$RepresentationID$', representation_id)
                        t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
                        t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
                        t.replace('$$', '$')
                        return t

                    # @initialization is a regular template like @media one
                    # so it should be handled just the same way (see
                    # https://github.com/ytdl-org/youtube-dl/issues/11605)
                    if 'initialization' in representation_ms_info:
                        initialization_template = prepare_template(
                            'initialization',
                            # As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
                            # $Time$ shall not be included for @initialization thus
                            # only $Bandwidth$ remains
                            ('Bandwidth', ))
                        representation_ms_info['initialization_url'] = initialization_template % {
                            'Bandwidth': bandwidth,
                        }

                    def location_key(location):
                        return 'url' if re.match(r'^https?://', location) else 'path'

                    if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:

                        media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
                        media_location_key = location_key(media_template)

                        # As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
                        # can't be used at the same time
                        if '%(Number' in media_template and 's' not in representation_ms_info:
                            segment_duration = None
                            if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
                                segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
                                representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
                            representation_ms_info['fragments'] = [{
                                media_location_key: media_template % {
                                    'Number': segment_number,
                                    'Bandwidth': bandwidth,
                                },
                                'duration': segment_duration,
                            } for segment_number in range(
                                representation_ms_info['start_number'],
                                representation_ms_info['total_number'] + representation_ms_info['start_number'])]
                        else:
                            # $Number*$ or $Time$ in media template with S list available
                            # Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
                            # Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
                            representation_ms_info['fragments'] = []
                            segment_time = 0
                            segment_d = None
                            segment_number = representation_ms_info['start_number']

                            def add_segment_url():
                                segment_url = media_template % {
                                    'Time': segment_time,
                                    'Bandwidth': bandwidth,
                                    'Number': segment_number,
                                }
                                representation_ms_info['fragments'].append({
                                    media_location_key: segment_url,
                                    'duration': float_or_none(segment_d, representation_ms_info['timescale']),
                                })

                            for num, s in enumerate(representation_ms_info['s']):
                                segment_time = s.get('t') or segment_time
                                segment_d = s['d']
                                add_segment_url()
                                segment_number += 1
                                # Expand the @r repeat count into individual segments
                                for r in range(s.get('r', 0)):
                                    segment_time += segment_d
                                    add_segment_url()
                                    segment_number += 1
                                segment_time += segment_d
                    elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
                        # No media template
                        # Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
                        # or any YouTube dashsegments video
                        fragments = []
                        segment_index = 0
                        timescale = representation_ms_info['timescale']
                        for s in representation_ms_info['s']:
                            duration = float_or_none(s['d'], timescale)
                            for r in range(s.get('r', 0) + 1):
                                segment_uri = representation_ms_info['segment_urls'][segment_index]
                                fragments.append({
                                    location_key(segment_uri): segment_uri,
                                    'duration': duration,
                                })
                                segment_index += 1
                        representation_ms_info['fragments'] = fragments
                    elif 'segment_urls' in representation_ms_info:
                        # Segment URLs with no SegmentTimeline
                        # Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
                        # https://github.com/ytdl-org/youtube-dl/pull/14844
                        fragments = []
                        segment_duration = float_or_none(
                            representation_ms_info['segment_duration'],
                            representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
                        for segment_url in representation_ms_info['segment_urls']:
                            fragment = {
                                location_key(segment_url): segment_url,
                            }
                            if segment_duration:
                                fragment['duration'] = segment_duration
                            fragments.append(fragment)
                        representation_ms_info['fragments'] = fragments
                    # If there is a fragments key available then we correctly recognized fragmented media.
                    # Otherwise we will assume unfragmented media with direct access. Technically, such
                    # assumption is not necessarily correct since we may simply have no support for
                    # some forms of fragmented media renditions yet, but for now we'll use this fallback.
                    if 'fragments' in representation_ms_info:
                        f.update({
                            # NB: mpd_url may be empty when MPD manifest is parsed from a string
                            'url': mpd_url or base_url,
                            'fragment_base_url': base_url,
                            'fragments': [],
                            'protocol': 'http_dash_segments' if mime_type != 'image/jpeg' else 'mhtml',
                        })
                        if 'initialization_url' in representation_ms_info:
                            initialization_url = representation_ms_info['initialization_url']
                            if not f.get('url'):
                                f['url'] = initialization_url
                            f['fragments'].append({location_key(initialization_url): initialization_url})
                        f['fragments'].extend(representation_ms_info['fragments'])
                    else:
                        # Assuming direct URL to unfragmented media.
                        f['url'] = base_url
                    if content_type in ('video', 'audio') or mime_type == 'image/jpeg':
                        formats.append(f)
                    elif content_type == 'text':
                        subtitles.setdefault(lang or 'und', []).append(f)

        return formats, subtitles
17b598d3 2872
fd76a142
F
2873 def _extract_ism_formats(self, *args, **kwargs):
2874 fmts, subs = self._extract_ism_formats_and_subtitles(*args, **kwargs)
2875 if subs:
2876 self.report_warning(bug_reports_message(
2877 "Ignoring subtitle tracks found in the ISM manifest; "
2878 "if any subtitle tracks are missing,"
2879 ))
2880 return fmts
2881
2882 def _extract_ism_formats_and_subtitles(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
47a5cb77 2883 res = self._download_xml_handle(
b2758123 2884 ism_url, video_id,
37a3bb66 2885 note='Downloading ISM manifest' if note is None else note,
2886 errnote='Failed to download ISM manifest' if errnote is None else errnote,
7360c06f 2887 fatal=fatal, data=data, headers=headers, query=query)
b2758123 2888 if res is False:
fd76a142 2889 return [], {}
47a5cb77 2890 ism_doc, urlh = res
13b08034 2891 if ism_doc is None:
fd76a142 2892 return [], {}
b2758123 2893
fd76a142 2894 return self._parse_ism_formats_and_subtitles(ism_doc, urlh.geturl(), ism_id)
b2758123 2895
fd76a142 2896 def _parse_ism_formats_and_subtitles(self, ism_doc, ism_url, ism_id=None):
76d5a363
S
2897 """
2898 Parse formats from ISM manifest.
2899 References:
2900 1. [MS-SSTR]: Smooth Streaming Protocol,
2901 https://msdn.microsoft.com/en-us/library/ff469518.aspx
2902 """
06869367 2903 if ism_doc.get('IsLive') == 'TRUE':
fd76a142 2904 return [], {}
b2758123 2905
b2758123
RA
2906 duration = int(ism_doc.attrib['Duration'])
2907 timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000
2908
2909 formats = []
fd76a142 2910 subtitles = {}
b2758123
RA
2911 for stream in ism_doc.findall('StreamIndex'):
2912 stream_type = stream.get('Type')
fd76a142 2913 if stream_type not in ('video', 'audio', 'text'):
b2758123
RA
2914 continue
2915 url_pattern = stream.attrib['Url']
2916 stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
2917 stream_name = stream.get('Name')
fd76a142 2918 stream_language = stream.get('Language', 'und')
b2758123 2919 for track in stream.findall('QualityLevel'):
e2efe599 2920 fourcc = track.get('FourCC') or ('AACL' if track.get('AudioTag') == '255' else None)
b2758123 2921 # TODO: add support for WVC1 and WMAP
66a1b864 2922 if fourcc not in ('H264', 'AVC1', 'AACL', 'TTML'):
b2758123
RA
2923 self.report_warning('%s is not a supported codec' % fourcc)
2924 continue
2925 tbr = int(track.attrib['Bitrate']) // 1000
76d5a363
S
2926 # [1] does not mention Width and Height attributes. However,
2927 # they're often present while MaxWidth and MaxHeight are
2928 # missing, so should be used as fallbacks
2929 width = int_or_none(track.get('MaxWidth') or track.get('Width'))
2930 height = int_or_none(track.get('MaxHeight') or track.get('Height'))
b2758123
RA
2931 sampling_rate = int_or_none(track.get('SamplingRate'))
2932
2933 track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
2934 track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
2935
2936 fragments = []
2937 fragment_ctx = {
2938 'time': 0,
2939 }
2940 stream_fragments = stream.findall('c')
2941 for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
2942 fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
2943 fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
2944 fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
2945 if not fragment_ctx['duration']:
2946 try:
2947 next_fragment_time = int(stream_fragment[stream_fragment_index + 1].attrib['t'])
2948 except IndexError:
2949 next_fragment_time = duration
1616f9b4 2950 fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
b2758123
RA
2951 for _ in range(fragment_repeat):
2952 fragments.append({
1616f9b4 2953 'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
b2758123
RA
2954 'duration': fragment_ctx['duration'] / stream_timescale,
2955 })
2956 fragment_ctx['time'] += fragment_ctx['duration']
2957
2958 format_id = []
2959 if ism_id:
2960 format_id.append(ism_id)
2961 if stream_name:
2962 format_id.append(stream_name)
2963 format_id.append(compat_str(tbr))
2964
fd76a142
F
2965 if stream_type == 'text':
2966 subtitles.setdefault(stream_language, []).append({
2967 'ext': 'ismt',
2968 'protocol': 'ism',
2969 'url': ism_url,
2970 'manifest_url': ism_url,
2971 'fragments': fragments,
2972 '_download_params': {
2973 'stream_type': stream_type,
2974 'duration': duration,
2975 'timescale': stream_timescale,
2976 'fourcc': fourcc,
2977 'language': stream_language,
2978 'codec_private_data': track.get('CodecPrivateData'),
2979 }
2980 })
2981 elif stream_type in ('video', 'audio'):
2982 formats.append({
2983 'format_id': '-'.join(format_id),
2984 'url': ism_url,
2985 'manifest_url': ism_url,
2986 'ext': 'ismv' if stream_type == 'video' else 'isma',
2987 'width': width,
2988 'height': height,
2989 'tbr': tbr,
2990 'asr': sampling_rate,
2991 'vcodec': 'none' if stream_type == 'audio' else fourcc,
2992 'acodec': 'none' if stream_type == 'video' else fourcc,
2993 'protocol': 'ism',
2994 'fragments': fragments,
88acdbc2 2995 'has_drm': ism_doc.find('Protection') is not None,
fd76a142
F
2996 '_download_params': {
2997 'stream_type': stream_type,
2998 'duration': duration,
2999 'timescale': stream_timescale,
3000 'width': width or 0,
3001 'height': height or 0,
3002 'fourcc': fourcc,
3003 'language': stream_language,
3004 'codec_private_data': track.get('CodecPrivateData'),
3005 'sampling_rate': sampling_rate,
3006 'channels': int_or_none(track.get('Channels', 2)),
3007 'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
3008 'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
3009 },
3010 })
3011 return formats, subtitles
b2758123 3012
    def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None, quality=None):
        """Extract media entries from HTML5 <video>/<audio> tags in *webpage*
        (including the amp-video/amp-audio and dl8-video variants).

        For each tag: the tag's own src, every nested <source> and every
        nested subtitle/caption <track> are collected.  m3u8/mpd sources are
        expanded through the corresponding manifest extractors; everything
        else becomes a plain progressive format.

        Returns a list of dicts, each with 'formats', 'subtitles' and
        'thumbnail' keys (a 'Referer' header pointing at base_url is attached
        to every format).
        """
        def absolute_url(item_url):
            # Resolve a possibly-relative URL against the page URL
            return urljoin(base_url, item_url)

        def parse_content_type(content_type):
            # Turn a MIME type (optionally with a codecs= parameter) into
            # partial format fields ('ext' + whatever parse_codecs yields)
            if not content_type:
                return {}
            ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
            if ctr:
                mimetype, codecs = ctr.groups()
                f = parse_codecs(codecs)
                f['ext'] = mimetype2ext(mimetype)
                return f
            return {}

        def _media_formats(src, cur_media_type, type_info={}):
            # NOTE: type_info default is a shared dict but is only read here.
            # Returns (is_plain_url, formats): manifest URLs (m3u8/mpd) are
            # expanded and flagged is_plain_url=False.
            full_url = absolute_url(src)
            ext = type_info.get('ext') or determine_ext(full_url)
            if ext == 'm3u8':
                is_plain_url = False
                formats = self._extract_m3u8_formats(
                    full_url, video_id, ext='mp4',
                    entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
                    preference=preference, quality=quality, fatal=False)
            elif ext == 'mpd':
                is_plain_url = False
                formats = self._extract_mpd_formats(
                    full_url, video_id, mpd_id=mpd_id, fatal=False)
            else:
                is_plain_url = True
                formats = [{
                    'url': full_url,
                    'vcodec': 'none' if cur_media_type == 'audio' else None,
                }]
            return is_plain_url, formats

        entries = []
        # amp-video and amp-audio are very similar to their HTML5 counterparts
        # so we wll include them right here (see
        # https://www.ampproject.org/docs/reference/components/amp-video)
        # For dl8-* tags see https://delight-vr.com/documentation/dl8-video/
        _MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video|audio)'
        # First pass: self-closing tags (no inner content, hence the '' slot)
        media_tags = [(media_tag, media_tag_name, media_type, '')
                      for media_tag, media_tag_name, media_type
                      in re.findall(r'(?s)(<(%s)[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage)]
        media_tags.extend(re.findall(
            # We only allow video|audio followed by a whitespace or '>'.
            # Allowing more characters may end up in significant slow down (see
            # https://github.com/ytdl-org/youtube-dl/issues/11979, example URL:
            # http://www.porntrex.com/maps/videositemap.xml).
            r'(?s)(<(?P<tag>%s)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>' % _MEDIA_TAG_NAME_RE, webpage))
        for media_tag, _, media_type, media_content in media_tags:
            media_info = {
                'formats': [],
                'subtitles': {},
            }
            media_attributes = extract_attributes(media_tag)
            src = strip_or_none(media_attributes.get('src'))
            if src:
                _, formats = _media_formats(src, media_type)
                media_info['formats'].extend(formats)
            media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
            if media_content:
                for source_tag in re.findall(r'<source[^>]+>', media_content):
                    s_attr = extract_attributes(source_tag)
                    # data-video-src and data-src are non standard but seen
                    # several times in the wild
                    src = strip_or_none(dict_get(s_attr, ('src', 'data-video-src', 'data-src')))
                    if not src:
                        continue
                    f = parse_content_type(s_attr.get('type'))
                    is_plain_url, formats = _media_formats(src, media_type, f)
                    if is_plain_url:
                        # width, height, res, label and title attributes are
                        # all not standard but seen several times in the wild
                        labels = [
                            s_attr.get(lbl)
                            for lbl in ('label', 'title')
                            if str_or_none(s_attr.get(lbl))
                        ]
                        width = int_or_none(s_attr.get('width'))
                        height = (int_or_none(s_attr.get('height'))
                                  or int_or_none(s_attr.get('res')))
                        if not width or not height:
                            # Fall back to parsing "1280x720"-style labels
                            for lbl in labels:
                                resolution = parse_resolution(lbl)
                                if not resolution:
                                    continue
                                width = width or resolution.get('width')
                                height = height or resolution.get('height')
                        # for/else: tbr stays None when no label parses as a bitrate
                        for lbl in labels:
                            tbr = parse_bitrate(lbl)
                            if tbr:
                                break
                        else:
                            tbr = None
                        f.update({
                            'width': width,
                            'height': height,
                            'tbr': tbr,
                            'format_id': s_attr.get('label') or s_attr.get('title'),
                        })
                        f.update(formats[0])
                        media_info['formats'].append(f)
                    else:
                        media_info['formats'].extend(formats)
                for track_tag in re.findall(r'<track[^>]+>', media_content):
                    track_attributes = extract_attributes(track_tag)
                    kind = track_attributes.get('kind')
                    if not kind or kind in ('subtitles', 'captions'):
                        src = strip_or_none(track_attributes.get('src'))
                        if not src:
                            continue
                        lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
                        media_info['subtitles'].setdefault(lang, []).append({
                            'url': absolute_url(src),
                        })
            for f in media_info['formats']:
                f.setdefault('http_headers', {})['Referer'] = base_url
            if media_info['formats'] or media_info['subtitles']:
                entries.append(media_info)
        return entries
3135
f6a1d69a
F
3136 def _extract_akamai_formats(self, *args, **kwargs):
3137 fmts, subs = self._extract_akamai_formats_and_subtitles(*args, **kwargs)
3138 if subs:
3139 self.report_warning(bug_reports_message(
3140 "Ignoring subtitle tracks found in the manifests; "
3141 "if any subtitle tracks are missing,"
3142 ))
3143 return fmts
3144
    def _extract_akamai_formats_and_subtitles(self, manifest_url, video_id, hosts={}):
        """Probe an Akamai manifest URL for HDS and HLS variants and
        (when the URL is not token-signed) derive progressive HTTP formats
        from the HLS master URL.  Returns (formats, subtitles).

        hosts may map 'hds'/'hls'/'http' to replacement hostnames.
        NOTE: the {} default is shared but only read here, never mutated.
        """
        signed = 'hdnea=' in manifest_url
        if not signed:
            # https://learn.akamai.com/en-us/webhelp/media-services-on-demand/stream-packaging-user-guide/GUID-BE6C0F73-1E06-483B-B0EA-57984B91B7F9.html
            manifest_url = re.sub(
                r'(?:b=[\d,-]+|(?:__a__|attributes)=off|__b__=\d+)&?',
                '', manifest_url).strip('?')

        formats = []
        subtitles = {}

        hdcore_sign = 'hdcore=3.7.0'
        # /i/ (HLS) <-> /z/ (HDS) is the Akamai path convention
        f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
        hds_host = hosts.get('hds')
        if hds_host:
            f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
        if 'hdcore=' not in f4m_url:
            f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
        f4m_formats = self._extract_f4m_formats(
            f4m_url, video_id, f4m_id='hds', fatal=False)
        for entry in f4m_formats:
            # The hdcore parameter must also be carried on segment URLs
            entry.update({'extra_param_to_segment_url': hdcore_sign})
        formats.extend(f4m_formats)

        m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
        hls_host = hosts.get('hls')
        if hls_host:
            m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
        m3u8_formats, m3u8_subtitles = self._extract_m3u8_formats_and_subtitles(
            m3u8_url, video_id, 'mp4', 'm3u8_native',
            m3u8_id='hls', fatal=False)
        formats.extend(m3u8_formats)
        subtitles = self._merge_subtitles(subtitles, m3u8_subtitles)

        http_host = hosts.get('http')
        # Progressive variants can only be derived from unsigned HLS URLs
        if http_host and m3u8_formats and not signed:
            REPL_REGEX = r'https?://[^/]+/i/([^,]+),([^/]+),([^/]+)\.csmil/.+'
            qualities = re.match(REPL_REGEX, m3u8_url).group(2).split(',')
            qualities_length = len(qualities)
            # Only proceed if HLS formats line up 1:1 with the quality list
            # (the "+ 1" allows for one extra audio-only rendition)
            if len(m3u8_formats) in (qualities_length, qualities_length + 1):
                i = 0
                for f in m3u8_formats:
                    if f['vcodec'] != 'none':
                        for protocol in ('http', 'https'):
                            http_f = f.copy()
                            del http_f['manifest_url']
                            http_url = re.sub(
                                REPL_REGEX, protocol + r'://%s/\g<1>%s\3' % (http_host, qualities[i]), f['url'])
                            http_f.update({
                                'format_id': http_f['format_id'].replace('hls-', protocol + '-'),
                                'url': http_url,
                                'protocol': protocol,
                            })
                            formats.append(http_f)
                        # i indexes qualities and must only advance on video formats
                        i += 1

        return formats, subtitles
c7c43a93 3202
6ad02195 3203 def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
044eeb14 3204 query = compat_urlparse.urlparse(url).query
6ad02195 3205 url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
240f2622
S
3206 mobj = re.search(
3207 r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
3208 url_base = mobj.group('url')
3209 http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
6ad02195 3210 formats = []
044eeb14
S
3211
3212 def manifest_url(manifest):
3213 m_url = '%s/%s' % (http_base_url, manifest)
3214 if query:
3215 m_url += '?%s' % query
3216 return m_url
3217
6ad02195
RA
3218 if 'm3u8' not in skip_protocols:
3219 formats.extend(self._extract_m3u8_formats(
044eeb14 3220 manifest_url('playlist.m3u8'), video_id, 'mp4',
6ad02195
RA
3221 m3u8_entry_protocol, m3u8_id='hls', fatal=False))
3222 if 'f4m' not in skip_protocols:
3223 formats.extend(self._extract_f4m_formats(
044eeb14 3224 manifest_url('manifest.f4m'),
6ad02195 3225 video_id, f4m_id='hds', fatal=False))
0384932e
RA
3226 if 'dash' not in skip_protocols:
3227 formats.extend(self._extract_mpd_formats(
044eeb14 3228 manifest_url('manifest.mpd'),
0384932e 3229 video_id, mpd_id='dash', fatal=False))
6ad02195 3230 if re.search(r'(?:/smil:|\.smil)', url_base):
6ad02195
RA
3231 if 'smil' not in skip_protocols:
3232 rtmp_formats = self._extract_smil_formats(
044eeb14 3233 manifest_url('jwplayer.smil'),
6ad02195
RA
3234 video_id, fatal=False)
3235 for rtmp_format in rtmp_formats:
3236 rtsp_format = rtmp_format.copy()
3237 rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
3238 del rtsp_format['play_path']
3239 del rtsp_format['ext']
3240 rtsp_format.update({
3241 'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
3242 'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
3243 'protocol': 'rtsp',
3244 })
3245 formats.extend([rtmp_format, rtsp_format])
3246 else:
3247 for protocol in ('rtmp', 'rtsp'):
3248 if protocol not in skip_protocols:
3249 formats.append({
f2e2f0c7 3250 'url': '%s:%s' % (protocol, url_base),
6ad02195
RA
3251 'format_id': protocol,
3252 'protocol': protocol,
3253 })
3254 return formats
3255
c73e330e 3256 def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
a4a554a7 3257 mobj = re.search(
ac9c69ac 3258 r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
a4a554a7
YCH
3259 webpage)
3260 if mobj:
c73e330e
RU
3261 try:
3262 jwplayer_data = self._parse_json(mobj.group('options'),
3263 video_id=video_id,
3264 transform_source=transform_source)
3265 except ExtractorError:
3266 pass
3267 else:
3268 if isinstance(jwplayer_data, dict):
3269 return jwplayer_data
a4a554a7
YCH
3270
3271 def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
c73e330e
RU
3272 jwplayer_data = self._find_jwplayer_data(
3273 webpage, video_id, transform_source=js_to_json)
a4a554a7
YCH
3274 return self._parse_jwplayer_data(
3275 jwplayer_data, video_id, *args, **kwargs)
3276
    def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
                             m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
        """Turn a jwplayer setup()/config dict into info dict(s).

        Handles the legacy flattened shapes (no 'playlist' key, playlist as a
        single item, item as its own single source).  Returns one entry when
        the playlist has exactly one item, else a playlist result.
        When require_title is true a missing 'title' raises KeyError.
        NOTE: mutates jwplayer_data/video_data in place while normalizing.
        """
        # JWPlayer backward compatibility: flattened playlists
        # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
        if 'playlist' not in jwplayer_data:
            jwplayer_data = {'playlist': [jwplayer_data]}

        entries = []

        # JWPlayer backward compatibility: single playlist item
        # https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
        if not isinstance(jwplayer_data['playlist'], list):
            jwplayer_data['playlist'] = [jwplayer_data['playlist']]

        for video_data in jwplayer_data['playlist']:
            # JWPlayer backward compatibility: flattened sources
            # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
            if 'sources' not in video_data:
                video_data['sources'] = [video_data]

            this_video_id = video_id or video_data['mediaid']

            formats = self._parse_jwplayer_formats(
                video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
                mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)

            subtitles = {}
            tracks = video_data.get('tracks')
            if tracks and isinstance(tracks, list):
                for track in tracks:
                    if not isinstance(track, dict):
                        continue
                    track_kind = track.get('kind')
                    if not track_kind or not isinstance(track_kind, compat_str):
                        continue
                    # Only caption/subtitle tracks; thumbnails/chapters are skipped
                    if track_kind.lower() not in ('captions', 'subtitles'):
                        continue
                    track_url = urljoin(base_url, track.get('file'))
                    if not track_url:
                        continue
                    subtitles.setdefault(track.get('label') or 'en', []).append({
                        'url': self._proto_relative_url(track_url)
                    })

            entry = {
                'id': this_video_id,
                'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')),
                'description': clean_html(video_data.get('description')),
                'thumbnail': urljoin(base_url, self._proto_relative_url(video_data.get('image'))),
                'timestamp': int_or_none(video_data.get('pubdate')),
                'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
                'subtitles': subtitles,
            }
            # https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
            # A lone YouTube URL is delegated instead of treated as a direct format
            if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
                entry.update({
                    '_type': 'url_transparent',
                    'url': formats[0]['url'],
                })
            else:
                self._sort_formats(formats)
                entry['formats'] = formats
            entries.append(entry)
        if len(entries) == 1:
            return entries[0]
        else:
            return self.playlist_result(entries)
3344
ed0cf9b3
S
    def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
                                m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
        """Convert a jwplayer 'sources' list into yt-dlp formats.

        Duplicate source URLs are skipped.  HLS/DASH/SMIL sources are expanded
        through the manifest extractors; audio MIME types become audio-only
        formats; everything else is treated as a progressive (or RTMP) format.
        """
        urls = []
        formats = []
        for source in jwplayer_sources_data:
            if not isinstance(source, dict):
                continue
            source_url = urljoin(
                base_url, self._proto_relative_url(source.get('file')))
            if not source_url or source_url in urls:
                continue
            urls.append(source_url)
            source_type = source.get('type') or ''
            ext = mimetype2ext(source_type) or determine_ext(source_url)
            if source_type == 'hls' or ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    source_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id=m3u8_id, fatal=False))
            elif source_type == 'dash' or ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    source_url, video_id, mpd_id=mpd_id, fatal=False))
            elif ext == 'smil':
                formats.extend(self._extract_smil_formats(
                    source_url, video_id, fatal=False))
            # https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
            elif source_type.startswith('audio') or ext in (
                    'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
                formats.append({
                    'url': source_url,
                    'vcodec': 'none',
                    'ext': ext,
                })
            else:
                height = int_or_none(source.get('height'))
                if height is None:
                    # Often no height is provided but there is a label in
                    # format like "1080p", "720p SD", or 1080.
                    height = int_or_none(self._search_regex(
                        r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''),
                        'height', default=None))
                a_format = {
                    'url': source_url,
                    'width': int_or_none(source.get('width')),
                    'height': height,
                    'tbr': int_or_none(source.get('bitrate')),
                    'ext': ext,
                }
                if source_url.startswith('rtmp'):
                    a_format['ext'] = 'flv'
                    # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
                    # of jwplayer.flash.swf
                    # Split "rtmp://host/app/mp4:path" into the RTMP URL and
                    # the play_path ("mp4:path") the player expects
                    rtmp_url_parts = re.split(
                        r'((?:mp4|mp3|flv):)', source_url, 1)
                    if len(rtmp_url_parts) == 3:
                        rtmp_url, prefix, play_path = rtmp_url_parts
                        a_format.update({
                            'url': rtmp_url,
                            'play_path': prefix + play_path,
                        })
                    if rtmp_params:
                        a_format.update(rtmp_params)
                formats.append(a_format)
        return formats
3408
f4b1c7ad
PH
3409 def _live_title(self, name):
3410 """ Generate the title for a live video """
3411 now = datetime.datetime.now()
611c1dd9 3412 now_str = now.strftime('%Y-%m-%d %H:%M')
f4b1c7ad
PH
3413 return name + ' ' + now_str
3414
b14f3a4c
PH
3415 def _int(self, v, name, fatal=False, **kwargs):
3416 res = int_or_none(v, **kwargs)
3417 if 'get_attr' in kwargs:
3418 print(getattr(v, kwargs['get_attr']))
3419 if res is None:
3420 msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
3421 if fatal:
3422 raise ExtractorError(msg)
3423 else:
6a39ee13 3424 self.report_warning(msg)
b14f3a4c
PH
3425 return res
3426
3427 def _float(self, v, name, fatal=False, **kwargs):
3428 res = float_or_none(v, **kwargs)
3429 if res is None:
3430 msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
3431 if fatal:
3432 raise ExtractorError(msg)
3433 else:
6a39ee13 3434 self.report_warning(msg)
b14f3a4c
PH
3435 return res
3436
40e41780
TF
    def _set_cookie(self, domain, name, value, expire_time=None, port=None,
                    path='/', secure=False, discard=False, rest={}, **kwargs):
        # Build a cookielib-style Cookie and store it in the downloader's jar.
        # The positional arguments follow http.cookiejar.Cookie's constructor:
        # version=0, name, value, port, port_specified, domain,
        # domain_specified=True, domain_initial_dot, path, path_specified=True,
        # secure, expires, discard, comment=None, comment_url=None, rest.
        # NOTE(review): rest={} is a shared mutable default handed straight to
        # the Cookie object; safe only while nothing mutates it downstream.
        cookie = compat_cookiejar_Cookie(
            0, name, value, port, port is not None, domain, True,
            domain.startswith('.'), path, True, secure, expire_time,
            discard, None, None, rest)
        self._downloader.cookiejar.set_cookie(cookie)
3444
799207e8 3445 def _get_cookies(self, url):
f7ad7160 3446 """ Return a compat_cookies_SimpleCookie with the cookies for the url """
5c2266df 3447 req = sanitized_Request(url)
799207e8 3448 self._downloader.cookiejar.add_cookie_header(req)
f7ad7160 3449 return compat_cookies_SimpleCookie(req.get_header('Cookie'))
799207e8 3450
    def _apply_first_set_cookie_header(self, url_handle, cookie):
        """
        Apply first Set-Cookie header instead of the last. Experimental.

        Some sites (e.g. [1-3]) may serve two cookies under the same name
        in Set-Cookie header and expect the first (old) one to be set rather
        than second (new). However, as of RFC6265 the newer one cookie
        should be set into cookie store what actually happens.
        We will workaround this issue by resetting the cookie to
        the first one manually.
        1. https://new.vk.com/
        2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
        3. https://learning.oreilly.com/
        """
        for header, cookies in url_handle.headers.items():
            if header.lower() != 'set-cookie':
                continue
            if sys.version_info[0] >= 3:
                # On py3 header values arrive latin-1-decoded; round-trip
                # through iso-8859-1 to recover the raw bytes first
                cookies = cookies.encode('iso-8859-1')
            cookies = cookies.decode('utf-8')
            cookie_value = re.search(
                r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie, cookies)
            if cookie_value:
                value, domain = cookie_value.groups()
                # Re-set the first match, overriding whatever the jar stored
                self._set_cookie(domain, cookie, value)
                break
3477
05900629
PH
3478 def get_testcases(self, include_onlymatching=False):
3479 t = getattr(self, '_TEST', None)
3480 if t:
3481 assert not hasattr(self, '_TESTS'), \
3482 '%s has _TEST and _TESTS' % type(self).__name__
3483 tests = [t]
3484 else:
3485 tests = getattr(self, '_TESTS', [])
3486 for t in tests:
3487 if not include_onlymatching and t.get('only_matching', False):
3488 continue
3489 t['name'] = type(self).__name__[:-len('IE')]
3490 yield t
3491
3492 def is_suitable(self, age_limit):
3493 """ Test whether the extractor is generally suitable for the given
3494 age limit (i.e. pornographic sites are not, all others usually are) """
3495
3496 any_restricted = False
3497 for tc in self.get_testcases(include_onlymatching=False):
40090e8d 3498 if tc.get('playlist', []):
05900629
PH
3499 tc = tc['playlist'][0]
3500 is_restricted = age_restricted(
3501 tc.get('info_dict', {}).get('age_limit'), age_limit)
3502 if not is_restricted:
3503 return True
3504 any_restricted = any_restricted or is_restricted
3505 return not any_restricted
3506
a504ced0 3507 def extract_subtitles(self, *args, **kwargs):
a06916d9 3508 if (self.get_param('writesubtitles', False)
3509 or self.get_param('listsubtitles')):
9868ea49
JMF
3510 return self._get_subtitles(*args, **kwargs)
3511 return {}
a504ced0
JMF
3512
    def _get_subtitles(self, *args, **kwargs):
        """Fetch the subtitles dict; override in subclasses.  Only called via
        extract_subtitles() when the user requested subtitles."""
        raise NotImplementedError('This method must be implemented by subclasses')
a504ced0 3515
912e0b7e
YCH
3516 @staticmethod
3517 def _merge_subtitle_items(subtitle_list1, subtitle_list2):
3518 """ Merge subtitle items for one language. Items with duplicated URLs
3519 will be dropped. """
3520 list1_urls = set([item['url'] for item in subtitle_list1])
3521 ret = list(subtitle_list1)
3522 ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
3523 return ret
3524
3525 @classmethod
46890374 3526 def _merge_subtitles(cls, *dicts, target=None):
19bb3920 3527 """ Merge subtitle dictionaries, language by language. """
19bb3920
F
3528 if target is None:
3529 target = {}
3530 for d in dicts:
3531 for lang, subs in d.items():
3532 target[lang] = cls._merge_subtitle_items(target.get(lang, []), subs)
3533 return target
912e0b7e 3534
360e1ca5 3535 def extract_automatic_captions(self, *args, **kwargs):
a06916d9 3536 if (self.get_param('writeautomaticsub', False)
3537 or self.get_param('listsubtitles')):
9868ea49
JMF
3538 return self._get_automatic_captions(*args, **kwargs)
3539 return {}
360e1ca5
JMF
3540
    def _get_automatic_captions(self, *args, **kwargs):
        """Fetch the automatic captions dict; override in subclasses.  Only
        called via extract_automatic_captions() when requested by the user."""
        raise NotImplementedError('This method must be implemented by subclasses')
360e1ca5 3543
d77ab8e2 3544 def mark_watched(self, *args, **kwargs):
1813a6cc 3545 if not self.get_param('mark_watched', False):
3546 return
3547 if (self._get_login_info()[0] is not None
3548 or self.get_param('cookiefile')
3549 or self.get_param('cookiesfrombrowser')):
d77ab8e2
S
3550 self._mark_watched(*args, **kwargs)
3551
    def _mark_watched(self, *args, **kwargs):
        """Perform the site-specific mark-as-watched action; override in
        subclasses.  Only called via mark_watched() when enabled and logged in."""
        raise NotImplementedError('This method must be implemented by subclasses')
3554
38cce791
YCH
3555 def geo_verification_headers(self):
3556 headers = {}
a06916d9 3557 geo_verification_proxy = self.get_param('geo_verification_proxy')
38cce791
YCH
3558 if geo_verification_proxy:
3559 headers['Ytdl-request-proxy'] = geo_verification_proxy
3560 return headers
3561
98763ee3
YCH
3562 def _generic_id(self, url):
3563 return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
3564
3565 def _generic_title(self, url):
3566 return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
3567
c224251a 3568 @staticmethod
b0089e89 3569 def _availability(is_private=None, needs_premium=None, needs_subscription=None, needs_auth=None, is_unlisted=None):
c224251a
M
3570 all_known = all(map(
3571 lambda x: x is not None,
3572 (is_private, needs_premium, needs_subscription, needs_auth, is_unlisted)))
3573 return (
3574 'private' if is_private
3575 else 'premium_only' if needs_premium
3576 else 'subscriber_only' if needs_subscription
3577 else 'needs_auth' if needs_auth
3578 else 'unlisted' if is_unlisted
3579 else 'public' if all_known
3580 else None)
3581
4bb6b02f 3582 def _configuration_arg(self, key, default=NO_DEFAULT, casesense=False):
3583 '''
3584 @returns A list of values for the extractor argument given by "key"
3585 or "default" if no such key is present
3586 @param default The default value to return when the key is not present (default: [])
3587 @param casesense When false, the values are converted to lower case
3588 '''
3589 val = traverse_obj(
5d3a0e79 3590 self._downloader.params, ('extractor_args', self.ie_key().lower(), key))
4bb6b02f 3591 if val is None:
3592 return [] if default is NO_DEFAULT else default
3593 return list(val) if casesense else [x.lower() for x in val]
5d3a0e79 3594
8dbe9899 3595
d6983cb4
PH
class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

    @classmethod
    def _make_valid_url(cls):
        # prefix is '' (1 result), 'all' (_MAX_RESULTS) or a positive count
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    @classmethod
    def suitable(cls, url):
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError('Invalid search query "%s"' % query)

        prefix = mobj.group('prefix')
        search_query = mobj.group('query')
        if prefix == '':
            return self._get_n_results(search_query, 1)
        if prefix == 'all':
            return self._get_n_results(search_query, self._MAX_RESULTS)
        n = int(prefix)
        if n <= 0:
            raise ExtractorError('invalid download number %s for query "%s"' % (n, search_query))
        if n > self._MAX_RESULTS:
            # Clamp oversized requests instead of failing
            self.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
            n = self._MAX_RESULTS
        return self._get_n_results(search_query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError('This method must be implemented by subclasses')

    @property
    def SEARCH_KEY(self):
        return self._SEARCH_KEY