import base64
import collections
import getpass
import hashlib
import http.client
import http.cookiejar
import http.cookies
import inspect
import itertools
import json
import math
import netrc
import os
import random
import re
import sys
import time
import types
import urllib.error
import urllib.parse
import urllib.request
import xml.etree.ElementTree

from ..compat import functools  # isort: split
from ..compat import compat_etree_fromstring, compat_expanduser, compat_os_name
from ..cookies import LenientSimpleCookie
from ..downloader.f4m import get_base_url, remove_encrypted_media
from ..utils import (
    IDENTITY,
    JSON_LD_RE,
    NO_DEFAULT,
    ExtractorError,
    FormatSorter,
    GeoRestrictedError,
    GeoUtils,
    HEADRequest,
    LenientJSONDecoder,
    RegexNotFoundError,
    RetryManager,
    UnsupportedError,
    age_restricted,
    base_url,
    bug_reports_message,
    classproperty,
    clean_html,
    deprecation_warning,
    determine_ext,
    dict_get,
    encode_data_uri,
    error_to_compat_str,
    extract_attributes,
    filter_dict,
    fix_xml_ampersands,
    float_or_none,
    format_field,
    int_or_none,
    join_nonempty,
    js_to_json,
    mimetype2ext,
    network_exceptions,
    orderedSet,
    parse_bitrate,
    parse_codecs,
    parse_duration,
    parse_iso8601,
    parse_m3u8_attributes,
    parse_resolution,
    sanitize_filename,
    sanitize_url,
    sanitized_Request,
    smuggle_url,
    str_or_none,
    str_to_int,
    strip_or_none,
    traverse_obj,
    truncate_string,
    try_call,
    try_get,
    unescapeHTML,
    unified_strdate,
    unified_timestamp,
    update_Request,
    update_url_query,
    url_basename,
    url_or_none,
    urlhandle_detect_ext,
    urljoin,
    variadic,
    xpath_element,
    xpath_text,
    xpath_with_ns,
)


class InfoExtractor:
    """Information Extractor class.

    Information extractors are the classes that, given a URL, extract
    information about the video (or videos) the URL refers to. This
    information includes the real video URL, the video title, author and
    others. The information is stored in a dictionary which is then
    passed to the YoutubeDL. The YoutubeDL processes this
    information, possibly downloading the video to the file system, among
    other possible outcomes.

    The type field determines the type of the result.
    By far the most common value (and the default if _type is missing) is
    "video", which indicates a single video.

    For a video, the dictionaries must include the following fields:

    id:             Video identifier.
    title:          Video title, unescaped. Set to an empty string if the video
                    has no title, as opposed to "None", which signifies that the
                    extractor failed to obtain a title

    Additionally, it must contain either a formats entry or a url one:

    formats:        A list of dictionaries for each format available, ordered
                    from worst to best quality.

                    Potential fields:
                    * url        The mandatory URL representing the media:
                                   for plain file media - HTTP URL of this file,
                                   for RTMP - RTMP URL,
                                   for HLS - URL of the M3U8 media playlist,
                                   for HDS - URL of the F4M manifest,
                                   for DASH
                                     - HTTP URL to plain file media (in case of
                                       unfragmented media)
                                     - URL of the MPD manifest or base URL
                                       representing the media if MPD manifest
                                       is parsed from a string (in case of
                                       fragmented media)
                                   for MSS - URL of the ISM manifest.
                    * manifest_url
                                 The URL of the manifest file in case of
                                 fragmented media:
                                   for HLS - URL of the M3U8 master playlist,
                                   for HDS - URL of the F4M manifest,
                                   for DASH - URL of the MPD manifest,
                                   for MSS - URL of the ISM manifest.
                    * manifest_stream_number  (For internal use only)
                                 The index of the stream in the manifest file
                    * ext        Will be calculated from URL if missing
                    * format     A human-readable description of the format
                                 ("mp4 container with h264/opus").
                                 Calculated from the format_id, width, height,
                                 and format_note fields if missing.
                    * format_id  A short description of the format
                                 ("mp4_h264_opus" or "19").
                                 Technically optional, but strongly recommended.
                    * format_note  Additional info about the format
                                 ("3D" or "DASH video")
                    * width      Width of the video, if known
                    * height     Height of the video, if known
                    * aspect_ratio  Aspect ratio of the video, if known
                                 Automatically calculated from width and height
                    * resolution Textual description of width and height
                                 Automatically calculated from width and height
                    * dynamic_range  The dynamic range of the video. One of:
                                 "SDR" (None), "HDR10", "HDR10+", "HDR12", "HLG", "DV"
                    * tbr        Average bitrate of audio and video in KBit/s
                    * abr        Average audio bitrate in KBit/s
                    * acodec     Name of the audio codec in use
                    * asr        Audio sampling rate in Hertz
                    * audio_channels  Number of audio channels
                    * vbr        Average video bitrate in KBit/s
                    * fps        Frame rate
                    * vcodec     Name of the video codec in use
                    * container  Name of the container format
                    * filesize   The number of bytes, if known in advance
                    * filesize_approx  An estimate for the number of bytes
                    * player_url SWF Player URL (used for rtmpdump).
                    * protocol   The protocol that will be used for the actual
                                 download, lower-case. One of "http", "https" or
                                 one of the protocols defined in downloader.PROTOCOL_MAP
                    * fragment_base_url
                                 Base URL for fragments. Each fragment's path
                                 value (if present) will be relative to
                                 this URL.
                    * fragments  A list of fragments of a fragmented media.
                                 Each fragment entry must contain either a url
                                 or a path. If a url is present, it should be
                                 used by the client. Otherwise, both path and
                                 fragment_base_url must be present. Here is
                                 the list of all potential fields:
                                 * "url" - fragment's URL
                                 * "path" - fragment's path relative to
                                            fragment_base_url
                                 * "duration" (optional, int or float)
                                 * "filesize" (optional, int)
                    * is_from_start  Is a live format that can be downloaded
                                 from the start. Boolean
                    * preference Order number of this format. If this field is
                                 present and not None, the formats get sorted
                                 by this field, regardless of all other values.
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                                 < -1000 to hide the format (if there is
                                 another one which is strictly better)
                    * language   Language code, e.g. "de" or "en-US".
                    * language_preference  Is this in the language mentioned in
                                 the URL?
                                 10 if it's what the URL is about,
                                 -1 for default (don't know),
                                 -10 otherwise, other values reserved for now.
                    * quality    Order number of the video quality of this
                                 format, irrespective of the file format.
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                    * source_preference  Order number for this video source
                                 (quality takes higher priority)
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                    * http_headers  A dictionary of additional HTTP headers
                                 to add to the request.
                    * stretched_ratio  If given and not 1, indicates that the
                                 video's pixels are not square.
                                 width : height ratio as float.
                    * no_resume  The server does not support resuming the
                                 (HTTP or RTMP) download. Boolean.
                    * has_drm    The format has DRM and cannot be downloaded. Boolean
                    * extra_param_to_segment_url  A query string to append to each
                                 fragment's URL, or to update each existing query string
                                 with. Only applied by the native HLS/DASH downloaders.
                    * hls_aes    A dictionary of HLS AES-128 decryption information
                                 used by the native HLS downloader to override the
                                 values in the media playlist when an '#EXT-X-KEY' tag
                                 is present in the playlist:
                                 * uri  The URI from which the key will be downloaded
                                 * key  The key (as hex) used to decrypt fragments.
                                        If `key` is given, any key URI will be ignored
                                 * iv   The IV (as hex) used to decrypt fragments
                    * downloader_options  A dictionary of downloader options
                                 (For internal use only)
                                 * http_chunk_size  Chunk size for HTTP downloads
                                 * ffmpeg_args      Extra arguments for ffmpeg downloader
                    RTMP formats can also have the additional fields: page_url,
                    app, play_path, tc_url, flash_version, rtmp_live, rtmp_conn,
                    rtmp_protocol, rtmp_real_time
    url:            Final video URL.
    ext:            Video filename extension.
    format:         The video format, defaults to ext (used for --get-format)
    player_url:     SWF Player URL (used for rtmpdump).

    The following fields are optional:

    direct:         True if a direct video file was given (must only be set by GenericIE)
    alt_title:      A secondary title of the video.
    display_id:     An alternative identifier for the video, not necessarily
                    unique, but available before title. Typically, id is
                    something like "4234987", title "Dancing naked mole rats",
                    and display_id "dancing-naked-mole-rats"
    thumbnails:     A list of dictionaries, with the following entries:
                        * "id" (optional, string) - Thumbnail format ID
                        * "url"
                        * "preference" (optional, int) - quality of the image
                        * "width" (optional, int)
                        * "height" (optional, int)
                        * "resolution" (optional, string "{width}x{height}",
                                        deprecated)
                        * "filesize" (optional, int)
                        * "http_headers" (dict) - HTTP headers for the request
    thumbnail:      Full URL to a video thumbnail image.
    description:    Full video description.
    uploader:       Full name of the video uploader.
    license:        License name the video is licensed under.
    creator:        The creator of the video.
    timestamp:      UNIX timestamp of the moment the video was uploaded
    upload_date:    Video upload date in UTC (YYYYMMDD).
                    If not explicitly set, calculated from timestamp
    release_timestamp: UNIX timestamp of the moment the video was released.
                    If it is not clear whether to use timestamp or this, use the former
    release_date:   The date (YYYYMMDD) when the video was released in UTC.
                    If not explicitly set, calculated from release_timestamp
    modified_timestamp: UNIX timestamp of the moment the video was last modified.
    modified_date:  The date (YYYYMMDD) when the video was last modified in UTC.
                    If not explicitly set, calculated from modified_timestamp
    uploader_id:    Nickname or id of the video uploader.
    uploader_url:   Full URL to a personal webpage of the video uploader.
    channel:        Full name of the channel the video is uploaded on.
                    Note that channel fields may or may not repeat uploader
                    fields. This depends on a particular extractor.
    channel_id:     Id of the channel.
    channel_url:    Full URL to a channel webpage.
    channel_follower_count: Number of followers of the channel.
    location:       Physical location where the video was filmed.
    subtitles:      The available subtitles as a dictionary in the format
                    {tag: subformats}. "tag" is usually a language code, and
                    "subformats" is a list sorted from lower to higher
                    preference, each element is a dictionary with the "ext"
                    entry and one of:
                        * "data": The subtitles file contents
                        * "url": A URL pointing to the subtitles file
                    It can optionally also have:
                        * "name": Name or description of the subtitles
                        * "http_headers": A dictionary of additional HTTP headers
                          to add to the request.
                    "ext" will be calculated from URL if missing
    automatic_captions: Like 'subtitles'; contains automatically generated
                    captions instead of normal subtitles
    duration:       Length of the video in seconds, as an integer or float.
    view_count:     How many users have watched the video on the platform.
    concurrent_view_count: How many users are currently watching the video on the platform.
    like_count:     Number of positive ratings of the video
    dislike_count:  Number of negative ratings of the video
    repost_count:   Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends on the webpage
    comment_count:  Number of comments on the video
    comments:       A list of comments, each with one or more of the following
                    properties (all but one of text or html optional):
                        * "author" - human-readable name of the comment author
                        * "author_id" - user ID of the comment author
                        * "author_thumbnail" - The thumbnail of the comment author
                        * "id" - Comment ID
                        * "html" - Comment as HTML
                        * "text" - Plain text of the comment
                        * "timestamp" - UNIX timestamp of comment
                        * "parent" - ID of the comment this one is replying to.
                                     Set to "root" to indicate that this is a
                                     comment to the original video.
                        * "like_count" - Number of positive ratings of the comment
                        * "dislike_count" - Number of negative ratings of the comment
                        * "is_favorited" - Whether the comment is marked as
                                           favorite by the video uploader
                        * "author_is_uploader" - Whether the comment is made by
                                                 the video uploader
    age_limit:      Age restriction for the video, as an integer (years)
    webpage_url:    The URL to the video webpage, if given to yt-dlp it
                    should allow getting the same result again. (It will be set
                    by YoutubeDL if it's missing)
    categories:     A list of categories that the video falls in, for example
                    ["Sports", "Berlin"]
    tags:           A list of tags assigned to the video, e.g. ["sweden", "pop music"]
    cast:           A list of the video cast
    is_live:        True, False, or None (=unknown). Whether this video is a
                    live stream that goes on instead of a fixed-length video.
    was_live:       True, False, or None (=unknown). Whether this video was
                    originally a live stream.
    live_status:    None (=unknown), 'is_live', 'is_upcoming', 'was_live', 'not_live',
                    or 'post_live' (was live, but VOD is not yet processed)
                    If absent, automatically set from is_live, was_live
    start_time:     Time in seconds where the reproduction should start, as
                    specified in the URL.
    end_time:       Time in seconds where the reproduction should end, as
                    specified in the URL.
    chapters:       A list of dictionaries, with the following entries:
                        * "start_time" - The start time of the chapter in seconds
                        * "end_time" - The end time of the chapter in seconds
                        * "title" (optional, string)
    playable_in_embed: Whether this video is allowed to play in embedded
                    players on other sites. Can be True (=always allowed),
                    False (=never allowed), None (=unknown), or a string
                    specifying the criteria for embedability; e.g. 'whitelist'
    availability:   Under what condition the video is available. One of
                    'private', 'premium_only', 'subscriber_only', 'needs_auth',
                    'unlisted' or 'public'. Use 'InfoExtractor._availability'
                    to set it
    _old_archive_ids: A list of old archive ids needed for backward compatibility
    _format_sort_fields: A list of fields to use for sorting formats
    __post_extractor: A function to be called just before the metadata is
                    written to either disk, logger or console. The function
                    must return a dict which will be added to the info_dict.
                    This is useful for additional information that is
                    time-consuming to extract. Note that the fields thus
                    extracted will not be available to output template and
                    match_filter. So, only "comments" and "comment_count" are
                    currently allowed to be extracted via this method.

    The following fields should only be used when the video belongs to some logical
    chapter or section:

    chapter:        Name or title of the chapter the video belongs to.
    chapter_number: Number of the chapter the video belongs to, as an integer.
    chapter_id:     Id of the chapter the video belongs to, as a unicode string.

    The following fields should only be used when the video is an episode of some
    series, programme or podcast:

    series:         Title of the series or programme the video episode belongs to.
    series_id:      Id of the series or programme the video episode belongs to, as a unicode string.
    season:         Title of the season the video episode belongs to.
    season_number:  Number of the season the video episode belongs to, as an integer.
    season_id:      Id of the season the video episode belongs to, as a unicode string.
    episode:        Title of the video episode. Unlike mandatory video title field,
                    this field should denote the exact title of the video episode
                    without any kind of decoration.
    episode_number: Number of the video episode within a season, as an integer.
    episode_id:     Id of the video episode, as a unicode string.

    The following fields should only be used when the media is a track or a part of
    a music album:

    track:          Title of the track.
    track_number:   Number of the track within an album or a disc, as an integer.
    track_id:       Id of the track (useful in case of custom indexing, e.g. 6.iii),
                    as a unicode string.
    artist:         Artist(s) of the track.
    genre:          Genre(s) of the track.
    album:          Title of the album the track belongs to.
    album_type:     Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc.).
    album_artist:   List of all artists who appeared on the album (e.g.
                    "Ash Borer / Fell Voices" or "Various Artists", useful for splits
                    and compilations).
    disc_number:    Number of the disc or other physical medium the track belongs to,
                    as an integer.
    release_year:   Year (YYYY) when the album was released.
    composer:       Composer of the piece

    The following fields should only be set for clips that should be cut from the original video:

    section_start:  Start time of the section in seconds
    section_end:    End time of the section in seconds

    The following fields should only be set for storyboards:
    rows:           Number of rows in each storyboard fragment, as an integer
    columns:        Number of columns in each storyboard fragment, as an integer

    Unless mentioned otherwise, the fields should be Unicode strings.

    Unless mentioned otherwise, None is equivalent to absence of information.
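
    As an illustration only (all values here are made up), a minimal "video"
    result could look like:

        {
            'id': '4234987',
            'title': 'Dancing naked mole rats',
            'formats': [{
                'url': 'https://example.com/video.mp4',
                'ext': 'mp4',
                'format_id': 'http-720p',
                'width': 1280,
                'height': 720,
            }],
        }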


    _type "playlist" indicates multiple videos.
    There must be a key "entries", which is a list, an iterable, or a PagedList
    object, each element of which is a valid dictionary by this specification.

    Additionally, playlists can have "id", "title", and any other relevant
    attributes with the same semantics as videos (see above).

    It can also have the following optional fields:

    playlist_count: The total number of videos in a playlist. If not given,
                    YoutubeDL tries to calculate it from "entries"
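
    As a sketch (all ids, titles and URLs here are hypothetical), a minimal
    playlist result might be:

        {
            '_type': 'playlist',
            'id': 'channel-4321',
            'title': 'Uploads',
            'entries': [
                {'_type': 'url', 'url': 'https://example.com/watch?v=1'},
                {'_type': 'url', 'url': 'https://example.com/watch?v=2'},
            ],
        }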


    _type "multi_video" indicates that there are multiple videos that
    form a single show, for example, multiple acts of an opera or TV episode.
    It must have an entries key like a playlist and contain all the keys
    required for a video at the same time.


    _type "url" indicates that the video must be extracted from another
    location, possibly by a different extractor. Its only required key is:
    "url" - the next URL to extract.
    The key "ie_key" can be set to the class name (minus the trailing "IE",
    e.g. "Youtube") if the extractor class is known in advance.
    Additionally, the dictionary may have any properties of the resolved entity
    known in advance, for example "title" if the title of the referred video is
    known ahead of time.
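
    For example, a hypothetical extractor that merely resolves an intermediate
    page to a YouTube link could return:

        {
            '_type': 'url',
            'url': 'https://www.youtube.com/watch?v=BaW_jenozKc',
            'ie_key': 'Youtube',
        }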


    _type "url_transparent" entities have the same specification as "url", but
    indicate that the given additional information is more precise than the one
    associated with the resolved URL.
    This is useful when a site employs a video service that hosts the video and
    its technical metadata, but that video service does not embed a useful
    title, description etc.


    Subclasses of this should also be added to the list of extractors and
    should define a _VALID_URL regexp and re-define the _real_extract() and
    (optionally) _real_initialize() methods.
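
    A minimal subclass, sketched here purely for illustration (the site and
    URL pattern are made up, and the helpers used are not suitable for every
    site), could look like:

        class ExampleIE(InfoExtractor):
            _VALID_URL = r'https?://(?:www[.])?example[.]com/watch/(?P<id>[0-9]+)'

            def _real_extract(self, url):
                video_id = self._match_id(url)
                webpage = self._download_webpage(url, video_id)
                return {
                    'id': video_id,
                    'title': self._html_extract_title(webpage),
                    'url': self._og_search_video_url(webpage),
                }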

    Subclasses may also override suitable() if necessary, but ensure the function
    signature is preserved and that this function imports everything it needs
    (except other extractors), so that lazy_extractors works correctly.

    Subclasses can define a list of _EMBED_REGEX, which will be searched for in
    the HTML of Generic webpages. It may also override _extract_embed_urls
    or _extract_from_webpage as necessary. While these are normally classmethods,
    _extract_from_webpage is allowed to be an instance method.

    _extract_from_webpage may raise self.StopExtraction() to stop further
    processing of the webpage and obtain exclusive rights to it. This is useful
    when the extractor cannot reliably be matched using just the URL,
    e.g. invidious/peertube instances

    Embed-only extractors can be defined by setting _VALID_URL = False.

    To support username + password (or netrc) login, the extractor must define a
    _NETRC_MACHINE and re-define _perform_login(username, password) and
    (optionally) _initialize_pre_login() methods. The _perform_login method will
    be called between _initialize_pre_login and _real_initialize if credentials
    are passed by the user. In cases where it is necessary to have the login
    process as part of the extraction rather than initialization, _perform_login
    can be left undefined.
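
    As a rough sketch (the endpoint and form fields are hypothetical), such an
    extractor might define:

        class ExampleIE(InfoExtractor):
            _NETRC_MACHINE = 'example'

            def _perform_login(self, username, password):
                self._download_webpage(
                    'https://example.com/login', None, note='Logging in',
                    data=urllib.parse.urlencode(
                        {'user': username, 'pass': password}).encode())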

    _GEO_BYPASS attribute may be set to False in order to disable
    geo restriction bypass mechanisms for a particular extractor.
    This, however, won't disable explicit geo restriction bypass based on
    a country code provided with geo_bypass_country.

    _GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
    countries for this extractor. One of these countries will be used by the
    geo restriction bypass mechanism right away, provided the mechanism is
    not disabled.

    _GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
    IP blocks in CIDR notation for this extractor. One of these IP blocks
    will be used by the geo restriction bypass mechanism similarly
    to _GEO_COUNTRIES.

    The _ENABLED attribute should be set to False for IEs that
    are disabled by default and must be explicitly enabled.

    The _WORKING attribute should be set to False for broken IEs
    in order to warn the users and skip the tests.
    """

    _ready = False
    _downloader = None
    _x_forwarded_for_ip = None
    _GEO_BYPASS = True
    _GEO_COUNTRIES = None
    _GEO_IP_BLOCKS = None
    _WORKING = True
    _ENABLED = True
    _NETRC_MACHINE = None
    IE_DESC = None
    SEARCH_KEY = None
    _VALID_URL = None
    _EMBED_REGEX = []

    def _login_hint(self, method=NO_DEFAULT, netrc=None):
        password_hint = f'--username and --password, or --netrc ({netrc or self._NETRC_MACHINE}) to provide account credentials'
        return {
            None: '',
            'any': f'Use --cookies, --cookies-from-browser, {password_hint}',
            'password': f'Use {password_hint}',
            'cookies': (
                'Use --cookies-from-browser or --cookies for the authentication. '
                'See https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp for how to manually pass cookies'),
        }[method if method is not NO_DEFAULT else 'any' if self.supports_login() else 'cookies']

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader (a YoutubeDL instance).
        If a downloader is not passed during initialization,
        it must be set using "set_downloader()" before "extract()" is called"""
        self._ready = False
        self._x_forwarded_for_ip = None
        self._printed_messages = set()
        self.set_downloader(downloader)

    @classmethod
    def _match_valid_url(cls, url):
        if cls._VALID_URL is False:
            return None
        # This does not use has/getattr intentionally - we want to know whether
        # we have cached the regexp for *this* class, whereas getattr would also
        # match the superclass
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url)

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        # This function must import everything it needs (except other extractors),
        # so that lazy_extractors works correctly
        return cls._match_valid_url(url) is not None

    @classmethod
    def _match_id(cls, url):
        return cls._match_valid_url(url).group('id')

    @classmethod
    def get_temp_id(cls, url):
        try:
            return cls._match_id(url)
        except (IndexError, AttributeError):
            return None
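
    # Note: _match_id assumes that _VALID_URL defines a named group "id".
    # An illustrative (hypothetical) pattern:
    #   _VALID_URL = r'https?://example\.com/v/(?P<id>[0-9a-z]+)'
    # would make _match_id('https://example.com/v/abc123') return 'abc123'.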

    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
        return cls._WORKING

    @classmethod
    def supports_login(cls):
        return bool(cls._NETRC_MACHINE)

    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        self._printed_messages = set()
        self._initialize_geo_bypass({
            'countries': self._GEO_COUNTRIES,
            'ip_blocks': self._GEO_IP_BLOCKS,
        })
        if not self._ready:
            self._initialize_pre_login()
            if self.supports_login():
                username, password = self._get_login_info()
                if username:
                    self._perform_login(username, password)
            elif self.get_param('username') and False not in (self.IE_DESC, self._NETRC_MACHINE):
                self.report_warning(f'Login with password is not supported for this website. {self._login_hint("cookies")}')
            self._real_initialize()
            self._ready = True

    def _initialize_geo_bypass(self, geo_bypass_context):
        """
        Initialize geo restriction bypass mechanism.

        This method is used to initialize the geo bypass mechanism, which is
        based on faking the X-Forwarded-For HTTP header. A random country from
        the provided country list is selected, and a random IP belonging to
        this country is generated. This IP will be passed as the
        X-Forwarded-For HTTP header in all subsequent HTTP requests.

        This method is called during instance initialization to set up the
        initial geo bypass using _GEO_COUNTRIES and _GEO_IP_BLOCKS.

        You may also call it manually from an extractor's code if the geo
        bypass information is not available beforehand (e.g. it is obtained
        during extraction) or for some other reason. In this case you should
        pass this information in the geo bypass context passed as the first
        argument. It may contain the following fields:

        countries:              List of geo unrestricted countries (similar
                                to _GEO_COUNTRIES)
        ip_blocks:              List of geo unrestricted IP blocks in CIDR notation
                                (similar to _GEO_IP_BLOCKS)
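
        For example, a hypothetical extractor that only learns the allowed
        countries during extraction might call:

            self._initialize_geo_bypass({'countries': ['DE', 'FR']})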

        """
        if not self._x_forwarded_for_ip:

            # Geo bypass mechanism is explicitly disabled by user
            if not self.get_param('geo_bypass', True):
                return

            if not geo_bypass_context:
                geo_bypass_context = {}

            # Backward compatibility: previously _initialize_geo_bypass
            # expected a list of countries, some 3rd party code may still use
            # it this way
            if isinstance(geo_bypass_context, (list, tuple)):
                geo_bypass_context = {
                    'countries': geo_bypass_context,
                }

            # The whole point of geo bypass mechanism is to fake IP
            # as X-Forwarded-For HTTP header based on some IP block or
            # country code.

            # Path 1: bypassing based on IP block in CIDR notation

            # Explicit IP block specified by user, use it right away
            # regardless of whether extractor is geo bypassable or not
            ip_block = self.get_param('geo_bypass_ip_block', None)

            # Otherwise use random IP block from geo bypass context but only
            # if extractor is known as geo bypassable
            if not ip_block:
                ip_blocks = geo_bypass_context.get('ip_blocks')
                if self._GEO_BYPASS and ip_blocks:
                    ip_block = random.choice(ip_blocks)

            if ip_block:
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
                self.write_debug(f'Using fake IP {self._x_forwarded_for_ip} as X-Forwarded-For')
                return

            # Path 2: bypassing based on country code

            # Explicit country code specified by user, use it right away
            # regardless of whether extractor is geo bypassable or not
            country = self.get_param('geo_bypass_country', None)

            # Otherwise use random country code from geo bypass context but
            # only if extractor is known as geo bypassable
            if not country:
                countries = geo_bypass_context.get('countries')
                if self._GEO_BYPASS and countries:
                    country = random.choice(countries)

            if country:
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
                self._downloader.write_debug(
                    f'Using fake IP {self._x_forwarded_for_ip} ({country.upper()}) as X-Forwarded-For')

    def extract(self, url):
        """Extracts URL information and returns it as an info dict."""
        try:
            for _ in range(2):
                try:
                    self.initialize()
                    self.to_screen('Extracting URL: %s' % (
                        url if self.get_param('verbose') else truncate_string(url, 100, 20)))
                    ie_result = self._real_extract(url)
                    if ie_result is None:
                        return None
                    if self._x_forwarded_for_ip:
                        ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
                    subtitles = ie_result.get('subtitles') or {}
                    if 'no-live-chat' in self.get_param('compat_opts'):
                        for lang in ('live_chat', 'comments', 'danmaku'):
                            subtitles.pop(lang, None)
                    return ie_result
                except GeoRestrictedError as e:
                    if self.__maybe_fake_ip_and_retry(e.countries):
                        continue
                    raise
        except UnsupportedError:
            raise
        except ExtractorError as e:
            e.video_id = e.video_id or self.get_temp_id(url)
            e.ie = e.ie or self.IE_NAME
            e.traceback = e.traceback or sys.exc_info()[2]
            raise
        except http.client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True, video_id=self.get_temp_id(url))
        except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url))
    def __maybe_fake_ip_and_retry(self, countries):
        if (not self.get_param('geo_bypass_country', None)
                and self._GEO_BYPASS
                and self.get_param('geo_bypass', True)
                and not self._x_forwarded_for_ip
                and countries):
            country_code = random.choice(countries)
            self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
            if self._x_forwarded_for_ip:
                self.report_warning(
                    'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
                    % (self._x_forwarded_for_ip, country_code.upper()))
                return True
        return False

    def set_downloader(self, downloader):
        """Sets a YoutubeDL instance as the downloader for this IE."""
        self._downloader = downloader

    @property
    def cache(self):
        return self._downloader.cache

    @property
    def cookiejar(self):
        return self._downloader.cookiejar

    def _initialize_pre_login(self):
        """Initialization before login. Redefine in subclasses."""
        pass

    def _perform_login(self, username, password):
        """Login with username and password. Redefine in subclasses."""
        pass

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        raise NotImplementedError('This method must be implemented by subclasses')

    @classmethod
    def ie_key(cls):
        """A string for getting the InfoExtractor with get_info_extractor"""
        return cls.__name__[:-2]

    @classproperty
    def IE_NAME(cls):
        return cls.__name__[:-2]

    @staticmethod
    def __can_accept_status_code(err, expected_status):
        assert isinstance(err, urllib.error.HTTPError)
        if expected_status is None:
            return False
        elif callable(expected_status):
            return expected_status(err.code) is True
        else:
            return err.code in variadic(expected_status)

    def _create_request(self, url_or_request, data=None, headers=None, query=None):
        if isinstance(url_or_request, urllib.request.Request):
            return update_Request(url_or_request, data=data, headers=headers, query=query)
        if query:
            url_or_request = update_url_query(url_or_request, query)
        return sanitized_Request(url_or_request, data, headers or {})

    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers=None, query=None, expected_status=None):
        """
        Return the response handle.

        See _download_webpage docstring for arguments specification.
        """
        if not self._downloader._first_webpage_request:
            sleep_interval = self.get_param('sleep_interval_requests') or 0
            if sleep_interval > 0:
                self.to_screen('Sleeping %s seconds ...' % sleep_interval)
                time.sleep(sleep_interval)
        else:
            self._downloader._first_webpage_request = False

        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            if video_id is None:
                self.to_screen(str(note))
            else:
                self.to_screen(f'{video_id}: {note}')

        # Some sites check X-Forwarded-For HTTP header in order to figure out
        # the origin of the client behind proxy. This allows bypassing geo
        # restriction by faking this header's value to an IP that belongs to
        # some geo unrestricted country. We will do so once we encounter any
        # geo restriction error.
        if self._x_forwarded_for_ip:
            headers = (headers or {}).copy()
            headers.setdefault('X-Forwarded-For', self._x_forwarded_for_ip)

        try:
            return self._downloader.urlopen(self._create_request(url_or_request, data, headers, query))
        except network_exceptions as err:
            if isinstance(err, urllib.error.HTTPError):
                if self.__can_accept_status_code(err, expected_status):
                    # Retain reference to error to prevent file object from
                    # being closed before it can be read. Works around the
                    # effects of <https://bugs.python.org/issue15002>
                    # introduced in Python 3.4.1.
                    err.fp._error = err
                    return err.fp

            if errnote is False:
                return False
            if errnote is None:
                errnote = 'Unable to download webpage'

            errmsg = f'{errnote}: {error_to_compat_str(err)}'
            if fatal:
                raise ExtractorError(errmsg, cause=err)
            else:
                self.report_warning(errmsg)
                return False

    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True,
                                 encoding=None, data=None, headers={}, query={}, expected_status=None):
        """
        Return a tuple (page content as string, URL handle).

        Arguments:
        url_or_request -- plain text URL as a string or
            a urllib.request.Request object
        video_id -- Video/playlist/item identifier (string)

        Keyword arguments:
        note -- note printed before downloading (string)
        errnote -- note printed in case of an error (string)
        fatal -- flag denoting whether error should be considered fatal,
            i.e. whether it should cause ExtractorError to be raised,
            otherwise a warning will be reported and extraction continued
        encoding -- encoding for a page content decoding, guessed automatically
            when not explicitly specified
        data -- POST data (bytes)
        headers -- HTTP headers (dict)
        query -- URL query (dict)
        expected_status -- allows accepting failed HTTP requests (non 2xx
            status code) by explicitly specifying a set of accepted status
            codes. Can be any of the following entities:
                - an integer type specifying an exact failed status code to
                  accept
                - a list or a tuple of integer types specifying a list of
                  failed status codes to accept
                - a callable accepting an actual failed status code and
                  returning True if it should be accepted
            Note that this argument does not affect success status codes (2xx)
            which are always accepted.
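
        For instance, to also accept a 404 response instead of raising
        (hypothetical call):

            webpage, urlh = self._download_webpage_handle(
                url, video_id, expected_status=404)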
875 """
876
877 # Strip hashes from the URL (#1038)
878 if isinstance(url_or_request, str):
879 url_or_request = url_or_request.partition('#')[0]
880
881 urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
882 if urlh is False:
883 assert not fatal
884 return False
885 content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
886 return (content, urlh)
887
888 @staticmethod
889 def _guess_encoding_from_content(content_type, webpage_bytes):
890 m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
891 if m:
892 encoding = m.group(1)
893 else:
894 m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
895 webpage_bytes[:1024])
896 if m:
897 encoding = m.group(1).decode('ascii')
898 elif webpage_bytes.startswith(b'\xff\xfe'):
899 encoding = 'utf-16'
900 else:
901 encoding = 'utf-8'
902
903 return encoding

    def __check_blocked(self, content):
        first_block = content[:512]
        if ('<title>Access to this site is blocked</title>' in content
                and 'Websense' in first_block):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in first_block:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)
        if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
                and 'blocklist.rkn.gov.ru' in content):
            raise ExtractorError(
                'Access to this webpage has been blocked by decision of the Russian government. '
                'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
                expected=True)

    def _request_dump_filename(self, url, video_id):
        basen = f'{video_id}_{url}'
        trim_length = self.get_param('trim_file_name') or 240
        if len(basen) > trim_length:
            h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
            basen = basen[:trim_length - len(h)] + h
        filename = sanitize_filename(f'{basen}.dump', restricted=True)
        # Working around MAX_PATH limitation on Windows (see
        # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
        if compat_os_name == 'nt':
            absfilepath = os.path.abspath(filename)
            if len(absfilepath) > 259:
                filename = fR'\\?\{absfilepath}'
        return filename

    def __decode_webpage(self, webpage_bytes, encoding, headers):
        if not encoding:
            encoding = self._guess_encoding_from_content(headers.get('Content-Type', ''), webpage_bytes)
        try:
            return webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            return webpage_bytes.decode('utf-8', 'replace')

    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if self.get_param('dump_intermediate_pages', False):
            self.to_screen('Dumping request to ' + urlh.geturl())
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self.get_param('write_pages'):
            filename = self._request_dump_filename(urlh.geturl(), video_id)
            self.to_screen(f'Saving request to {filename}')
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)

        content = self.__decode_webpage(webpage_bytes, encoding, urlh.headers)
        self.__check_blocked(content)

        return content

    def __print_error(self, errnote, fatal, video_id, err):
        if fatal:
            raise ExtractorError(f'{video_id}: {errnote}', cause=err)
        elif errnote:
            self.report_warning(f'{video_id}: {errnote}: {err}')

    def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True, errnote=None):
        if transform_source:
            xml_string = transform_source(xml_string)
        try:
            return compat_etree_fromstring(xml_string.encode('utf-8'))
        except xml.etree.ElementTree.ParseError as ve:
            self.__print_error('Failed to parse XML' if errnote is None else errnote, fatal, video_id, ve)

    def _parse_json(self, json_string, video_id, transform_source=None, fatal=True, errnote=None, **parser_kwargs):
        try:
            return json.loads(
                json_string, cls=LenientJSONDecoder, strict=False, transform_source=transform_source, **parser_kwargs)
        except ValueError as ve:
            self.__print_error('Failed to parse JSON' if errnote is None else errnote, fatal, video_id, ve)

    def _parse_socket_response_as_json(self, data, *args, **kwargs):
        return self._parse_json(data[data.find('{'):data.rfind('}') + 1], *args, **kwargs)
    def __create_download_methods(name, parser, note, errnote, return_value):

        def parse(ie, content, *args, errnote=errnote, **kwargs):
            if parser is None:
                return content
            if errnote is False:
                kwargs['errnote'] = errnote
            # parser is fetched by name so subclasses can override it
            return getattr(ie, parser)(content, *args, **kwargs)

        def download_handle(self, url_or_request, video_id, note=note, errnote=errnote, transform_source=None,
                            fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
            res = self._download_webpage_handle(
                url_or_request, video_id, note=note, errnote=errnote, fatal=fatal, encoding=encoding,
                data=data, headers=headers, query=query, expected_status=expected_status)
            if res is False:
                return res
            content, urlh = res
            return parse(self, content, video_id, transform_source=transform_source, fatal=fatal, errnote=errnote), urlh

        def download_content(self, url_or_request, video_id, note=note, errnote=errnote, transform_source=None,
                             fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
            if self.get_param('load_pages'):
                url_or_request = self._create_request(url_or_request, data, headers, query)
                filename = self._request_dump_filename(url_or_request.full_url, video_id)
                self.to_screen(f'Loading request from {filename}')
                try:
                    with open(filename, 'rb') as dumpf:
                        webpage_bytes = dumpf.read()
                except OSError as e:
                    self.report_warning(f'Unable to load request from disk: {e}')
                else:
                    content = self.__decode_webpage(webpage_bytes, encoding, url_or_request.headers)
                    return parse(self, content, video_id, transform_source=transform_source, fatal=fatal, errnote=errnote)
            kwargs = {
                'note': note,
                'errnote': errnote,
                'transform_source': transform_source,
                'fatal': fatal,
                'encoding': encoding,
                'data': data,
                'headers': headers,
                'query': query,
                'expected_status': expected_status,
            }
            if parser is None:
                kwargs.pop('transform_source')
            # The method is fetched by name so subclasses can override _download_..._handle
            res = getattr(self, download_handle.__name__)(url_or_request, video_id, **kwargs)
            return res if res is False else res[0]

        def impersonate(func, name, return_value):
            func.__name__, func.__qualname__ = name, f'InfoExtractor.{name}'
            func.__doc__ = f'''
                @param transform_source     Apply this transformation before parsing
                @returns                    {return_value}

                See _download_webpage_handle docstring for other arguments specification
            '''

        impersonate(download_handle, f'_download_{name}_handle', f'({return_value}, URL handle)')
        impersonate(download_content, f'_download_{name}', f'{return_value}')
        return download_handle, download_content

    _download_xml_handle, _download_xml = __create_download_methods(
        'xml', '_parse_xml', 'Downloading XML', 'Unable to download XML', 'xml as an xml.etree.ElementTree.Element')
    _download_json_handle, _download_json = __create_download_methods(
        'json', '_parse_json', 'Downloading JSON metadata', 'Unable to download JSON metadata', 'JSON object as a dict')
    _download_socket_json_handle, _download_socket_json = __create_download_methods(
        'socket_json', '_parse_socket_response_as_json', 'Polling socket', 'Unable to poll socket', 'JSON object as a dict')
    __download_webpage = __create_download_methods('webpage', None, None, None, 'data of the page as a string')[1]

    def _download_webpage(
            self, url_or_request, video_id, note=None, errnote=None,
            fatal=True, tries=1, timeout=NO_DEFAULT, *args, **kwargs):
        """
        Return the data of the page as a string.

        Keyword arguments:
        tries -- number of tries
        timeout -- sleep interval between tries

        See _download_webpage_handle docstring for other arguments specification.
        """

        R''' # NB: These are unused; should they be deprecated?
        if tries != 1:
            self._downloader.deprecation_warning('tries argument is deprecated in InfoExtractor._download_webpage')
        if timeout is NO_DEFAULT:
            timeout = 5
        else:
            self._downloader.deprecation_warning('timeout argument is deprecated in InfoExtractor._download_webpage')
        '''

        try_count = 0
        while True:
            try:
                return self.__download_webpage(url_or_request, video_id, note, errnote, None, fatal, *args, **kwargs)
            except http.client.IncompleteRead as e:
                try_count += 1
                if try_count >= tries:
                    raise e
                self._sleep(timeout, video_id)

    def report_warning(self, msg, video_id=None, *args, only_once=False, **kwargs):
        idstr = format_field(video_id, None, '%s: ')
        msg = f'[{self.IE_NAME}] {idstr}{msg}'
        if only_once:
            if f'WARNING: {msg}' in self._printed_messages:
                return
            self._printed_messages.add(f'WARNING: {msg}')
        self._downloader.report_warning(msg, *args, **kwargs)

    def to_screen(self, msg, *args, **kwargs):
        """Print msg to screen, prefixing it with '[ie_name]'"""
        self._downloader.to_screen(f'[{self.IE_NAME}] {msg}', *args, **kwargs)

    def write_debug(self, msg, *args, **kwargs):
        self._downloader.write_debug(f'[{self.IE_NAME}] {msg}', *args, **kwargs)

    def get_param(self, name, default=None, *args, **kwargs):
        if self._downloader:
            return self._downloader.params.get(name, default, *args, **kwargs)
        return default

    def report_drm(self, video_id, partial=NO_DEFAULT):
        if partial is not NO_DEFAULT:
            self._downloader.deprecation_warning('InfoExtractor.report_drm no longer accepts the argument partial')
        self.raise_no_formats('This video is DRM protected', expected=True, video_id=video_id)

    def report_extraction(self, id_or_name):
        """Report information extraction."""
        self.to_screen('%s: Extracting information' % id_or_name)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self.to_screen('%s: Downloading webpage' % video_id)

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen('Confirming age')

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen('Logging in')

    def raise_login_required(
            self, msg='This video is only available for registered users',
            metadata_available=False, method=NO_DEFAULT):
        if metadata_available and (
                self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
            self.report_warning(msg)
            return
        msg += format_field(self._login_hint(method), None, '. %s')
        raise ExtractorError(msg, expected=True)

    def raise_geo_restricted(
            self, msg='This video is not available from your location due to geo restriction',
            countries=None, metadata_available=False):
        if metadata_available and (
                self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
            self.report_warning(msg)
        else:
            raise GeoRestrictedError(msg, countries=countries)

    def raise_no_formats(self, msg, expected=False, video_id=None):
        if expected and (
                self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
            self.report_warning(msg, video_id)
        elif isinstance(msg, ExtractorError):
            raise msg
        else:
            raise ExtractorError(msg, expected=expected, video_id=video_id)

    # Methods for following #608
    @staticmethod
    def url_result(url, ie=None, video_id=None, video_title=None, *, url_transparent=False, **kwargs):
        """Returns a URL that points to a page that should be processed"""
        if ie is not None:
            kwargs['ie_key'] = ie if isinstance(ie, str) else ie.ie_key()
        if video_id is not None:
            kwargs['id'] = video_id
        if video_title is not None:
            kwargs['title'] = video_title
        return {
            **kwargs,
            '_type': 'url_transparent' if url_transparent else 'url',
            'url': url,
        }
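
    # For example (hypothetical values), delegating a discovered link to the
    # YouTube extractor:
    #   return self.url_result('https://www.youtube.com/watch?v=BaW_jenozKc', ie='Youtube')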

    @classmethod
    def playlist_from_matches(cls, matches, playlist_id=None, playlist_title=None,
                              getter=IDENTITY, ie=None, video_kwargs=None, **kwargs):
        return cls.playlist_result(
            (cls.url_result(m, ie, **(video_kwargs or {})) for m in orderedSet(map(getter, matches), lazy=True)),
            playlist_id, playlist_title, **kwargs)

    @staticmethod
    def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None, *, multi_video=False, **kwargs):
        """Returns a playlist"""
        if playlist_id:
            kwargs['id'] = playlist_id
        if playlist_title:
            kwargs['title'] = playlist_title
        if playlist_description is not None:
            kwargs['description'] = playlist_description
        return {
            **kwargs,
            '_type': 'multi_video' if multi_video else 'playlist',
            'entries': entries,
        }

    def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Perform a regex search on the given string, using a single pattern or a
        list of patterns, returning the first matching group.
        In case of failure, return a default value, or emit a warning or raise
        a RegexNotFoundError (depending on fatal), specifying the field name.
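
        Example (hypothetical pattern and page content):

            uploader = self._search_regex(
                r'<span class="uploader">([^<]+)</span>',
                webpage, 'uploader', fatal=False)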
1217 """
1218 if string is None:
1219 mobj = None
1220 elif isinstance(pattern, (str, re.Pattern)):
1221 mobj = re.search(pattern, string, flags)
1222 else:
1223 for p in pattern:
1224 mobj = re.search(p, string, flags)
1225 if mobj:
1226 break
1227
1228 _name = self._downloader._format_err(name, self._downloader.Styles.EMPHASIS)
1229
1230 if mobj:
1231 if group is None:
1232 # return the first matching group
1233 return next(g for g in mobj.groups() if g is not None)
1234 elif isinstance(group, (list, tuple)):
1235 return tuple(mobj.group(g) for g in group)
1236 else:
1237 return mobj.group(group)
1238 elif default is not NO_DEFAULT:
1239 return default
1240 elif fatal:
1241 raise RegexNotFoundError('Unable to extract %s' % _name)
1242 else:
1243 self.report_warning('unable to extract %s' % _name + bug_reports_message())
1244 return None
1245
    def _search_json(self, start_pattern, string, name, video_id, *, end_pattern='',
                     contains_pattern=r'{(?s:.+)}', fatal=True, default=NO_DEFAULT, **kwargs):
        """Searches string for the JSON object specified by start_pattern"""
        # NB: end_pattern is only used to reduce the size of the initial match
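        # For example (hypothetical page content), extracting a JS object
        # assigned to a global variable:
        #   data = self._search_json(r'window\.__DATA__\s*=', webpage, 'page data', video_id)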
        if default is NO_DEFAULT:
            default, has_default = {}, False
        else:
            fatal, has_default = False, True

        json_string = self._search_regex(
            rf'(?:{start_pattern})\s*(?P<json>{contains_pattern})\s*(?:{end_pattern})',
            string, name, group='json', fatal=fatal, default=None if has_default else NO_DEFAULT)
        if not json_string:
            return default

        _name = self._downloader._format_err(name, self._downloader.Styles.EMPHASIS)
        try:
            return self._parse_json(json_string, video_id, ignore_extra=True, **kwargs)
        except ExtractorError as e:
            if fatal:
                raise ExtractorError(
                    f'Unable to extract {_name} - Failed to parse JSON', cause=e.cause, video_id=video_id)
            elif not has_default:
                self.report_warning(
                    f'Unable to extract {_name} - Failed to parse JSON: {e}', video_id=video_id)
        return default

    def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Like _search_regex, but strips HTML tags and unescapes entities.
        """
        res = self._search_regex(pattern, string, name, default, fatal, flags, group)
        if isinstance(res, tuple):
            return tuple(map(clean_html, res))
        return clean_html(res)

    def _get_netrc_login_info(self, netrc_machine=None):
        username = None
        password = None
        netrc_machine = netrc_machine or self._NETRC_MACHINE

        if self.get_param('usenetrc', False):
            try:
                netrc_file = compat_expanduser(self.get_param('netrc_location') or '~')
                if os.path.isdir(netrc_file):
                    netrc_file = os.path.join(netrc_file, '.netrc')
                info = netrc.netrc(file=netrc_file).authenticators(netrc_machine)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError(
                        'No authenticators for %s' % netrc_machine)
            except (OSError, netrc.NetrcParseError) as err:
                self.report_warning(
                    'parsing .netrc: %s' % error_to_compat_str(err))

        return username, password

    def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
        """
        Get the login info as (username, password)
        First look for the manually specified credentials using username_option
        and password_option as keys in the params dictionary. If no such
        credentials are available, look in the netrc file using the
        netrc_machine or _NETRC_MACHINE value.
        If there's no info available, return (None, None)
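
        A matching .netrc entry (machine name per _NETRC_MACHINE; the
        credentials here are made up) would look like:

            machine example login myaccount password mypassword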
1313 """
1314
1315 # Attempt to use provided username and password or .netrc data
1316 username = self.get_param(username_option)
1317 if username is not None:
1318 password = self.get_param(password_option)
1319 else:
1320 username, password = self._get_netrc_login_info(netrc_machine)
1321
1322 return username, password
1323
1324 def _get_tfa_info(self, note='two-factor verification code'):
1325 """
1326 Get the two-factor authentication info
1327 TODO - asking the user will be required for sms/phone verify
1328 currently just uses the command line option
1329 If there's no info available, return None
1330 """
1331
1332 tfa = self.get_param('twofactor')
1333 if tfa is not None:
1334 return tfa
1335
1336 return getpass.getpass('Type %s and press [Return]: ' % note)
1337
1338 # Helper functions for extracting OpenGraph info
1339 @staticmethod
1340 def _og_regexes(prop):
1341 content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?)(?=\s|/?>))'
1342 property_re = (r'(?:name|property)=(?:\'og%(sep)s%(prop)s\'|"og%(sep)s%(prop)s"|\s*og%(sep)s%(prop)s\b)'
1343 % {'prop': re.escape(prop), 'sep': '(?:&#x3A;|[:-])'})
1344 template = r'<meta[^>]+?%s[^>]+?%s'
1345 return [
1346 template % (property_re, content_re),
1347 template % (content_re, property_re),
1348 ]
1349
1350 @staticmethod
1351 def _meta_regex(prop):
1352 return r'''(?isx)<meta
1353 (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
1354 [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
1355
1356 def _og_search_property(self, prop, html, name=None, **kargs):
1357 prop = variadic(prop)
1358 if name is None:
1359 name = 'OpenGraph %s' % prop[0]
1360 og_regexes = []
1361 for p in prop:
1362 og_regexes.extend(self._og_regexes(p))
1363 escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
1364 if escaped is None:
1365 return None
1366 return unescapeHTML(escaped)
1367
1368 def _og_search_thumbnail(self, html, **kargs):
1369 return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
1370
1371 def _og_search_description(self, html, **kargs):
1372 return self._og_search_property('description', html, fatal=False, **kargs)
1373
1374 def _og_search_title(self, html, *, fatal=False, **kargs):
1375 return self._og_search_property('title', html, fatal=fatal, **kargs)
1376
1377 def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
1378 regexes = self._og_regexes('video') + self._og_regexes('video:url')
1379 if secure:
1380 regexes = self._og_regexes('video:secure_url') + regexes
1381 return self._html_search_regex(regexes, html, name, **kargs)
1382
1383 def _og_search_url(self, html, **kargs):
1384 return self._og_search_property('url', html, **kargs)
1385
1386 def _html_extract_title(self, html, name='title', *, fatal=False, **kwargs):
1387 return self._html_search_regex(r'(?s)<title\b[^>]*>([^<]+)</title>', html, name, fatal=fatal, **kwargs)
1388
1389 def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
1390 name = variadic(name)
1391 if display_name is None:
1392 display_name = name[0]
1393 return self._html_search_regex(
1394 [self._meta_regex(n) for n in name],
1395 html, display_name, fatal=fatal, group='content', **kwargs)
1396
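# e.g. for a hypothetical page containing
#   <meta name="description" content="An example clip">
# self._html_search_meta('description', webpage) returns 'An example clip'.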
1397 def _dc_search_uploader(self, html):
1398 return self._html_search_meta('dc.creator', html, 'uploader')
1399
1400 @staticmethod
1401 def _rta_search(html):
1402 # See http://www.rtalabel.org/index.php?content=howtofaq#single
1403 if re.search(r'(?ix)<meta\s+name="rating"\s+'
1404 r' content="RTA-5042-1996-1400-1577-RTA"',
1405 html):
1406 return 18
1407
1408 # And then there are the jokers who advertise that they use RTA, but actually don't.
1409 AGE_LIMIT_MARKERS = [
1410 r'Proudly Labeled <a href="http://www\.rtalabel\.org/" title="Restricted to Adults">RTA</a>',
1411 r'>[^<]*you acknowledge you are at least (\d+) years old',
1412 r'>\s*(?:18\s+U(?:\.S\.C\.|SC)\s+)?(?:§+\s*)?2257\b',
1413 ]
1414
1415 age_limit = 0
1416 for marker in AGE_LIMIT_MARKERS:
1417 mobj = re.search(marker, html)
1418 if mobj:
1419 age_limit = max(age_limit, int(traverse_obj(mobj, 1, default=18)))
1420 return age_limit
1421
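# Illustrative sketch: a page stating '... you acknowledge you are at least
# 21 years old' (hypothetical) yields age_limit 21; markers that capture no
# explicit age default to 18.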
1422 def _media_rating_search(self, html):
1423 # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
1424 rating = self._html_search_meta('rating', html)
1425
1426 if not rating:
1427 return None
1428
1429 RATING_TABLE = {
1430 'safe for kids': 0,
1431 'general': 8,
1432 '14 years': 14,
1433 'mature': 17,
1434 'restricted': 19,
1435 }
1436 return RATING_TABLE.get(rating.lower())
1437
1438 def _family_friendly_search(self, html):
1439 # See http://schema.org/VideoObject
1440 family_friendly = self._html_search_meta(
1441 'isFamilyFriendly', html, default=None)
1442
1443 if not family_friendly:
1444 return None
1445
1446 RATING_TABLE = {
1447 '1': 0,
1448 'true': 0,
1449 '0': 18,
1450 'false': 18,
1451 }
1452 return RATING_TABLE.get(family_friendly.lower())
1453
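# e.g. <meta itemprop="isFamilyFriendly" content="false"> maps to an
# age_limit of 18, while content="true" maps to 0 (no restriction).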
1454 def _twitter_search_player(self, html):
1455 return self._html_search_meta('twitter:player', html,
1456 'twitter card player')
1457
1458 def _yield_json_ld(self, html, video_id, *, fatal=True, default=NO_DEFAULT):
1459 """Yield all json ld objects in the html"""
1460 if default is not NO_DEFAULT:
1461 fatal = False
1462 for mobj in re.finditer(JSON_LD_RE, html):
1463 json_ld_item = self._parse_json(mobj.group('json_ld'), video_id, fatal=fatal)
1464 for json_ld in variadic(json_ld_item):
1465 if isinstance(json_ld, dict):
1466 yield json_ld
1467
1468 def _search_json_ld(self, html, video_id, expected_type=None, *, fatal=True, default=NO_DEFAULT):
1469 """Search for a video in any json ld in the html"""
1470 if default is not NO_DEFAULT:
1471 fatal = False
1472 info = self._json_ld(
1473 list(self._yield_json_ld(html, video_id, fatal=fatal, default=default)),
1474 video_id, fatal=fatal, expected_type=expected_type)
1475 if info:
1476 return info
1477 if default is not NO_DEFAULT:
1478 return default
1479 elif fatal:
1480 raise RegexNotFoundError('Unable to extract JSON-LD')
1481 else:
1482 self.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
1483 return {}
1484
1485 def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
1486 if isinstance(json_ld, str):
1487 json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
1488 if not json_ld:
1489 return {}
1490 info = {}
1491
1492 INTERACTION_TYPE_MAP = {
1493 'CommentAction': 'comment',
1494 'AgreeAction': 'like',
1495 'DisagreeAction': 'dislike',
1496 'LikeAction': 'like',
1497 'DislikeAction': 'dislike',
1498 'ListenAction': 'view',
1499 'WatchAction': 'view',
1500 'ViewAction': 'view',
1501 }
1502
1503 def is_type(e, *expected_types):
1504 type = variadic(traverse_obj(e, '@type'))
1505 return any(x in type for x in expected_types)
1506
1507 def extract_interaction_type(e):
1508 interaction_type = e.get('interactionType')
1509 if isinstance(interaction_type, dict):
1510 interaction_type = interaction_type.get('@type')
1511 return str_or_none(interaction_type)
1512
1513 def extract_interaction_statistic(e):
1514 interaction_statistic = e.get('interactionStatistic')
1515 if isinstance(interaction_statistic, dict):
1516 interaction_statistic = [interaction_statistic]
1517 if not isinstance(interaction_statistic, list):
1518 return
1519 for is_e in interaction_statistic:
1520 if not is_type(is_e, 'InteractionCounter'):
1521 continue
1522 interaction_type = extract_interaction_type(is_e)
1523 if not interaction_type:
1524 continue
1525 # For the interaction count, some sites provide a string instead of
1526 # an integer (as per the spec), possibly with non-digit characters
1527 # (e.g. ","), so extract the count with the more relaxed str_to_int
1528 interaction_count = str_to_int(is_e.get('userInteractionCount'))
1529 if interaction_count is None:
1530 continue
1531 count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
1532 if not count_kind:
1533 continue
1534 count_key = '%s_count' % count_kind
1535 if info.get(count_key) is not None:
1536 continue
1537 info[count_key] = interaction_count
1538
1539 def extract_chapter_information(e):
1540 chapters = [{
1541 'title': part.get('name'),
1542 'start_time': part.get('startOffset'),
1543 'end_time': part.get('endOffset'),
1544 } for part in variadic(e.get('hasPart') or []) if part.get('@type') == 'Clip']
1545 for idx, (last_c, current_c, next_c) in enumerate(zip(
1546 [{'end_time': 0}] + chapters, chapters, chapters[1:])):
1547 current_c['end_time'] = current_c['end_time'] or next_c['start_time']
1548 current_c['start_time'] = current_c['start_time'] or last_c['end_time']
1549 if None in current_c.values():
1550 self.report_warning(f'Chapter {idx} contains broken data. Not extracting chapters')
1551 return
1552 if chapters:
1553 chapters[-1]['end_time'] = chapters[-1]['end_time'] or info['duration']
1554 info['chapters'] = chapters
1555
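# Illustrative sketch: two Clip parts with names and only start offsets, e.g.
#   [{'title': 'Intro', 'start_time': 0}, {'title': 'Main', 'start_time': 60}]
# are completed to end_time 60 for the first chapter and to the overall
# info['duration'] for the last one (assuming the duration is known).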
1556 def extract_video_object(e):
1557 author = e.get('author')
1558 info.update({
1559 'url': url_or_none(e.get('contentUrl')),
1560 'ext': mimetype2ext(e.get('encodingFormat')),
1561 'title': unescapeHTML(e.get('name')),
1562 'description': unescapeHTML(e.get('description')),
1563 'thumbnails': [{'url': unescapeHTML(url)}
1564 for url in variadic(traverse_obj(e, 'thumbnailUrl', 'thumbnailURL'))
1565 if url_or_none(url)],
1566 'duration': parse_duration(e.get('duration')),
1567 'timestamp': unified_timestamp(e.get('uploadDate')),
1568 # author can be an instance of the 'Organization' or 'Person' types;
1569 # both can have a 'name' property (inherited from the 'Thing' type). [1]
1570 # However, some websites use the plain 'Text' type instead.
1571 # 1. https://schema.org/VideoObject
1572 'uploader': author.get('name') if isinstance(author, dict) else author if isinstance(author, str) else None,
1573 'artist': traverse_obj(e, ('byArtist', 'name'), expected_type=str),
1574 'filesize': int_or_none(float_or_none(e.get('contentSize'))),
1575 'tbr': int_or_none(e.get('bitrate')),
1576 'width': int_or_none(e.get('width')),
1577 'height': int_or_none(e.get('height')),
1578 'view_count': int_or_none(e.get('interactionCount')),
1579 'tags': try_call(lambda: e.get('keywords').split(',')),
1580 })
1581 if is_type(e, 'AudioObject'):
1582 info.update({
1583 'vcodec': 'none',
1584 'abr': int_or_none(e.get('bitrate')),
1585 })
1586 extract_interaction_statistic(e)
1587 extract_chapter_information(e)
1588
1589 def traverse_json_ld(json_ld, at_top_level=True):
1590 for e in variadic(json_ld):
1591 if not isinstance(e, dict):
1592 continue
1593 if at_top_level and '@context' not in e:
1594 continue
1595 if at_top_level and set(e.keys()) == {'@context', '@graph'}:
1596 traverse_json_ld(e['@graph'], at_top_level=False)
1597 continue
1598 if expected_type is not None and not is_type(e, expected_type):
1599 continue
1600 rating = traverse_obj(e, ('aggregateRating', 'ratingValue'), expected_type=float_or_none)
1601 if rating is not None:
1602 info['average_rating'] = rating
1603 if is_type(e, 'TVEpisode', 'Episode'):
1604 episode_name = unescapeHTML(e.get('name'))
1605 info.update({
1606 'episode': episode_name,
1607 'episode_number': int_or_none(e.get('episodeNumber')),
1608 'description': unescapeHTML(e.get('description')),
1609 })
1610 if not info.get('title') and episode_name:
1611 info['title'] = episode_name
1612 part_of_season = e.get('partOfSeason')
1613 if is_type(part_of_season, 'TVSeason', 'Season', 'CreativeWorkSeason'):
1614 info.update({
1615 'season': unescapeHTML(part_of_season.get('name')),
1616 'season_number': int_or_none(part_of_season.get('seasonNumber')),
1617 })
1618 part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
1619 if is_type(part_of_series, 'TVSeries', 'Series', 'CreativeWorkSeries'):
1620 info['series'] = unescapeHTML(part_of_series.get('name'))
1621 elif is_type(e, 'Movie'):
1622 info.update({
1623 'title': unescapeHTML(e.get('name')),
1624 'description': unescapeHTML(e.get('description')),
1625 'duration': parse_duration(e.get('duration')),
1626 'timestamp': unified_timestamp(e.get('dateCreated')),
1627 })
1628 elif is_type(e, 'Article', 'NewsArticle'):
1629 info.update({
1630 'timestamp': parse_iso8601(e.get('datePublished')),
1631 'title': unescapeHTML(e.get('headline')),
1632 'description': unescapeHTML(e.get('articleBody') or e.get('description')),
1633 })
1634 if is_type(traverse_obj(e, ('video', 0)), 'VideoObject'):
1635 extract_video_object(e['video'][0])
1636 elif is_type(traverse_obj(e, ('subjectOf', 0)), 'VideoObject'):
1637 extract_video_object(e['subjectOf'][0])
1638 elif is_type(e, 'VideoObject', 'AudioObject'):
1639 extract_video_object(e)
1640 if expected_type is None:
1641 continue
1642 else:
1643 break
1644 video = e.get('video')
1645 if is_type(video, 'VideoObject'):
1646 extract_video_object(video)
1647 if expected_type is None:
1648 continue
1649 else:
1650 break
1651
1652 traverse_json_ld(json_ld)
1653 return filter_dict(info)
1654
1655 def _search_nextjs_data(self, webpage, video_id, *, transform_source=None, fatal=True, **kw):
1656 return self._parse_json(
1657 self._search_regex(
1658 r'(?s)<script[^>]+id=[\'"]__NEXT_DATA__[\'"][^>]*>([^<]+)</script>',
1659 webpage, 'next.js data', fatal=fatal, **kw),
1660 video_id, transform_source=transform_source, fatal=fatal)
1661
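# Illustrative sketch: Next.js pages embed their page props as JSON, e.g.
#   <script id="__NEXT_DATA__" type="application/json">{"props": {...}}</script>
# and _search_nextjs_data returns that JSON parsed into a dict.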
1662 def _search_nuxt_data(self, webpage, video_id, context_name='__NUXT__', *, fatal=True, traverse=('data', 0)):
1663 """Parses Nuxt.js metadata. This works as long as the function __NUXT__ invokes is a pure function"""
1664 rectx = re.escape(context_name)
1665 FUNCTION_RE = r'\(function\((?P<arg_keys>.*?)\){return\s+(?P<js>{.*?})\s*;?\s*}\((?P<arg_vals>.*?)\)'
1666 js, arg_keys, arg_vals = self._search_regex(
1667 (rf'<script>\s*window\.{rectx}={FUNCTION_RE}\s*\)\s*;?\s*</script>', rf'{rectx}\(.*?{FUNCTION_RE}'),
1668 webpage, context_name, group=('js', 'arg_keys', 'arg_vals'),
1669 default=NO_DEFAULT if fatal else (None, None, None))
1670 if js is None:
1671 return {}
1672
1673 args = dict(zip(arg_keys.split(','), map(json.dumps, self._parse_json(
1674 f'[{arg_vals}]', video_id, transform_source=js_to_json, fatal=fatal) or ())))
1675
1676 ret = self._parse_json(js, video_id, transform_source=functools.partial(js_to_json, vars=args), fatal=fatal)
1677 return traverse_obj(ret, traverse) or {}
1678
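# Illustrative sketch (hypothetical markup): for
#   <script>window.__NUXT__=(function(a,b){return {data:[{title:a}]};}("Clip",1));</script>
# arg names are mapped to their JSON-encoded values and substituted by
# js_to_json, so the default traverse=('data', 0) yields {'title': 'Clip'}.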
1679 @staticmethod
1680 def _hidden_inputs(html):
1681 html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
1682 hidden_inputs = {}
1683 for input in re.findall(r'(?i)(<input[^>]+>)', html):
1684 attrs = extract_attributes(input)
1685 if not attrs:
1686 continue
1687 if attrs.get('type') not in ('hidden', 'submit'):
1688 continue
1689 name = attrs.get('name') or attrs.get('id')
1690 value = attrs.get('value')
1691 if name and value is not None:
1692 hidden_inputs[name] = value
1693 return hidden_inputs
1694
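# e.g. a hypothetical form containing
#   <input type="hidden" name="csrf" value="abc123">
# makes _hidden_inputs return {'csrf': 'abc123'}.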
1695 def _form_hidden_inputs(self, form_id, html):
1696 form = self._search_regex(
1697 r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
1698 html, '%s form' % form_id, group='form')
1699 return self._hidden_inputs(form)
1700
1701 @classproperty(cache=True)
1702 def FormatSort(cls):
1703 class FormatSort(FormatSorter):
1704 def __init__(ie, *args, **kwargs):
1705 super().__init__(ie._downloader, *args, **kwargs)
1706
1707 deprecation_warning(
1708 'yt_dlp.InfoExtractor.FormatSort is deprecated and may be removed in the future. '
1709 'Use yt_dlp.utils.FormatSorter instead')
1710 return FormatSort
1711
1712 def _sort_formats(self, formats, field_preference=[]):
1713 if not field_preference:
1714 self._downloader.deprecation_warning(
1715 'yt_dlp.InfoExtractor._sort_formats is deprecated and is no longer required')
1716 return
1717 self._downloader.deprecation_warning(
1718 'yt_dlp.InfoExtractor._sort_formats is deprecated and no longer works as expected. '
1719 'Return _format_sort_fields in the info_dict instead')
1720 if formats:
1721 formats[0]['__sort_fields'] = field_preference
1722
1723 def _check_formats(self, formats, video_id):
1724 if formats:
1725 formats[:] = filter(
1726 lambda f: self._is_valid_url(
1727 f['url'], video_id,
1728 item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
1729 formats)
1730
1731 @staticmethod
1732 def _remove_duplicate_formats(formats):
1733 format_urls = set()
1734 unique_formats = []
1735 for f in formats:
1736 if f['url'] not in format_urls:
1737 format_urls.add(f['url'])
1738 unique_formats.append(f)
1739 formats[:] = unique_formats
1740
1741 def _is_valid_url(self, url, video_id, item='video', headers={}):
1742 url = self._proto_relative_url(url, scheme='http:')
1743 # For now assume non HTTP(S) URLs always valid
1744 if not (url.startswith('http://') or url.startswith('https://')):
1745 return True
1746 try:
1747 self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
1748 return True
1749 except ExtractorError as e:
1750 self.to_screen(
1751 '%s: %s URL is invalid, skipping: %s'
1752 % (video_id, item, error_to_compat_str(e.cause)))
1753 return False
1754
1755 def http_scheme(self):
1756 """ Either "http:" or "https:", depending on the user's preferences """
1757 return (
1758 'http:'
1759 if self.get_param('prefer_insecure', False)
1760 else 'https:')
1761
1762 def _proto_relative_url(self, url, scheme=None):
1763 scheme = scheme or self.http_scheme()
1764 assert scheme.endswith(':')
1765 return sanitize_url(url, scheme=scheme[:-1])
1766
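# e.g. _proto_relative_url('//cdn.example.com/v.mp4') (hypothetical URL)
# yields 'https://cdn.example.com/v.mp4' unless prefer_insecure is set.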
1767 def _sleep(self, timeout, video_id, msg_template=None):
1768 if msg_template is None:
1769 msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
1770 msg = msg_template % {'video_id': video_id, 'timeout': timeout}
1771 self.to_screen(msg)
1772 time.sleep(timeout)
1773
1774 def _extract_f4m_formats(self, manifest_url, video_id, preference=None, quality=None, f4m_id=None,
1775 transform_source=lambda s: fix_xml_ampersands(s).strip(),
1776 fatal=True, m3u8_id=None, data=None, headers={}, query={}):
1777 if self.get_param('ignore_no_formats_error'):
1778 fatal = False
1779
1780 res = self._download_xml_handle(
1781 manifest_url, video_id, 'Downloading f4m manifest',
1782 'Unable to download f4m manifest',
1783 # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
1784 # (see https://github.com/ytdl-org/youtube-dl/issues/6215#issuecomment-121704244)
1785 transform_source=transform_source,
1786 fatal=fatal, data=data, headers=headers, query=query)
1787 if res is False:
1788 return []
1789
1790 manifest, urlh = res
1791 manifest_url = urlh.geturl()
1792
1793 return self._parse_f4m_formats(
1794 manifest, manifest_url, video_id, preference=preference, quality=quality, f4m_id=f4m_id,
1795 transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
1796
1797 def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, quality=None, f4m_id=None,
1798 transform_source=lambda s: fix_xml_ampersands(s).strip(),
1799 fatal=True, m3u8_id=None):
1800 if not isinstance(manifest, xml.etree.ElementTree.Element) and not fatal:
1801 return []
1802
1803 # currently yt-dlp cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
1804 akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
1805 if akamai_pv is not None and ';' in akamai_pv.text:
1806 playerVerificationChallenge = akamai_pv.text.split(';')[0]
1807 if playerVerificationChallenge.strip() != '':
1808 return []
1809
1810 formats = []
1811 manifest_version = '1.0'
1812 media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
1813 if not media_nodes:
1814 manifest_version = '2.0'
1815 media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
1816 # Remove unsupported DRM-protected media from the final format
1817 # renditions (see https://github.com/ytdl-org/youtube-dl/issues/8573).
1818 media_nodes = remove_encrypted_media(media_nodes)
1819 if not media_nodes:
1820 return formats
1821
1822 manifest_base_url = get_base_url(manifest)
1823
1824 bootstrap_info = xpath_element(
1825 manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
1826 'bootstrap info', default=None)
1827
1828 vcodec = None
1829 mime_type = xpath_text(
1830 manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
1831 'base URL', default=None)
1832 if mime_type and mime_type.startswith('audio/'):
1833 vcodec = 'none'
1834
1835 for i, media_el in enumerate(media_nodes):
1836 tbr = int_or_none(media_el.attrib.get('bitrate'))
1837 width = int_or_none(media_el.attrib.get('width'))
1838 height = int_or_none(media_el.attrib.get('height'))
1839 format_id = join_nonempty(f4m_id, tbr or i)
1840 # If <bootstrapInfo> is present, the specified f4m is a
1841 # stream-level manifest, and only set-level manifests may refer to
1842 # external resources. See section 11.4 and section 4 of F4M spec
1843 if bootstrap_info is None:
1844 media_url = None
1845 # @href is introduced in 2.0, see section 11.6 of F4M spec
1846 if manifest_version == '2.0':
1847 media_url = media_el.attrib.get('href')
1848 if media_url is None:
1849 media_url = media_el.attrib.get('url')
1850 if not media_url:
1851 continue
1852 manifest_url = (
1853 media_url if media_url.startswith('http://') or media_url.startswith('https://')
1854 else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
1855 # If media_url is itself an f4m manifest, extract it recursively,
1856 # since bitrates in the parent manifest (this one) and in the media_url
1857 # manifest may differ, making it impossible to resolve the format by
1858 # the requested bitrate in the f4m downloader
1859 ext = determine_ext(manifest_url)
1860 if ext == 'f4m':
1861 f4m_formats = self._extract_f4m_formats(
1862 manifest_url, video_id, preference=preference, quality=quality, f4m_id=f4m_id,
1863 transform_source=transform_source, fatal=fatal)
1864 # Sometimes a stream-level manifest contains a single media entry that
1865 # lacks any quality metadata (e.g. http://matchtv.ru/#live-player),
1866 # while the parent's media entry in the set-level manifest may contain
1867 # it. We copy it from the parent in such cases.
1868 if len(f4m_formats) == 1:
1869 f = f4m_formats[0]
1870 f.update({
1871 'tbr': f.get('tbr') or tbr,
1872 'width': f.get('width') or width,
1873 'height': f.get('height') or height,
1874 'format_id': f.get('format_id') if not tbr else format_id,
1875 'vcodec': vcodec,
1876 })
1877 formats.extend(f4m_formats)
1878 continue
1879 elif ext == 'm3u8':
1880 formats.extend(self._extract_m3u8_formats(
1881 manifest_url, video_id, 'mp4', preference=preference,
1882 quality=quality, m3u8_id=m3u8_id, fatal=fatal))
1883 continue
1884 formats.append({
1885 'format_id': format_id,
1886 'url': manifest_url,
1887 'manifest_url': manifest_url,
1888 'ext': 'flv' if bootstrap_info is not None else None,
1889 'protocol': 'f4m',
1890 'tbr': tbr,
1891 'width': width,
1892 'height': height,
1893 'vcodec': vcodec,
1894 'preference': preference,
1895 'quality': quality,
1896 })
1897 return formats
1898
1899 def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, quality=None, m3u8_id=None):
1900 return {
1901 'format_id': join_nonempty(m3u8_id, 'meta'),
1902 'url': m3u8_url,
1903 'ext': ext,
1904 'protocol': 'm3u8',
1905 'preference': preference - 100 if preference else -100,
1906 'quality': quality,
1907 'resolution': 'multiple',
1908 'format_note': 'Quality selection URL',
1909 }
1910
1911 def _report_ignoring_subs(self, name):
1912 self.report_warning(bug_reports_message(
1913 f'Ignoring subtitle tracks found in the {name} manifest; '
1914 'if any subtitle tracks are missing,'
1915 ), only_once=True)
1916
1917 def _extract_m3u8_formats(self, *args, **kwargs):
1918 fmts, subs = self._extract_m3u8_formats_and_subtitles(*args, **kwargs)
1919 if subs:
1920 self._report_ignoring_subs('HLS')
1921 return fmts
1922
1923 def _extract_m3u8_formats_and_subtitles(
1924 self, m3u8_url, video_id, ext=None, entry_protocol='m3u8_native',
1925 preference=None, quality=None, m3u8_id=None, note=None,
1926 errnote=None, fatal=True, live=False, data=None, headers={},
1927 query={}):
1928
1929 if self.get_param('ignore_no_formats_error'):
1930 fatal = False
1931
1932 if not m3u8_url:
1933 if errnote is not False:
1934 errnote = errnote or 'Failed to obtain m3u8 URL'
1935 if fatal:
1936 raise ExtractorError(errnote, video_id=video_id)
1937 self.report_warning(f'{errnote}{bug_reports_message()}')
1938 return [], {}
1939
1940 res = self._download_webpage_handle(
1941 m3u8_url, video_id,
1942 note='Downloading m3u8 information' if note is None else note,
1943 errnote='Failed to download m3u8 information' if errnote is None else errnote,
1944 fatal=fatal, data=data, headers=headers, query=query)
1945
1946 if res is False:
1947 return [], {}
1948
1949 m3u8_doc, urlh = res
1950 m3u8_url = urlh.geturl()
1951
1952 return self._parse_m3u8_formats_and_subtitles(
1953 m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
1954 preference=preference, quality=quality, m3u8_id=m3u8_id,
1955 note=note, errnote=errnote, fatal=fatal, live=live, data=data,
1956 headers=headers, query=query, video_id=video_id)
1957
1958 def _parse_m3u8_formats_and_subtitles(
1959 self, m3u8_doc, m3u8_url=None, ext=None, entry_protocol='m3u8_native',
1960 preference=None, quality=None, m3u8_id=None, live=False, note=None,
1961 errnote=None, fatal=True, data=None, headers={}, query={},
1962 video_id=None):
1963 formats, subtitles = [], {}
1964
1965 has_drm = re.search('|'.join([
1966 r'#EXT-X-FAXS-CM:', # Adobe Flash Access
1967 r'#EXT-X-(?:SESSION-)?KEY:.*?URI="skd://', # Apple FairPlay
1968 ]), m3u8_doc)
1969
1970 def format_url(url):
1971 return url if re.match(r'^https?://', url) else urllib.parse.urljoin(m3u8_url, url)
1972
1973 if self.get_param('hls_split_discontinuity', False):
1974 def _extract_m3u8_playlist_indices(manifest_url=None, m3u8_doc=None):
1975 if not m3u8_doc:
1976 if not manifest_url:
1977 return []
1978 m3u8_doc = self._download_webpage(
1979 manifest_url, video_id, fatal=fatal, data=data, headers=headers,
1980 note=False, errnote='Failed to download m3u8 playlist information')
1981 if m3u8_doc is False:
1982 return []
1983 return range(1 + sum(line.startswith('#EXT-X-DISCONTINUITY') for line in m3u8_doc.splitlines()))
1984
1985 else:
1986 def _extract_m3u8_playlist_indices(*args, **kwargs):
1987 return [None]
1988
1989 # References:
1990 # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
1991 # 2. https://github.com/ytdl-org/youtube-dl/issues/12211
1992 # 3. https://github.com/ytdl-org/youtube-dl/issues/18923
1993
1994 # We should try extracting formats only from master playlists [1, 4.3.4],
1995 # i.e. playlists that describe the available qualities. On the other hand,
1996 # media playlists [1, 4.3.3] should be returned as is, since they contain
1997 # just the media, without quality renditions.
1998 # Fortunately, a master playlist can easily be distinguished from a media
1999 # playlist by the availability of particular tags. As per [1, 4.3.3, 4.3.4],
2000 # master playlist tags MUST NOT appear in a media playlist and vice versa.
2001 # As per [1, 4.3.3.1], the #EXT-X-TARGETDURATION tag is REQUIRED for every
2002 # media playlist and MUST NOT appear in a master playlist, so we can
2003 # reliably detect a media playlist with this criterion.
2004
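# For example, a minimal media playlist (illustrative) looks like
#   #EXTM3U
#   #EXT-X-TARGETDURATION:10
#   #EXTINF:9.5,
#   segment0.ts
# whereas a master playlist instead lists EXT-X-STREAM-INF variants.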
2005 if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is
2006 formats = [{
2007 'format_id': join_nonempty(m3u8_id, idx),
2008 'format_index': idx,
2009 'url': m3u8_url or encode_data_uri(m3u8_doc.encode('utf-8'), 'application/x-mpegurl'),
2010 'ext': ext,
2011 'protocol': entry_protocol,
2012 'preference': preference,
2013 'quality': quality,
2014 'has_drm': has_drm,
2015 } for idx in _extract_m3u8_playlist_indices(m3u8_doc=m3u8_doc)]
2016
2017 return formats, subtitles
2018
2019 groups = {}
2020 last_stream_inf = {}
2021
2022 def extract_media(x_media_line):
2023 media = parse_m3u8_attributes(x_media_line)
2024 # As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
2025 media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
2026 if not (media_type and group_id and name):
2027 return
2028 groups.setdefault(group_id, []).append(media)
2029 # <https://tools.ietf.org/html/rfc8216#section-4.3.4.1>
2030 if media_type == 'SUBTITLES':
2031 # According to RFC 8216 §4.3.4.2.1, URI is REQUIRED in the
2032 # EXT-X-MEDIA tag if the media type is SUBTITLES.
2033 # However, lack of URI has been spotted in the wild.
2034 # e.g. NebulaIE; see https://github.com/yt-dlp/yt-dlp/issues/339
2035 if not media.get('URI'):
2036 return
2037 url = format_url(media['URI'])
2038 sub_info = {
2039 'url': url,
2040 'ext': determine_ext(url),
2041 }
2042 if sub_info['ext'] == 'm3u8':
2043 # Per RFC 8216 §3.1, the only subtitle format that m3u8
2044 # files may contain is WebVTT:
2045 # <https://tools.ietf.org/html/rfc8216#section-3.1>
2046 sub_info['ext'] = 'vtt'
2047 sub_info['protocol'] = 'm3u8_native'
2048 lang = media.get('LANGUAGE') or 'und'
2049 subtitles.setdefault(lang, []).append(sub_info)
2050 if media_type not in ('VIDEO', 'AUDIO'):
2051 return
2052 media_url = media.get('URI')
2053 if media_url:
2054 manifest_url = format_url(media_url)
2055 formats.extend({
2056 'format_id': join_nonempty(m3u8_id, group_id, name, idx),
2057 'format_note': name,
2058 'format_index': idx,
2059 'url': manifest_url,
2060 'manifest_url': m3u8_url,
2061 'language': media.get('LANGUAGE'),
2062 'ext': ext,
2063 'protocol': entry_protocol,
2064 'preference': preference,
2065 'quality': quality,
2066 'vcodec': 'none' if media_type == 'AUDIO' else None,
2067 } for idx in _extract_m3u8_playlist_indices(manifest_url))
2068
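# Illustrative sketch: an EXT-X-MEDIA line such as
#   #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="aud",NAME="English",LANGUAGE="en",URI="a.m3u8"
# (hypothetical) is registered under groups['aud'] and, since it has a URI,
# also emits an audio-only format with vcodec 'none'.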
2069 def build_stream_name():
2070 # Although the specification does not mention the NAME attribute for
2071 # the EXT-X-STREAM-INF tag, it may still sometimes be present (see [1]
2072 # or the vidio test in TestInfoExtractor.test_parse_m3u8_formats)
2073 # 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
2074 stream_name = last_stream_inf.get('NAME')
2075 if stream_name:
2076 return stream_name
2077 # If there is no NAME in EXT-X-STREAM-INF, it will be obtained
2078 # from the corresponding rendition group
2079 stream_group_id = last_stream_inf.get('VIDEO')
2080 if not stream_group_id:
2081 return
2082 stream_group = groups.get(stream_group_id)
2083 if not stream_group:
2084 return stream_group_id
2085 rendition = stream_group[0]
2086 return rendition.get('NAME') or stream_group_id
2087
2088 # Parse EXT-X-MEDIA tags before EXT-X-STREAM-INF in order to have a
2089 # chance to detect video-only formats when EXT-X-STREAM-INF tags
2090 # precede EXT-X-MEDIA tags in the HLS manifest, as in [3].
2091 for line in m3u8_doc.splitlines():
2092 if line.startswith('#EXT-X-MEDIA:'):
2093 extract_media(line)
2094
2095 for line in m3u8_doc.splitlines():
2096 if line.startswith('#EXT-X-STREAM-INF:'):
2097 last_stream_inf = parse_m3u8_attributes(line)
2098 elif line.startswith('#') or not line.strip():
2099 continue
2100 else:
2101 tbr = float_or_none(
2102 last_stream_inf.get('AVERAGE-BANDWIDTH')
2103 or last_stream_inf.get('BANDWIDTH'), scale=1000)
2104 manifest_url = format_url(line.strip())
2105
2106 for idx in _extract_m3u8_playlist_indices(manifest_url):
2107 format_id = [m3u8_id, None, idx]
2108 # The bandwidth of live streams may differ over time, making
2109 # format_id unpredictable, so it's better to keep the provided
2110 # format_id intact.
2111 if not live:
2112 stream_name = build_stream_name()
2113 format_id[1] = stream_name or '%d' % (tbr or len(formats))
2114 f = {
2115 'format_id': join_nonempty(*format_id),
2116 'format_index': idx,
2117 'url': manifest_url,
2118 'manifest_url': m3u8_url,
2119 'tbr': tbr,
2120 'ext': ext,
2121 'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
2122 'protocol': entry_protocol,
2123 'preference': preference,
2124 'quality': quality,
2125 }
2126 resolution = last_stream_inf.get('RESOLUTION')
2127 if resolution:
2128 mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
2129 if mobj:
2130 f['width'] = int(mobj.group('width'))
2131 f['height'] = int(mobj.group('height'))
2132 # Unified Streaming Platform
2133 mobj = re.search(
2134 r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
2135 if mobj:
2136 abr, vbr = mobj.groups()
2137 abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
2138 f.update({
2139 'vbr': vbr,
2140 'abr': abr,
2141 })
2142 codecs = parse_codecs(last_stream_inf.get('CODECS'))
2143 f.update(codecs)
2144 audio_group_id = last_stream_inf.get('AUDIO')
2145 # As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
2146 # references a rendition group MUST have a CODECS attribute.
2147 # However, this is not always respected. E.g. [2]
2148 # contains EXT-X-STREAM-INF tag which references AUDIO
2149 # rendition group but does not have CODECS and despite
2150 # referencing an audio group it represents a complete
2151 # (with audio and video) format. So, for such cases we will
2152 # ignore references to rendition groups and treat them
2153 # as complete formats.
2154 if audio_group_id and codecs and f.get('vcodec') != 'none':
2155 audio_group = groups.get(audio_group_id)
2156 if audio_group and audio_group[0].get('URI'):
2157 # TODO: update acodec for audio only formats with
2158 # the same GROUP-ID
2159 f['acodec'] = 'none'
2160 if not f.get('ext'):
2161 f['ext'] = 'm4a' if f.get('vcodec') == 'none' else 'mp4'
2162 formats.append(f)
2163
2164 # for DailyMotion
2165 progressive_uri = last_stream_inf.get('PROGRESSIVE-URI')
2166 if progressive_uri:
2167 http_f = f.copy()
2168 del http_f['manifest_url']
2169 http_f.update({
2170 'format_id': f['format_id'].replace('hls-', 'http-'),
2171 'protocol': 'http',
2172 'url': progressive_uri,
2173 })
2174 formats.append(http_f)
2175
2176 last_stream_inf = {}
2177 return formats, subtitles
2178
2179 def _extract_m3u8_vod_duration(
2180 self, m3u8_vod_url, video_id, note=None, errnote=None, data=None, headers={}, query={}):
2181
2182 m3u8_vod = self._download_webpage(
2183 m3u8_vod_url, video_id,
2184 note='Downloading m3u8 VOD manifest' if note is None else note,
2185 errnote='Failed to download VOD manifest' if errnote is None else errnote,
2186 fatal=False, data=data, headers=headers, query=query)
2187
2188 return self._parse_m3u8_vod_duration(m3u8_vod or '', video_id)
2189
2190 def _parse_m3u8_vod_duration(self, m3u8_vod, video_id):
2191 if '#EXT-X-ENDLIST' not in m3u8_vod:
2192 return None
2193
2194 return int(sum(
2195 float(line[len('#EXTINF:'):].split(',')[0])
2196 for line in m3u8_vod.splitlines() if line.startswith('#EXTINF:'))) or None
2197
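# e.g. a VOD playlist containing '#EXT-X-ENDLIST' plus '#EXTINF:9.5,' and
# '#EXTINF:4.2,' entries yields int(13.7) == 13 seconds; playlists without
# '#EXT-X-ENDLIST' (live) return None.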
2198 def _extract_mpd_vod_duration(
2199 self, mpd_url, video_id, note=None, errnote=None, data=None, headers={}, query={}):
2200
2201 mpd_doc = self._download_xml(
2202 mpd_url, video_id,
2203 note='Downloading MPD VOD manifest' if note is None else note,
2204 errnote='Failed to download VOD manifest' if errnote is None else errnote,
2205 fatal=False, data=data, headers=headers, query=query) or {}
2206 return int_or_none(parse_duration(mpd_doc.get('mediaPresentationDuration')))
2207
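# e.g. mediaPresentationDuration="PT1H2M3S" parses to 3723 seconds.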
2208 @staticmethod
2209 def _xpath_ns(path, namespace=None):
2210 if not namespace:
2211 return path
2212 out = []
2213 for c in path.split('/'):
2214 if not c or c == '.':
2215 out.append(c)
2216 else:
2217 out.append('{%s}%s' % (namespace, c))
2218 return '/'.join(out)
2219
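# e.g. _xpath_ns('./head/meta', 'urn:example') (hypothetical namespace)
# yields './{urn:example}head/{urn:example}meta'.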
2220 def _extract_smil_formats_and_subtitles(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
2221 if self.get_param('ignore_no_formats_error'):
2222 fatal = False
2223
2224 res = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
2225 if res is False:
2226 assert not fatal
2227 return [], {}
2228
2229 smil, urlh = res
2230 smil_url = urlh.geturl()
2231
2232 namespace = self._parse_smil_namespace(smil)
2233
2234 fmts = self._parse_smil_formats(
2235 smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
2236 subs = self._parse_smil_subtitles(
2237 smil, namespace=namespace)
2238
2239 return fmts, subs
2240
2241 def _extract_smil_formats(self, *args, **kwargs):
2242 fmts, subs = self._extract_smil_formats_and_subtitles(*args, **kwargs)
2243 if subs:
2244 self._report_ignoring_subs('SMIL')
2245 return fmts
2246
2247 def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
2248 res = self._download_smil(smil_url, video_id, fatal=fatal)
2249 if res is False:
2250 return {}
2251
2252 smil, urlh = res
2253 smil_url = urlh.geturl()
2254
2255 return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
2256
2257 def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
2258 return self._download_xml_handle(
2259 smil_url, video_id, 'Downloading SMIL file',
2260 'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
2261
2262 def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
2263 namespace = self._parse_smil_namespace(smil)
2264
2265 formats = self._parse_smil_formats(
2266 smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
2267 subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
2268
2269 video_id = os.path.splitext(url_basename(smil_url))[0]
2270 title = None
2271 description = None
2272 upload_date = None
2273 for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
2274 name = meta.attrib.get('name')
2275 content = meta.attrib.get('content')
2276 if not name or not content:
2277 continue
2278 if not title and name == 'title':
2279 title = content
2280 elif not description and name in ('description', 'abstract'):
2281 description = content
2282 elif not upload_date and name == 'date':
2283 upload_date = unified_strdate(content)
2284
2285 thumbnails = [{
2286 'id': image.get('type'),
2287 'url': image.get('src'),
2288 'width': int_or_none(image.get('width')),
2289 'height': int_or_none(image.get('height')),
2290 } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
2291
2292 return {
2293 'id': video_id,
2294 'title': title or video_id,
2295 'description': description,
2296 'upload_date': upload_date,
2297 'thumbnails': thumbnails,
2298 'formats': formats,
2299 'subtitles': subtitles,
2300 }
2301
2302 def _parse_smil_namespace(self, smil):
2303 return self._search_regex(
2304 r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
2305
2306 def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
2307 base = smil_url
2308 for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
2309 b = meta.get('base') or meta.get('httpBase')
2310 if b:
2311 base = b
2312 break
2313
2314 formats = []
2315 rtmp_count = 0
2316 http_count = 0
2317 m3u8_count = 0
2318 imgs_count = 0
2319
2320 srcs = set()
2321 media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
2322 for medium in media:
2323 src = medium.get('src')
2324 if not src or src in srcs:
2325 continue
2326 srcs.add(src)
2327
2328 bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
2329 filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
2330 width = int_or_none(medium.get('width'))
2331 height = int_or_none(medium.get('height'))
2332 proto = medium.get('proto')
2333 ext = medium.get('ext')
2334 src_ext = determine_ext(src, default_ext=None) or ext or urlhandle_detect_ext(
2335 self._request_webpage(HEADRequest(src), video_id, note='Requesting extension info', fatal=False))
2336 streamer = medium.get('streamer') or base
2337
2338 if proto == 'rtmp' or streamer.startswith('rtmp'):
2339 rtmp_count += 1
2340 formats.append({
2341 'url': streamer,
2342 'play_path': src,
2343 'ext': 'flv',
2344 'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
2345 'tbr': bitrate,
2346 'filesize': filesize,
2347 'width': width,
2348 'height': height,
2349 })
2350 if transform_rtmp_url:
2351 streamer, src = transform_rtmp_url(streamer, src)
2352 formats[-1].update({
2353 'url': streamer,
2354 'play_path': src,
2355 })
2356 continue
2357
2358 src_url = src if src.startswith('http') else urllib.parse.urljoin(base, src)
2359 src_url = src_url.strip()
2360
2361 if proto == 'm3u8' or src_ext == 'm3u8':
2362 m3u8_formats = self._extract_m3u8_formats(
2363 src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
2364 if len(m3u8_formats) == 1:
2365 m3u8_count += 1
2366 m3u8_formats[0].update({
2367 'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
2368 'tbr': bitrate,
2369 'width': width,
2370 'height': height,
2371 })
2372 formats.extend(m3u8_formats)
2373 elif src_ext == 'f4m':
2374 f4m_url = src_url
2375 if not f4m_params:
2376 f4m_params = {
2377 'hdcore': '3.2.0',
2378 'plugin': 'flowplayer-3.2.0.1',
2379 }
2380 f4m_url += '&' if '?' in f4m_url else '?'
2381 f4m_url += urllib.parse.urlencode(f4m_params)
2382 formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
2383 elif src_ext == 'mpd':
2384 formats.extend(self._extract_mpd_formats(
2385 src_url, video_id, mpd_id='dash', fatal=False))
2386 elif re.search(r'\.ism/[Mm]anifest', src_url):
2387 formats.extend(self._extract_ism_formats(
2388 src_url, video_id, ism_id='mss', fatal=False))
2389 elif src_url.startswith('http') and self._is_valid_url(src, video_id):
2390 http_count += 1
2391 formats.append({
2392 'url': src_url,
2393 'ext': ext or src_ext or 'flv',
2394 'format_id': 'http-%d' % (bitrate or http_count),
2395 'tbr': bitrate,
2396 'filesize': filesize,
2397 'width': width,
2398 'height': height,
2399 })
2400
2401 for medium in smil.findall(self._xpath_ns('.//imagestream', namespace)):
2402 src = medium.get('src')
2403 if not src or src in srcs:
2404 continue
2405 srcs.add(src)
2406
2407 imgs_count += 1
2408 formats.append({
2409 'format_id': 'imagestream-%d' % (imgs_count),
2410 'url': src,
2411 'ext': mimetype2ext(medium.get('type')),
2412 'acodec': 'none',
2413 'vcodec': 'none',
2414 'width': int_or_none(medium.get('width')),
2415 'height': int_or_none(medium.get('height')),
2416 'format_note': 'SMIL storyboards',
2417 })
2418
2419 return formats
2420
2421 def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
2422 urls = []
2423 subtitles = {}
2424 for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
2425 src = textstream.get('src')
2426 if not src or src in urls:
2427 continue
2428 urls.append(src)
2429 ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
2430 lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
2431 subtitles.setdefault(lang, []).append({
2432 'url': src,
2433 'ext': ext,
2434 })
2435 return subtitles
2436
2437 def _extract_xspf_playlist(self, xspf_url, playlist_id, fatal=True):
2438 res = self._download_xml_handle(
2439 xspf_url, playlist_id, 'Downloading xspf playlist',
2440 'Unable to download xspf manifest', fatal=fatal)
2441 if res is False:
2442 return []
2443
2444 xspf, urlh = res
2445 xspf_url = urlh.geturl()
2446
2447 return self._parse_xspf(
2448 xspf, playlist_id, xspf_url=xspf_url,
2449 xspf_base_url=base_url(xspf_url))
2450
2451 def _parse_xspf(self, xspf_doc, playlist_id, xspf_url=None, xspf_base_url=None):
2452 NS_MAP = {
2453 'xspf': 'http://xspf.org/ns/0/',
2454 's1': 'http://static.streamone.nl/player/ns/0',
2455 }
2456
2457 entries = []
2458 for track in xspf_doc.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
2459 title = xpath_text(
2460 track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
2461 description = xpath_text(
2462 track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
2463 thumbnail = xpath_text(
2464 track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
2465 duration = float_or_none(
2466 xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
2467
2468 formats = []
2469 for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP)):
2470 format_url = urljoin(xspf_base_url, location.text)
2471 if not format_url:
2472 continue
2473 formats.append({
2474 'url': format_url,
2475 'manifest_url': xspf_url,
2476 'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
2477 'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
2478 'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
2479 })
2480
2481 entries.append({
2482 'id': playlist_id,
2483 'title': title,
2484 'description': description,
2485 'thumbnail': thumbnail,
2486 'duration': duration,
2487 'formats': formats,
2488 })
2489 return entries
2490
2491 def _extract_mpd_formats(self, *args, **kwargs):
2492 fmts, subs = self._extract_mpd_formats_and_subtitles(*args, **kwargs)
2493 if subs:
2494 self._report_ignoring_subs('DASH')
2495 return fmts
2496
2497 def _extract_mpd_formats_and_subtitles(
2498 self, mpd_url, video_id, mpd_id=None, note=None, errnote=None,
2499 fatal=True, data=None, headers={}, query={}):
2500
2501 if self.get_param('ignore_no_formats_error'):
2502 fatal = False
2503
2504 res = self._download_xml_handle(
2505 mpd_url, video_id,
2506 note='Downloading MPD manifest' if note is None else note,
2507 errnote='Failed to download MPD manifest' if errnote is None else errnote,
2508 fatal=fatal, data=data, headers=headers, query=query)
2509 if res is False:
2510 return [], {}
2511 mpd_doc, urlh = res
2512 if mpd_doc is None:
2513 return [], {}
2514
2515 # We could have been redirected to a new URL when we retrieved our MPD file.
2516 mpd_url = urlh.geturl()
2517 mpd_base_url = base_url(mpd_url)
2518
2519 return self._parse_mpd_formats_and_subtitles(
2520 mpd_doc, mpd_id, mpd_base_url, mpd_url)
2521
2522 def _parse_mpd_formats(self, *args, **kwargs):
2523 fmts, subs = self._parse_mpd_formats_and_subtitles(*args, **kwargs)
2524 if subs:
2525 self._report_ignoring_subs('DASH')
2526 return fmts
2527
2528 def _parse_mpd_formats_and_subtitles(
2529 self, mpd_doc, mpd_id=None, mpd_base_url='', mpd_url=None):
2530 """
2531 Parse formats from MPD manifest.
2532 References:
2533 1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
2534 http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2535 2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
2536 """
2537 if not self.get_param('dynamic_mpd', True):
2538 if mpd_doc.get('type') == 'dynamic':
2539 return [], {}
2540
2541 namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
2542
2543 def _add_ns(path):
2544 return self._xpath_ns(path, namespace)
2545
2546 def is_drm_protected(element):
2547 return element.find(_add_ns('ContentProtection')) is not None
2548
2549 def extract_multisegment_info(element, ms_parent_info):
2550 ms_info = ms_parent_info.copy()
2551
2552 # As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
2553 # common attributes and elements. We will only extract what is
2554 # relevant for us.
2555 def extract_common(source):
2556 segment_timeline = source.find(_add_ns('SegmentTimeline'))
2557 if segment_timeline is not None:
2558 s_e = segment_timeline.findall(_add_ns('S'))
2559 if s_e:
2560 ms_info['total_number'] = 0
2561 ms_info['s'] = []
2562 for s in s_e:
2563 r = int(s.get('r', 0))
2564 ms_info['total_number'] += 1 + r
2565 ms_info['s'].append({
2566 't': int(s.get('t', 0)),
2567 # @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
2568 'd': int(s.attrib['d']),
2569 'r': r,
2570 })
2571 start_number = source.get('startNumber')
2572 if start_number:
2573 ms_info['start_number'] = int(start_number)
2574 timescale = source.get('timescale')
2575 if timescale:
2576 ms_info['timescale'] = int(timescale)
2577 segment_duration = source.get('duration')
2578 if segment_duration:
2579 ms_info['segment_duration'] = float(segment_duration)
2580
2581 def extract_Initialization(source):
2582 initialization = source.find(_add_ns('Initialization'))
2583 if initialization is not None:
2584 ms_info['initialization_url'] = initialization.attrib['sourceURL']
2585
2586 segment_list = element.find(_add_ns('SegmentList'))
2587 if segment_list is not None:
2588 extract_common(segment_list)
2589 extract_Initialization(segment_list)
2590 segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
2591 if segment_urls_e:
2592 ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
2593 else:
2594 segment_template = element.find(_add_ns('SegmentTemplate'))
2595 if segment_template is not None:
2596 extract_common(segment_template)
2597 media = segment_template.get('media')
2598 if media:
2599 ms_info['media'] = media
2600 initialization = segment_template.get('initialization')
2601 if initialization:
2602 ms_info['initialization'] = initialization
2603 else:
2604 extract_Initialization(segment_template)
2605 return ms_info
2606
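# Illustrative sketch: a SegmentTimeline such as
#   <SegmentTimeline><S t="0" d="5000" r="2"/></SegmentTimeline>
# describes 1 + r = 3 segments of d/timescale seconds each, so ms_info
# gains {'total_number': 3, 's': [{'t': 0, 'd': 5000, 'r': 2}]}.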
2607 mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
2608 formats, subtitles = [], {}
2609 stream_numbers = collections.defaultdict(int)
2610 for period in mpd_doc.findall(_add_ns('Period')):
2611 period_duration = parse_duration(period.get('duration')) or mpd_duration
2612 period_ms_info = extract_multisegment_info(period, {
2613 'start_number': 1,
2614 'timescale': 1,
2615 })
2616 for adaptation_set in period.findall(_add_ns('AdaptationSet')):
2617 adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
2618 for representation in adaptation_set.findall(_add_ns('Representation')):
2619 representation_attrib = adaptation_set.attrib.copy()
2620 representation_attrib.update(representation.attrib)
2621 # According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
2622 mime_type = representation_attrib['mimeType']
2623 content_type = representation_attrib.get('contentType', mime_type.split('/')[0])
2624
2625 codec_str = representation_attrib.get('codecs', '')
2626 # Some kind of binary subtitle found in some YouTube livestreams
2627 if mime_type == 'application/x-rawcc':
2628 codecs = {'scodec': codec_str}
2629 else:
2630 codecs = parse_codecs(codec_str)
2631 if content_type not in ('video', 'audio', 'text'):
2632 if mime_type == 'image/jpeg':
2633 content_type = mime_type
2634 elif codecs.get('vcodec', 'none') != 'none':
2635 content_type = 'video'
2636 elif codecs.get('acodec', 'none') != 'none':
2637 content_type = 'audio'
2638 elif codecs.get('scodec', 'none') != 'none':
2639 content_type = 'text'
2640 elif mimetype2ext(mime_type) in ('tt', 'dfxp', 'ttml', 'xml', 'json'):
2641 content_type = 'text'
2642 else:
2643 self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
2644 continue
2645
2646 base_url = ''
2647 for element in (representation, adaptation_set, period, mpd_doc):
2648 base_url_e = element.find(_add_ns('BaseURL'))
2649 if try_call(lambda: base_url_e.text) is not None:
2650 base_url = base_url_e.text + base_url
2651 if re.match(r'^https?://', base_url):
2652 break
2653 if mpd_base_url and base_url.startswith('/'):
2654 base_url = urllib.parse.urljoin(mpd_base_url, base_url)
2655 elif mpd_base_url and not re.match(r'^https?://', base_url):
2656 if not mpd_base_url.endswith('/'):
2657 mpd_base_url += '/'
2658 base_url = mpd_base_url + base_url
2659 representation_id = representation_attrib.get('id')
2660 lang = representation_attrib.get('lang')
2661 url_el = representation.find(_add_ns('BaseURL'))
2662 filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
2663 bandwidth = int_or_none(representation_attrib.get('bandwidth'))
2664 if representation_id is not None:
2665 format_id = representation_id
2666 else:
2667 format_id = content_type
2668 if mpd_id:
2669 format_id = mpd_id + '-' + format_id
2670 if content_type in ('video', 'audio'):
2671 f = {
2672 'format_id': format_id,
2673 'manifest_url': mpd_url,
2674 'ext': mimetype2ext(mime_type),
2675 'width': int_or_none(representation_attrib.get('width')),
2676 'height': int_or_none(representation_attrib.get('height')),
2677 'tbr': float_or_none(bandwidth, 1000),
2678 'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
2679 'fps': int_or_none(representation_attrib.get('frameRate')),
2680 'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
2681 'format_note': 'DASH %s' % content_type,
2682 'filesize': filesize,
2683 'container': mimetype2ext(mime_type) + '_dash',
2684 **codecs
2685 }
2686 elif content_type == 'text':
2687 f = {
2688 'ext': mimetype2ext(mime_type),
2689 'manifest_url': mpd_url,
2690 'filesize': filesize,
2691 }
2692 elif content_type == 'image/jpeg':
2693 # See test case in VikiIE
2694 # https://www.viki.com/videos/1175236v-choosing-spouse-by-lottery-episode-1
2695 f = {
2696 'format_id': format_id,
2697 'ext': 'mhtml',
2698 'manifest_url': mpd_url,
2699 'format_note': 'DASH storyboards (jpeg)',
2700 'acodec': 'none',
2701 'vcodec': 'none',
2702 }
2703 if is_drm_protected(adaptation_set) or is_drm_protected(representation):
2704 f['has_drm'] = True
2705 representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
2706
2707 def prepare_template(template_name, identifiers):
2708 tmpl = representation_ms_info[template_name]
2709 if representation_id is not None:
2710 tmpl = tmpl.replace('$RepresentationID$', representation_id)
2711 # First off, % characters outside $...$ templates
2712 # must be escaped by doubling for proper processing
2713 # by the % string-formatting operator used below (see
2714 # https://github.com/ytdl-org/youtube-dl/issues/16867).
2715 t = ''
2716 in_template = False
2717 for c in tmpl:
2718 t += c
2719 if c == '$':
2720 in_template = not in_template
2721 elif c == '%' and not in_template:
2722 t += c
2723 # Next, $...$ templates are translated to their
2724 # %(...) counterparts to be used with % operator
2725 t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
2726 t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
2727 t = t.replace('$$', '$')
2728 return t
2729
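# Illustrative sketch: a hypothetical template
#   'seg_$RepresentationID$_$Number%05d$.m4s'
# becomes 'seg_video1_%(Number)05d.m4s' (with representation_id 'video1'),
# ready for the % operator with e.g. {'Number': 3} -> 'seg_video1_00003.m4s'.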
2730 # @initialization is a regular template, like the @media one,
2731 # so it should be handled in just the same way (see
2732 # https://github.com/ytdl-org/youtube-dl/issues/11605)
2733 if 'initialization' in representation_ms_info:
2734 initialization_template = prepare_template(
2735 'initialization',
2736 # As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
2737 # $Time$ shall not be included for @initialization thus
2738 # only $Bandwidth$ remains
2739 ('Bandwidth', ))
2740 representation_ms_info['initialization_url'] = initialization_template % {
2741 'Bandwidth': bandwidth,
2742 }
2743
2744 def location_key(location):
2745 return 'url' if re.match(r'^https?://', location) else 'path'
2746
2747 if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
2748
2749 media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
2750 media_location_key = location_key(media_template)
2751
2752 # As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
2753 # can't be used at the same time
2754 if '%(Number' in media_template and 's' not in representation_ms_info:
2755 segment_duration = None
2756 if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
2757 segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
2758 representation_ms_info['total_number'] = int(math.ceil(
2759 float_or_none(period_duration, segment_duration, default=0)))
2760 representation_ms_info['fragments'] = [{
2761 media_location_key: media_template % {
2762 'Number': segment_number,
2763 'Bandwidth': bandwidth,
2764 },
2765 'duration': segment_duration,
2766 } for segment_number in range(
2767 representation_ms_info['start_number'],
2768 representation_ms_info['total_number'] + representation_ms_info['start_number'])]
2769 else:
2770 # $Number*$ or $Time$ in media template with S list available
2771 # Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
2772 # Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
2773 representation_ms_info['fragments'] = []
2774 segment_time = 0
2775 segment_d = None
2776 segment_number = representation_ms_info['start_number']
2777
2778 def add_segment_url():
2779 segment_url = media_template % {
2780 'Time': segment_time,
2781 'Bandwidth': bandwidth,
2782 'Number': segment_number,
2783 }
2784 representation_ms_info['fragments'].append({
2785 media_location_key: segment_url,
2786 'duration': float_or_none(segment_d, representation_ms_info['timescale']),
2787 })
2788
2789 for num, s in enumerate(representation_ms_info['s']):
2790 segment_time = s.get('t') or segment_time
2791 segment_d = s['d']
2792 add_segment_url()
2793 segment_number += 1
2794 for r in range(s.get('r', 0)):
2795 segment_time += segment_d
2796 add_segment_url()
2797 segment_number += 1
2798 segment_time += segment_d
2799 elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
2800 # No media template,
2801 # e.g. https://www.youtube.com/watch?v=iXZV5uAYMJI
2802 # or any YouTube dashsegments video
2803 fragments = []
2804 segment_index = 0
2805 timescale = representation_ms_info['timescale']
2806 for s in representation_ms_info['s']:
2807 duration = float_or_none(s['d'], timescale)
2808 for r in range(s.get('r', 0) + 1):
2809 segment_uri = representation_ms_info['segment_urls'][segment_index]
2810 fragments.append({
2811 location_key(segment_uri): segment_uri,
2812 'duration': duration,
2813 })
2814 segment_index += 1
2815 representation_ms_info['fragments'] = fragments
2816 elif 'segment_urls' in representation_ms_info:
2817 # Segment URLs with no SegmentTimeline
2818 # E.g. https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
2819 # https://github.com/ytdl-org/youtube-dl/pull/14844
2820 fragments = []
2821 segment_duration = float_or_none(
2822 representation_ms_info['segment_duration'],
2823 representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
2824 for segment_url in representation_ms_info['segment_urls']:
2825 fragment = {
2826 location_key(segment_url): segment_url,
2827 }
2828 if segment_duration:
2829 fragment['duration'] = segment_duration
2830 fragments.append(fragment)
2831 representation_ms_info['fragments'] = fragments
2832 # If there is a fragments key available, then we correctly recognized fragmented media.
2833 # Otherwise, we will assume unfragmented media with direct access. Technically, such
2834 # an assumption is not necessarily correct, since we may simply have no support for
2835 # some forms of fragmented media renditions yet, but for now we'll use this fallback.
2836 if 'fragments' in representation_ms_info:
2837 f.update({
2838 # NB: mpd_url may be empty when MPD manifest is parsed from a string
2839 'url': mpd_url or base_url,
2840 'fragment_base_url': base_url,
2841 'fragments': [],
2842 'protocol': 'http_dash_segments' if mime_type != 'image/jpeg' else 'mhtml',
2843 })
2844 if 'initialization_url' in representation_ms_info:
2845 initialization_url = representation_ms_info['initialization_url']
2846 if not f.get('url'):
2847 f['url'] = initialization_url
2848 f['fragments'].append({location_key(initialization_url): initialization_url})
2849 f['fragments'].extend(representation_ms_info['fragments'])
2850 if not period_duration:
2851 period_duration = try_get(
2852 representation_ms_info,
2853 lambda r: sum(frag['duration'] for frag in r['fragments']), float)
2854 else:
2855 # Assuming direct URL to unfragmented media.
2856 f['url'] = base_url
2857 if content_type in ('video', 'audio', 'image/jpeg'):
2858 f['manifest_stream_number'] = stream_numbers[f['url']]
2859 stream_numbers[f['url']] += 1
2860 formats.append(f)
2861 elif content_type == 'text':
2862 subtitles.setdefault(lang or 'und', []).append(f)
2863
2864 return formats, subtitles
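
# Illustrative sketch (hypothetical helper, not used by the extractor) of the
# SegmentTimeline expansion above: each <S> element contributes 1 + @r fragments
# of duration @d (in timescale units), starting at @t when given, otherwise at
# the running offset.
def _example_expand_segment_timeline(s_elements, timescale):
    fragments, time = [], 0
    for s in s_elements:
        time = s.get('t', time)  # @t resets the clock when present
        for _ in range(s.get('r', 0) + 1):  # @r counts *extra* repetitions
            fragments.append({'start': time / timescale, 'duration': s['d'] / timescale})
            time += s['d']
    return fragments

# e.g. _example_expand_segment_timeline([{'t': 0, 'd': 90000, 'r': 1}, {'d': 45000}], 90000)
# == [{'start': 0.0, 'duration': 1.0}, {'start': 1.0, 'duration': 1.0}, {'start': 2.0, 'duration': 0.5}]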
2865
2866 def _extract_ism_formats(self, *args, **kwargs):
2867 fmts, subs = self._extract_ism_formats_and_subtitles(*args, **kwargs)
2868 if subs:
2869 self._report_ignoring_subs('ISM')
2870 return fmts
2871
2872 def _extract_ism_formats_and_subtitles(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
2873 if self.get_param('ignore_no_formats_error'):
2874 fatal = False
2875
2876 res = self._download_xml_handle(
2877 ism_url, video_id,
2878 note='Downloading ISM manifest' if note is None else note,
2879 errnote='Failed to download ISM manifest' if errnote is None else errnote,
2880 fatal=fatal, data=data, headers=headers, query=query)
2881 if res is False:
2882 return [], {}
2883 ism_doc, urlh = res
2884 if ism_doc is None:
2885 return [], {}
2886
2887 return self._parse_ism_formats_and_subtitles(ism_doc, urlh.geturl(), ism_id)
2888
2889 def _parse_ism_formats_and_subtitles(self, ism_doc, ism_url, ism_id=None):
2890 """
2891 Parse formats from ISM manifest.
2892 References:
2893 1. [MS-SSTR]: Smooth Streaming Protocol,
2894 https://msdn.microsoft.com/en-us/library/ff469518.aspx
2895 """
2896 if ism_doc.get('IsLive') == 'TRUE':
2897 return [], {}
2898
2899 duration = int(ism_doc.attrib['Duration'])
2900 timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000
2901
2902 formats = []
2903 subtitles = {}
2904 for stream in ism_doc.findall('StreamIndex'):
2905 stream_type = stream.get('Type')
2906 if stream_type not in ('video', 'audio', 'text'):
2907 continue
2908 url_pattern = stream.attrib['Url']
2909 stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
2910 stream_name = stream.get('Name')
2911 stream_language = stream.get('Language', 'und')
2912 for track in stream.findall('QualityLevel'):
2913 KNOWN_TAGS = {'255': 'AACL', '65534': 'EC-3'}
2914 fourcc = track.get('FourCC') or KNOWN_TAGS.get(track.get('AudioTag'))
2915 # TODO: add support for WVC1 and WMAP
2916 if fourcc not in ('H264', 'AVC1', 'AACL', 'TTML', 'EC-3'):
2917 self.report_warning('%s is not a supported codec' % fourcc)
2918 continue
2919 tbr = int(track.attrib['Bitrate']) // 1000
2920 # [1] does not mention Width and Height attributes. However,
2921 # they're often present while MaxWidth and MaxHeight are
2922 # missing, so they should be used as fallbacks
2923 width = int_or_none(track.get('MaxWidth') or track.get('Width'))
2924 height = int_or_none(track.get('MaxHeight') or track.get('Height'))
2925 sampling_rate = int_or_none(track.get('SamplingRate'))
2926
2927 track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
2928 track_url_pattern = urllib.parse.urljoin(ism_url, track_url_pattern)
2929
2930 fragments = []
2931 fragment_ctx = {
2932 'time': 0,
2933 }
2934 stream_fragments = stream.findall('c')
2935 for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
2936 fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
2937 fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
2938 fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
2939 if not fragment_ctx['duration']:
2940 try:
2941 next_fragment_time = int(stream_fragments[stream_fragment_index + 1].attrib['t'])  # next <c> element's start time, not a child of the current one
2942 except IndexError:
2943 next_fragment_time = duration
2944 fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
2945 for _ in range(fragment_repeat):
2946 fragments.append({
2947 'url': re.sub(r'{start[ _]time}', str(fragment_ctx['time']), track_url_pattern),
2948 'duration': fragment_ctx['duration'] / stream_timescale,
2949 })
2950 fragment_ctx['time'] += fragment_ctx['duration']
2951
2952 if stream_type == 'text':
2953 subtitles.setdefault(stream_language, []).append({
2954 'ext': 'ismt',
2955 'protocol': 'ism',
2956 'url': ism_url,
2957 'manifest_url': ism_url,
2958 'fragments': fragments,
2959 '_download_params': {
2960 'stream_type': stream_type,
2961 'duration': duration,
2962 'timescale': stream_timescale,
2963 'fourcc': fourcc,
2964 'language': stream_language,
2965 'codec_private_data': track.get('CodecPrivateData'),
2966 }
2967 })
2968 elif stream_type in ('video', 'audio'):
2969 formats.append({
2970 'format_id': join_nonempty(ism_id, stream_name, tbr),
2971 'url': ism_url,
2972 'manifest_url': ism_url,
2973 'ext': 'ismv' if stream_type == 'video' else 'isma',
2974 'width': width,
2975 'height': height,
2976 'tbr': tbr,
2977 'asr': sampling_rate,
2978 'vcodec': 'none' if stream_type == 'audio' else fourcc,
2979 'acodec': 'none' if stream_type == 'video' else fourcc,
2980 'protocol': 'ism',
2981 'fragments': fragments,
2982 'has_drm': ism_doc.find('Protection') is not None,
2983 '_download_params': {
2984 'stream_type': stream_type,
2985 'duration': duration,
2986 'timescale': stream_timescale,
2987 'width': width or 0,
2988 'height': height or 0,
2989 'fourcc': fourcc,
2990 'language': stream_language,
2991 'codec_private_data': track.get('CodecPrivateData'),
2992 'sampling_rate': sampling_rate,
2993 'channels': int_or_none(track.get('Channels', 2)),
2994 'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
2995 'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
2996 },
2997 })
2998 return formats, subtitles
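
# Illustrative sketch (hypothetical helper, not part of the extractor API) of the
# <c> element expansion above: @t resets the clock, @d is the duration and @r is
# the total repeat count (defaulting to 1, unlike DASH where it counts extras);
# each fragment URL substitutes {start time} in the track URL pattern.
def _example_ism_fragments(c_elements, track_url_pattern, timescale):
    time, fragments = 0, []
    for c in c_elements:
        time = c.get('t', time)
        for _ in range(c.get('r', 1)):
            fragments.append({
                'url': track_url_pattern.replace('{start time}', str(time)),
                'duration': c['d'] / timescale,
            })
            time += c['d']
    return fragments

# e.g. _example_ism_fragments(
#     [{'d': 20000000, 'r': 2}], 'QualityLevels(128000)/Fragments(audio={start time})', 10000000)
# yields two 2-second fragments starting at 0 and 20000000.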
2999
3000 def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8_native', mpd_id=None, preference=None, quality=None):
3001 def absolute_url(item_url):
3002 return urljoin(base_url, item_url)
3003
3004 def parse_content_type(content_type):
3005 if not content_type:
3006 return {}
3007 ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
3008 if ctr:
3009 mimetype, codecs = ctr.groups()
3010 f = parse_codecs(codecs)
3011 f['ext'] = mimetype2ext(mimetype)
3012 return f
3013 return {}
3014
3015 def _media_formats(src, cur_media_type, type_info=None):
3016 type_info = type_info or {}
3017 full_url = absolute_url(src)
3018 ext = type_info.get('ext') or determine_ext(full_url)
3019 if ext == 'm3u8':
3020 is_plain_url = False
3021 formats = self._extract_m3u8_formats(
3022 full_url, video_id, ext='mp4',
3023 entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
3024 preference=preference, quality=quality, fatal=False)
3025 elif ext == 'mpd':
3026 is_plain_url = False
3027 formats = self._extract_mpd_formats(
3028 full_url, video_id, mpd_id=mpd_id, fatal=False)
3029 else:
3030 is_plain_url = True
3031 formats = [{
3032 'url': full_url,
3033 'vcodec': 'none' if cur_media_type == 'audio' else None,
3034 'ext': ext,
3035 }]
3036 return is_plain_url, formats
3037
3038 entries = []
3039 # amp-video and amp-audio are very similar to their HTML5 counterparts
3040 # so we will include them right here (see
3041 # https://www.ampproject.org/docs/reference/components/amp-video)
3042 # For dl8-* tags see https://delight-vr.com/documentation/dl8-video/
3043 _MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video|audio)'
3044 media_tags = [(media_tag, media_tag_name, media_type, '')
3045 for media_tag, media_tag_name, media_type
3046 in re.findall(r'(?s)(<(%s)[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage)]
3047 media_tags.extend(re.findall(
3048 # We only allow video|audio followed by a whitespace or '>'.
3049 # Allowing more characters may result in a significant slowdown (see
3050 # https://github.com/ytdl-org/youtube-dl/issues/11979,
3051 # e.g. http://www.porntrex.com/maps/videositemap.xml).
3052 r'(?s)(<(?P<tag>%s)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>' % _MEDIA_TAG_NAME_RE, webpage))
3053 for media_tag, _, media_type, media_content in media_tags:
3054 media_info = {
3055 'formats': [],
3056 'subtitles': {},
3057 }
3058 media_attributes = extract_attributes(media_tag)
3059 src = strip_or_none(dict_get(media_attributes, ('src', 'data-video-src', 'data-src', 'data-source')))
3060 if src:
3061 f = parse_content_type(media_attributes.get('type'))
3062 _, formats = _media_formats(src, media_type, f)
3063 media_info['formats'].extend(formats)
3064 media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
3065 if media_content:
3066 for source_tag in re.findall(r'<source[^>]+>', media_content):
3067 s_attr = extract_attributes(source_tag)
3068 # data-video-src and data-src are non-standard but seen
3069 # several times in the wild
3070 src = strip_or_none(dict_get(s_attr, ('src', 'data-video-src', 'data-src', 'data-source')))
3071 if not src:
3072 continue
3073 f = parse_content_type(s_attr.get('type'))
3074 is_plain_url, formats = _media_formats(src, media_type, f)
3075 if is_plain_url:
3076 # width, height, res, label and title attributes are
3077 # all non-standard but seen several times in the wild
3078 labels = [
3079 s_attr.get(lbl)
3080 for lbl in ('label', 'title')
3081 if str_or_none(s_attr.get(lbl))
3082 ]
3083 width = int_or_none(s_attr.get('width'))
3084 height = (int_or_none(s_attr.get('height'))
3085 or int_or_none(s_attr.get('res')))
3086 if not width or not height:
3087 for lbl in labels:
3088 resolution = parse_resolution(lbl)
3089 if not resolution:
3090 continue
3091 width = width or resolution.get('width')
3092 height = height or resolution.get('height')
3093 for lbl in labels:
3094 tbr = parse_bitrate(lbl)
3095 if tbr:
3096 break
3097 else:
3098 tbr = None
3099 f.update({
3100 'width': width,
3101 'height': height,
3102 'tbr': tbr,
3103 'format_id': s_attr.get('label') or s_attr.get('title'),
3104 })
3105 f.update(formats[0])
3106 media_info['formats'].append(f)
3107 else:
3108 media_info['formats'].extend(formats)
3109 for track_tag in re.findall(r'<track[^>]+>', media_content):
3110 track_attributes = extract_attributes(track_tag)
3111 kind = track_attributes.get('kind')
3112 if not kind or kind in ('subtitles', 'captions'):
3113 src = strip_or_none(track_attributes.get('src'))
3114 if not src:
3115 continue
3116 lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
3117 media_info['subtitles'].setdefault(lang, []).append({
3118 'url': absolute_url(src),
3119 })
3120 for f in media_info['formats']:
3121 f.setdefault('http_headers', {})['Referer'] = base_url
3122 if media_info['formats'] or media_info['subtitles']:
3123 entries.append(media_info)
3124 return entries
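
# For illustration (hypothetical input), the `type` attribute parsing above maps a
# MIME type with a codecs parameter onto format fields:
#   parse_content_type('video/mp4; codecs="avc1.42E01E, mp4a.40.2"')
# yields the vcodec/acodec split produced by parse_codecs() plus {'ext': 'mp4'}
# from mimetype2ext().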
3125
3126 def _extract_akamai_formats(self, *args, **kwargs):
3127 fmts, subs = self._extract_akamai_formats_and_subtitles(*args, **kwargs)
3128 if subs:
3129 self._report_ignoring_subs('akamai')
3130 return fmts
3131
3132 def _extract_akamai_formats_and_subtitles(self, manifest_url, video_id, hosts={}):
3133 signed = 'hdnea=' in manifest_url
3134 if not signed:
3135 # https://learn.akamai.com/en-us/webhelp/media-services-on-demand/stream-packaging-user-guide/GUID-BE6C0F73-1E06-483B-B0EA-57984B91B7F9.html
3136 manifest_url = re.sub(
3137 r'(?:b=[\d,-]+|(?:__a__|attributes)=off|__b__=\d+)&?',
3138 '', manifest_url).strip('?')
3139
3140 formats = []
3141 subtitles = {}
3142
3143 hdcore_sign = 'hdcore=3.7.0'
3144 f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
3145 hds_host = hosts.get('hds')
3146 if hds_host:
3147 f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
3148 if 'hdcore=' not in f4m_url:
3149 f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
3150 f4m_formats = self._extract_f4m_formats(
3151 f4m_url, video_id, f4m_id='hds', fatal=False)
3152 for entry in f4m_formats:
3153 entry.update({'extra_param_to_segment_url': hdcore_sign})
3154 formats.extend(f4m_formats)
3155
3156 m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
3157 hls_host = hosts.get('hls')
3158 if hls_host:
3159 m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
3160 m3u8_formats, m3u8_subtitles = self._extract_m3u8_formats_and_subtitles(
3161 m3u8_url, video_id, 'mp4', 'm3u8_native',
3162 m3u8_id='hls', fatal=False)
3163 formats.extend(m3u8_formats)
3164 subtitles = self._merge_subtitles(subtitles, m3u8_subtitles)
3165
3166 http_host = hosts.get('http')
3167 if http_host and m3u8_formats and not signed:
3168 REPL_REGEX = r'https?://[^/]+/i/([^,]+),([^/]+),([^/]+)\.csmil/.+'
3169 qualities = re.match(REPL_REGEX, m3u8_url).group(2).split(',')
3170 qualities_length = len(qualities)
3171 if len(m3u8_formats) in (qualities_length, qualities_length + 1):
3172 i = 0
3173 for f in m3u8_formats:
3174 if f['vcodec'] != 'none':
3175 for protocol in ('http', 'https'):
3176 http_f = f.copy()
3177 del http_f['manifest_url']
3178 http_url = re.sub(
3179 REPL_REGEX, protocol + fr'://{http_host}/\g<1>{qualities[i]}\3', f['url'])
3180 http_f.update({
3181 'format_id': http_f['format_id'].replace('hls-', protocol + '-'),
3182 'url': http_url,
3183 'protocol': protocol,
3184 })
3185 formats.append(http_f)
3186 i += 1
3187
3188 return formats, subtitles
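
# For illustration (hypothetical URLs), the HLS->HTTP rewrite above takes an Akamai
# master playlist such as
#   https://example-vh.akamaihd.net/i/video/clip_,400,800,1200,k.mp4.csmil/master.m3u8
# whose comma-separated group lists one bitrate per quality, and maps each HLS video
# format onto a plain progressive URL like
#   https://cdn.example.com/video/clip_400k.mp4
# provided the format counts line up and the URL is not signed (no 'hdnea=').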
3189
3190 def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
3191 query = urllib.parse.urlparse(url).query
3192 url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
3193 mobj = re.search(
3194 r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
3195 url_base = mobj.group('url')
3196 http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
3197 formats = []
3198
3199 def manifest_url(manifest):
3200 m_url = f'{http_base_url}/{manifest}'
3201 if query:
3202 m_url += '?%s' % query
3203 return m_url
3204
3205 if 'm3u8' not in skip_protocols:
3206 formats.extend(self._extract_m3u8_formats(
3207 manifest_url('playlist.m3u8'), video_id, 'mp4',
3208 m3u8_entry_protocol, m3u8_id='hls', fatal=False))
3209 if 'f4m' not in skip_protocols:
3210 formats.extend(self._extract_f4m_formats(
3211 manifest_url('manifest.f4m'),
3212 video_id, f4m_id='hds', fatal=False))
3213 if 'dash' not in skip_protocols:
3214 formats.extend(self._extract_mpd_formats(
3215 manifest_url('manifest.mpd'),
3216 video_id, mpd_id='dash', fatal=False))
3217 if re.search(r'(?:/smil:|\.smil)', url_base):
3218 if 'smil' not in skip_protocols:
3219 rtmp_formats = self._extract_smil_formats(
3220 manifest_url('jwplayer.smil'),
3221 video_id, fatal=False)
3222 for rtmp_format in rtmp_formats:
3223 rtsp_format = rtmp_format.copy()
3224 rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
3225 del rtsp_format['play_path']
3226 del rtsp_format['ext']
3227 rtsp_format.update({
3228 'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
3229 'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
3230 'protocol': 'rtsp',
3231 })
3232 formats.extend([rtmp_format, rtsp_format])
3233 else:
3234 for protocol in ('rtmp', 'rtsp'):
3235 if protocol not in skip_protocols:
3236 formats.append({
3237 'url': f'{protocol}:{url_base}',
3238 'format_id': protocol,
3239 'protocol': protocol,
3240 })
3241 return formats
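
# For illustration (hypothetical URL), given
#   https://example.com:1935/vod/mp4:sample.mp4/playlist.m3u8
# the manifest suffix is stripped and the remaining base is probed as
# playlist.m3u8 (HLS), manifest.f4m (HDS) and manifest.mpd (DASH); since the path
# contains no SMIL reference, plain rtmp:// and rtsp:// variants of the base URL
# are appended instead.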
3242
3243 def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
3244 mobj = re.search(
3245 r'''(?s)jwplayer\s*\(\s*(?P<q>'|")(?!(?P=q)).+(?P=q)\s*\)(?!</script>).*?\.\s*setup\s*\(\s*(?P<options>(?:\([^)]*\)|[^)])+)\s*\)''',
3246 webpage)
3247 if mobj:
3248 try:
3249 jwplayer_data = self._parse_json(mobj.group('options'),
3250 video_id=video_id,
3251 transform_source=transform_source)
3252 except ExtractorError:
3253 pass
3254 else:
3255 if isinstance(jwplayer_data, dict):
3256 return jwplayer_data
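
# For illustration (hypothetical page content), the regex above captures the options
# object from an embed like
#   jwplayer("vplayer").setup({file: "https://example.com/v.mp4", image: "poster.jpg"});
# and js_to_json then makes the unquoted keys parseable as JSON.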
3257
3258 def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
3259 jwplayer_data = self._find_jwplayer_data(
3260 webpage, video_id, transform_source=js_to_json)
3261 return self._parse_jwplayer_data(
3262 jwplayer_data, video_id, *args, **kwargs)
3263
3264 def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
3265 m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
3266 entries = []
3267 if not isinstance(jwplayer_data, dict):
3268 return entries
3269
3270 playlist_items = jwplayer_data.get('playlist')
3271 # JWPlayer backward compatibility: single playlist item/flattened playlists
3272 # https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
3273 # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
3274 if not isinstance(playlist_items, list):
3275 playlist_items = (playlist_items or jwplayer_data, )
3276
3277 for video_data in playlist_items:
3278 if not isinstance(video_data, dict):
3279 continue
3280 # JWPlayer backward compatibility: flattened sources
3281 # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
3282 if 'sources' not in video_data:
3283 video_data['sources'] = [video_data]
3284
3285 this_video_id = video_id or video_data['mediaid']
3286
3287 formats = self._parse_jwplayer_formats(
3288 video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
3289 mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)
3290
3291 subtitles = {}
3292 tracks = video_data.get('tracks')
3293 if tracks and isinstance(tracks, list):
3294 for track in tracks:
3295 if not isinstance(track, dict):
3296 continue
3297 track_kind = track.get('kind')
3298 if not track_kind or not isinstance(track_kind, str):
3299 continue
3300 if track_kind.lower() not in ('captions', 'subtitles'):
3301 continue
3302 track_url = urljoin(base_url, track.get('file'))
3303 if not track_url:
3304 continue
3305 subtitles.setdefault(track.get('label') or 'en', []).append({
3306 'url': self._proto_relative_url(track_url)
3307 })
3308
3309 entry = {
3310 'id': this_video_id,
3311 'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')),
3312 'description': clean_html(video_data.get('description')),
3313 'thumbnail': urljoin(base_url, self._proto_relative_url(video_data.get('image'))),
3314 'timestamp': int_or_none(video_data.get('pubdate')),
3315 'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
3316 'subtitles': subtitles,
3317 'alt_title': clean_html(video_data.get('subtitle')), # attributes used e.g. by Tele5 ...
3318 'genre': clean_html(video_data.get('genre')),
3319 'channel': clean_html(dict_get(video_data, ('category', 'channel'))),
3320 'season_number': int_or_none(video_data.get('season')),
3321 'episode_number': int_or_none(video_data.get('episode')),
3322 'release_year': int_or_none(video_data.get('releasedate')),
3323 'age_limit': int_or_none(video_data.get('age_restriction')),
3324 }
3325 # https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
3326 if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
3327 entry.update({
3328 '_type': 'url_transparent',
3329 'url': formats[0]['url'],
3330 })
3331 else:
3332 entry['formats'] = formats
3333 entries.append(entry)
3334 if len(entries) == 1:
3335 return entries[0]
3336 else:
3337 return self.playlist_result(entries)
3338
3339 def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
3340 m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
3341 urls = set()
3342 formats = []
3343 for source in jwplayer_sources_data:
3344 if not isinstance(source, dict):
3345 continue
3346 source_url = urljoin(
3347 base_url, self._proto_relative_url(source.get('file')))
3348 if not source_url or source_url in urls:
3349 continue
3350 urls.add(source_url)
3351 source_type = source.get('type') or ''
3352 ext = mimetype2ext(source_type) or determine_ext(source_url)
3353 if source_type == 'hls' or ext == 'm3u8' or 'format=m3u8-aapl' in source_url:
3354 formats.extend(self._extract_m3u8_formats(
3355 source_url, video_id, 'mp4', entry_protocol='m3u8_native',
3356 m3u8_id=m3u8_id, fatal=False))
3357 elif source_type == 'dash' or ext == 'mpd' or 'format=mpd-time-csf' in source_url:
3358 formats.extend(self._extract_mpd_formats(
3359 source_url, video_id, mpd_id=mpd_id, fatal=False))
3360 elif ext == 'smil':
3361 formats.extend(self._extract_smil_formats(
3362 source_url, video_id, fatal=False))
3363 # https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
3364 elif source_type.startswith('audio') or ext in (
3365 'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
3366 formats.append({
3367 'url': source_url,
3368 'vcodec': 'none',
3369 'ext': ext,
3370 })
3371 else:
3372 format_id = str_or_none(source.get('label'))
3373 height = int_or_none(source.get('height'))
3374 if height is None and format_id:
3375 # Often no height is provided but there is a label in a
3376 # format like "1080p", "720p SD", or 1080.
3377 height = parse_resolution(format_id).get('height')
3378 a_format = {
3379 'url': source_url,
3380 'width': int_or_none(source.get('width')),
3381 'height': height,
3382 'tbr': int_or_none(source.get('bitrate'), scale=1000),
3383 'filesize': int_or_none(source.get('filesize')),
3384 'ext': ext,
3385 'format_id': format_id
3386 }
3387 if source_url.startswith('rtmp'):
3388 a_format['ext'] = 'flv'
3389 # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
3390 # of jwplayer.flash.swf
3391 rtmp_url_parts = re.split(
3392 r'((?:mp4|mp3|flv):)', source_url, maxsplit=1)
3393 if len(rtmp_url_parts) == 3:
3394 rtmp_url, prefix, play_path = rtmp_url_parts
3395 a_format.update({
3396 'url': rtmp_url,
3397 'play_path': prefix + play_path,
3398 })
3399 if rtmp_params:
3400 a_format.update(rtmp_params)
3401 formats.append(a_format)
3402 return formats
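
# For illustration (hypothetical URL), the RTMP split above separates the stream URL
# from the play path:
#   re.split(r'((?:mp4|mp3|flv):)', 'rtmp://example.com/app/mp4:path/video.mp4', maxsplit=1)
#   == ['rtmp://example.com/app/', 'mp4:', 'path/video.mp4']
# so 'url' becomes 'rtmp://example.com/app/' and 'play_path' becomes 'mp4:path/video.mp4'.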
3403
3404 def _live_title(self, name):
3405 self._downloader.deprecation_warning('yt_dlp.InfoExtractor._live_title is deprecated and does not work as expected')
3406 return name
3407
3408 def _int(self, v, name, fatal=False, **kwargs):
3409 res = int_or_none(v, **kwargs)
3410 if res is None:
3411 msg = f'Failed to extract {name}: Could not parse value {v!r}'
3412 if fatal:
3413 raise ExtractorError(msg)
3414 else:
3415 self.report_warning(msg)
3416 return res
3417
3418 def _float(self, v, name, fatal=False, **kwargs):
3419 res = float_or_none(v, **kwargs)
3420 if res is None:
3421 msg = f'Failed to extract {name}: Could not parse value {v!r}'
3422 if fatal:
3423 raise ExtractorError(msg)
3424 else:
3425 self.report_warning(msg)
3426 return res
3427
3428 def _set_cookie(self, domain, name, value, expire_time=None, port=None,
3429 path='/', secure=False, discard=False, rest={}, **kwargs):
3430 cookie = http.cookiejar.Cookie(
3431 0, name, value, port, port is not None, domain, True,
3432 domain.startswith('.'), path, True, secure, expire_time,
3433 discard, None, None, rest)
3434 self.cookiejar.set_cookie(cookie)
3435
3436 def _get_cookies(self, url):
3437 """ Return a http.cookies.SimpleCookie with the cookies for the url """
3438 return LenientSimpleCookie(self._downloader._calc_cookies(url))
3439
3440 def _apply_first_set_cookie_header(self, url_handle, cookie):
3441 """
3442 Apply first Set-Cookie header instead of the last. Experimental.
3443
3444 Some sites (e.g. [1-3]) may serve two cookies under the same name
3445 in the Set-Cookie header and expect the first (old) one to be set
3446 rather than the second (new) one. However, per RFC 6265 the newer
3447 cookie should be stored, which is what actually happens.
3448 We work around this issue by manually resetting the cookie to
3449 the first one.
3450 1. https://new.vk.com/
3451 2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
3452 3. https://learning.oreilly.com/
3453 """
3454 for header, cookies in url_handle.headers.items():
3455 if header.lower() != 'set-cookie':
3456 continue
3457 cookies = cookies.encode('iso-8859-1').decode('utf-8')
3458 cookie_value = re.search(
3459 r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie, cookies)
3460 if cookie_value:
3461 value, domain = cookie_value.groups()
3462 self._set_cookie(domain, cookie, value)
3463 break
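
# For illustration (hypothetical headers), given a response carrying
#   Set-Cookie: remixlang=0; Domain=.example.com, remixlang=777; Domain=.example.com
# the cookiejar would keep remixlang=777, while
# _apply_first_set_cookie_header(urlh, 'remixlang') resets it to 0.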
3464
3465 @classmethod
3466 def get_testcases(cls, include_onlymatching=False):
3467 # Do not look in super classes
3468 t = vars(cls).get('_TEST')
3469 if t:
3470 assert not hasattr(cls, '_TESTS'), f'{cls.ie_key()}IE has _TEST and _TESTS'
3471 tests = [t]
3472 else:
3473 tests = vars(cls).get('_TESTS', [])
3474 for t in tests:
3475 if not include_onlymatching and t.get('only_matching', False):
3476 continue
3477 t['name'] = cls.ie_key()
3478 yield t
3479 if getattr(cls, '__wrapped__', None):
3480 yield from cls.__wrapped__.get_testcases(include_onlymatching)
3481
3482 @classmethod
3483 def get_webpage_testcases(cls):
3484 tests = vars(cls).get('_WEBPAGE_TESTS', [])
3485 for t in tests:
3486 t['name'] = cls.ie_key()
3487 yield t
3488 if getattr(cls, '__wrapped__', None):
3489 yield from cls.__wrapped__.get_webpage_testcases()
3490
3491 @classproperty(cache=True)
3492 def age_limit(cls):
3493 """Get age limit from the testcases"""
3494 return max(traverse_obj(
3495 (*cls.get_testcases(include_onlymatching=False), *cls.get_webpage_testcases()),
3496 (..., (('playlist', 0), None), 'info_dict', 'age_limit')) or [0])
3497
3498 @classproperty(cache=True)
3499 def _RETURN_TYPE(cls):
3500 """What the extractor returns: "video", "playlist", "any", or None (Unknown)"""
3501 tests = tuple(cls.get_testcases(include_onlymatching=False))
3502 if not tests:
3503 return None
3504 elif not any(k.startswith('playlist') for test in tests for k in test):
3505 return 'video'
3506 elif all(any(k.startswith('playlist') for k in test) for test in tests):
3507 return 'playlist'
3508 return 'any'
3509
3510 @classmethod
3511 def is_single_video(cls, url):
3512 """Returns whether the URL is of a single video, None if unknown"""
3513 assert cls.suitable(url), 'The URL must be suitable for the extractor'
3514 return {'video': True, 'playlist': False}.get(cls._RETURN_TYPE)
3515
3516 @classmethod
3517 def is_suitable(cls, age_limit):
3518 """Test whether the extractor is generally suitable for the given age limit"""
3519 return not age_restricted(cls.age_limit, age_limit)
3520
3521 @classmethod
3522 def description(cls, *, markdown=True, search_examples=None):
3523 """Description of the extractor"""
3524 desc = ''
3525 if cls._NETRC_MACHINE:
3526 if markdown:
3527 desc += f' [<abbr title="netrc machine"><em>{cls._NETRC_MACHINE}</em></abbr>]'
3528 else:
3529 desc += f' [{cls._NETRC_MACHINE}]'
3530 if cls.IE_DESC is False:
3531 desc += ' [HIDDEN]'
3532 elif cls.IE_DESC:
3533 desc += f' {cls.IE_DESC}'
3534 if cls.SEARCH_KEY:
3535 desc += f'{";" if cls.IE_DESC else ""} "{cls.SEARCH_KEY}:" prefix'
3536 if search_examples:
3537 _COUNTS = ('', '5', '10', 'all')
3538 desc += f' (e.g. "{cls.SEARCH_KEY}{random.choice(_COUNTS)}:{random.choice(search_examples)}")'
3539 if not cls.working():
3540 desc += ' (**Currently broken**)' if markdown else ' (Currently broken)'
3541
3542 # Escape emojis. Ref: https://github.com/github/markup/issues/1153
3543 name = (' - **%s**' % re.sub(r':(\w+:)', ':\u200B\\g<1>', cls.IE_NAME)) if markdown else cls.IE_NAME
3544 return f'{name}:{desc}' if desc else name
3545
3546 def extract_subtitles(self, *args, **kwargs):
3547 if (self.get_param('writesubtitles', False)
3548 or self.get_param('listsubtitles')):
3549 return self._get_subtitles(*args, **kwargs)
3550 return {}
3551
3552 def _get_subtitles(self, *args, **kwargs):
3553 raise NotImplementedError('This method must be implemented by subclasses')
3554
3555 class CommentsDisabled(Exception):
3556 """Raise in _get_comments if comments are disabled for the video"""
3557
3558 def extract_comments(self, *args, **kwargs):
3559 if not self.get_param('getcomments'):
3560 return None
3561 generator = self._get_comments(*args, **kwargs)
3562
3563 def extractor():
3564 comments = []
3565 interrupted = True
3566 try:
3567 while True:
3568 comments.append(next(generator))
3569 except StopIteration:
3570 interrupted = False
3571 except KeyboardInterrupt:
3572 self.to_screen('Interrupted by user')
3573 except self.CommentsDisabled:
3574 return {'comments': None, 'comment_count': None}
3575 except Exception as e:
3576 if self.get_param('ignoreerrors') is not True:
3577 raise
3578 self._downloader.report_error(e)
3579 comment_count = len(comments)
3580 self.to_screen(f'Extracted {comment_count} comments')
3581 return {
3582 'comments': comments,
3583 'comment_count': None if interrupted else comment_count
3584 }
3585 return extractor
3586
3587 def _get_comments(self, *args, **kwargs):
3588 raise NotImplementedError('This method must be implemented by subclasses')
3589
3590 @staticmethod
3591 def _merge_subtitle_items(subtitle_list1, subtitle_list2):
3592 """ Merge subtitle items for one language. Items with duplicated URLs/data
3593 will be dropped. """
3594 list1_data = {(item.get('url'), item.get('data')) for item in subtitle_list1}
3595 ret = list(subtitle_list1)
3596 ret.extend(item for item in subtitle_list2 if (item.get('url'), item.get('data')) not in list1_data)
3597 return ret
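
# For example:
#   _merge_subtitle_items([{'url': 'http://a/en.vtt'}],
#                         [{'url': 'http://a/en.vtt'}, {'url': 'http://b/en.srt'}])
# returns [{'url': 'http://a/en.vtt'}, {'url': 'http://b/en.srt'}] - the duplicate
# from the second list is dropped.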
3598
3599 @classmethod
3600 def _merge_subtitles(cls, *dicts, target=None):
3601 """ Merge subtitle dictionaries, language by language. """
3602 if target is None:
3603 target = {}
3604 for d in dicts:
3605 for lang, subs in d.items():
3606 target[lang] = cls._merge_subtitle_items(target.get(lang, []), subs)
3607 return target
3608
3609 def extract_automatic_captions(self, *args, **kwargs):
3610 if (self.get_param('writeautomaticsub', False)
3611 or self.get_param('listsubtitles')):
3612 return self._get_automatic_captions(*args, **kwargs)
3613 return {}
3614
3615 def _get_automatic_captions(self, *args, **kwargs):
3616 raise NotImplementedError('This method must be implemented by subclasses')
3617
3618 @functools.cached_property
3619 def _cookies_passed(self):
3620 """Whether cookies have been passed to YoutubeDL"""
3621 return self.get_param('cookiefile') is not None or self.get_param('cookiesfrombrowser') is not None
3622
3623 def mark_watched(self, *args, **kwargs):
3624 if not self.get_param('mark_watched', False):
3625 return
3626 if (self.supports_login() and self._get_login_info()[0] is not None) or self._cookies_passed:
3627 self._mark_watched(*args, **kwargs)
3628
3629 def _mark_watched(self, *args, **kwargs):
3630 raise NotImplementedError('This method must be implemented by subclasses')
3631
3632 def geo_verification_headers(self):
3633 headers = {}
3634 geo_verification_proxy = self.get_param('geo_verification_proxy')
3635 if geo_verification_proxy:
3636 headers['Ytdl-request-proxy'] = geo_verification_proxy
3637 return headers
3638
3639 @staticmethod
3640 def _generic_id(url):
3641 return urllib.parse.unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
3642
3643 def _generic_title(self, url='', webpage='', *, default=None):
3644 return (self._og_search_title(webpage, default=None)
3645 or self._html_extract_title(webpage, default=None)
3646 or urllib.parse.unquote(os.path.splitext(url_basename(url))[0])
3647 or default)
3648
3649 @staticmethod
3650 def _availability(is_private=None, needs_premium=None, needs_subscription=None, needs_auth=None, is_unlisted=None):
3651 all_known = all(map(
3652 lambda x: x is not None,
3653 (is_private, needs_premium, needs_subscription, needs_auth, is_unlisted)))
3654 return (
3655 'private' if is_private
3656 else 'premium_only' if needs_premium
3657 else 'subscriber_only' if needs_subscription
3658 else 'needs_auth' if needs_auth
3659 else 'unlisted' if is_unlisted
3660 else 'public' if all_known
3661 else None)
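
# For example, _availability(is_private=False, needs_premium=False,
# needs_subscription=False, needs_auth=False, is_unlisted=False) returns 'public',
# any single truthy flag short-circuits to its label (e.g.
# _availability(is_unlisted=True) == 'unlisted'), and all-None flags return
# None (unknown).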
3662
3663 def _configuration_arg(self, key, default=NO_DEFAULT, *, ie_key=None, casesense=False):
3664 '''
3665 @returns A list of values for the extractor argument given by "key"
3666 or "default" if no such key is present
3667 @param default The default value to return when the key is not present (default: [])
3668 @param casesense When false, the values are converted to lower case
3669 '''
3670 ie_key = ie_key if isinstance(ie_key, str) else (ie_key or self).ie_key()
3671 val = traverse_obj(self._downloader.params, ('extractor_args', ie_key.lower(), key))
3672 if val is None:
3673 return [] if default is NO_DEFAULT else default
3674 return list(val) if casesense else [x.lower() for x in val]
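
# For example, with `--extractor-args "youtube:player_client=android"` on the
# command line, self._configuration_arg('player_client', ie_key='youtube')
# returns ['android'], while an absent key returns [] (or the given default).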
3675
3676 def _yes_playlist(self, playlist_id, video_id, smuggled_data=None, *, playlist_label='playlist', video_label='video'):
3677 if not playlist_id or not video_id:
3678 return not video_id
3679
3680 no_playlist = (smuggled_data or {}).get('force_noplaylist')
3681 if no_playlist is not None:
3682 return not no_playlist
3683
3684 video_id = '' if video_id is True else f' {video_id}'
3685 playlist_id = '' if playlist_id is True else f' {playlist_id}'
3686 if self.get_param('noplaylist'):
3687 self.to_screen(f'Downloading just the {video_label}{video_id} because of --no-playlist')
3688 return False
3689 self.to_screen(f'Downloading {playlist_label}{playlist_id} - add --no-playlist to download just the {video_label}{video_id}')
3690 return True
3691
3692 def _error_or_warning(self, err, _count=None, _retries=0, *, fatal=True):
3693 RetryManager.report_retry(
3694 err, _count or int(fatal), _retries,
3695 info=self.to_screen, warn=self.report_warning, error=None if fatal else self.report_warning,
3696 sleep_func=self.get_param('retry_sleep_functions', {}).get('extractor'))
3697
3698 def RetryManager(self, **kwargs):
3699 return RetryManager(self.get_param('extractor_retries', 3), self._error_or_warning, **kwargs)
3700
3701 def _extract_generic_embeds(self, url, *args, info_dict={}, note='Extracting generic embeds', **kwargs):
3702 display_id = traverse_obj(info_dict, 'display_id', 'id')
3703 self.to_screen(f'{format_field(display_id, None, "%s: ")}{note}')
3704 return self._downloader.get_info_extractor('Generic')._extract_embeds(
3705 smuggle_url(url, {'block_ies': [self.ie_key()]}), *args, **kwargs)
3706
3707 @classmethod
3708 def extract_from_webpage(cls, ydl, url, webpage):
3709 ie = (cls if isinstance(cls._extract_from_webpage, types.MethodType)
3710 else ydl.get_info_extractor(cls.ie_key()))
3711 for info in ie._extract_from_webpage(url, webpage) or []:
3712 # url = None since we do not want to set (webpage/original)_url
3713 ydl.add_default_extra_info(info, ie, None)
3714 yield info
3715
3716 @classmethod
3717 def _extract_from_webpage(cls, url, webpage):
3718 for embed_url in orderedSet(
3719 cls._extract_embed_urls(url, webpage) or [], lazy=True):
3720 yield cls.url_result(embed_url, None if cls._VALID_URL is False else cls)
3721
3722 @classmethod
3723 def _extract_embed_urls(cls, url, webpage):
3724 """@returns all the embed urls on the webpage"""
3725 if '_EMBED_URL_RE' not in cls.__dict__:
3726 assert isinstance(cls._EMBED_REGEX, (list, tuple))
3727 for idx, regex in enumerate(cls._EMBED_REGEX):
3728 assert regex.count('(?P<url>') == 1, \
3729 f'{cls.__name__}._EMBED_REGEX[{idx}] must have exactly 1 url group\n\t{regex}'
3730 cls._EMBED_URL_RE = tuple(map(re.compile, cls._EMBED_REGEX))
3731
3732 for regex in cls._EMBED_URL_RE:
3733 for mobj in regex.finditer(webpage):
3734 embed_url = urllib.parse.urljoin(url, unescapeHTML(mobj.group('url')))
3735 if cls._VALID_URL is False or cls.suitable(embed_url):
3736 yield embed_url
3737
3738 class StopExtraction(Exception):
3739 pass
3740
3741 @classmethod
3742 def _extract_url(cls, webpage): # TODO: Remove
3743 """Only for compatibility with some older extractors"""
3744 return next(iter(cls._extract_embed_urls(None, webpage) or []), None)
3745
3746 @classmethod
3747 def __init_subclass__(cls, *, plugin_name=None, **kwargs):
3748 if plugin_name:
3749 mro = inspect.getmro(cls)
3750 super_class = cls.__wrapped__ = mro[mro.index(cls) + 1]
3751 cls.PLUGIN_NAME, cls.ie_key = plugin_name, super_class.ie_key
3752 cls.IE_NAME = f'{super_class.IE_NAME}+{plugin_name}'
3753 while getattr(super_class, '__wrapped__', None):
3754 super_class = super_class.__wrapped__
3755 setattr(sys.modules[super_class.__module__], super_class.__name__, cls)
3756 _PLUGIN_OVERRIDES[super_class].append(cls)
3757
3758 return super().__init_subclass__(**kwargs)
3759
3760
3761 class SearchInfoExtractor(InfoExtractor):
3762 """
3763 Base class for paged search query extractors.
3764 They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
3765 Instances should define _SEARCH_KEY and optionally _MAX_RESULTS
3766 """
3767
3768 _MAX_RESULTS = float('inf')
3769 _RETURN_TYPE = 'playlist'
3770
3771 @classproperty
3772 def _VALID_URL(cls):
3773 return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
3774
3775 def _real_extract(self, query):
3776 prefix, query = self._match_valid_url(query).group('prefix', 'query')
3777 if prefix == '':
3778 return self._get_n_results(query, 1)
3779 elif prefix == 'all':
3780 return self._get_n_results(query, self._MAX_RESULTS)
3781 else:
3782 n = int(prefix)
3783 if n <= 0:
3784 raise ExtractorError(f'invalid download number {n} for query "{query}"')
3785 elif n > self._MAX_RESULTS:
3786 self.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
3787 n = self._MAX_RESULTS
3788 return self._get_n_results(query, n)
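
# For example, with _SEARCH_KEY = 'ytsearch', the query URL 'ytsearch5:foo'
# downloads 5 results, 'ytsearch:foo' downloads 1 and 'ytsearchall:foo' downloads
# up to _MAX_RESULTS.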
3789
3790 def _get_n_results(self, query, n):
3791 """Get a specified number of results for a query.
3792 Either this function or _search_results must be overridden by subclasses """
3793 return self.playlist_result(
3794 itertools.islice(self._search_results(query), 0, None if n == float('inf') else n),
3795 query, query)
3796
3797 def _search_results(self, query):
3798 """Returns an iterator of search results"""
3799 raise NotImplementedError('This method must be implemented by subclasses')
3800
3801 @classproperty
3802 def SEARCH_KEY(cls):
3803 return cls._SEARCH_KEY
3804
3805
3806 class UnsupportedURLIE(InfoExtractor):
3807 _VALID_URL = '.*'
3808 _ENABLED = False
3809 IE_DESC = False
3810
3811 def _real_extract(self, url):
3812 raise UnsupportedError(url)
3813
3814
3815 _PLUGIN_OVERRIDES = collections.defaultdict(list)