[extractors] Use new framework for existing embeds (#4307)

diff --git a/yt_dlp/extractor/common.py b/yt_dlp/extractor/common.py
index 4c37044f67f1eda4df2cbe289824c4819c754e01..a6933e738b1c41316f76694d62e87853ac619ad3 100644
--- a/yt_dlp/extractor/common.py
+++ b/yt_dlp/extractor/common.py
 import netrc
 import os
 import random
+import re
 import sys
 import time
+import types
+import urllib.parse
 import urllib.request
 import xml.etree.ElementTree
 
-from ..compat import functools, re  # isort: split
-from ..compat import (
-    compat_etree_fromstring,
-    compat_expanduser,
-    compat_os_name,
-    compat_str,
-    compat_urllib_parse_unquote,
-    compat_urllib_parse_urlencode,
-    compat_urlparse,
-)
+from ..compat import functools  # isort: split
+from ..compat import compat_etree_fromstring, compat_expanduser, compat_os_name
 from ..downloader import FileDownloader
 from ..downloader.f4m import get_base_url, remove_encrypted_media
 from ..utils import (
+    IDENTITY,
     JSON_LD_RE,
     NO_DEFAULT,
     ExtractorError,
     parse_m3u8_attributes,
     parse_resolution,
     sanitize_filename,
+    sanitize_url,
     sanitized_Request,
     str_or_none,
     str_to_int,
     strip_or_none,
     traverse_obj,
+    try_call,
     try_get,
     unescapeHTML,
     unified_strdate,
@@ -317,7 +315,8 @@ class InfoExtractor:
                     live stream that goes on instead of a fixed-length video.
     was_live:       True, False, or None (=unknown). Whether this video was
                     originally a live stream.
-    live_status:    'is_live', 'is_upcoming', 'was_live', 'not_live' or None (=unknown)
+    live_status:    None (=unknown), 'is_live', 'is_upcoming', 'was_live', 'not_live'
+                    or 'post_live' (was live, but VOD is not yet processed)
                     If absent, automatically set from is_live, was_live
     start_time:     Time in seconds where the reproduction should start, as
                     specified in the URL.
@@ -335,6 +334,7 @@ class InfoExtractor:
                     'private', 'premium_only', 'subscriber_only', 'needs_auth',
                     'unlisted' or 'public'. Use 'InfoExtractor._availability'
                     to set it
+    _old_archive_ids: A list of old archive ids needed for backward compatibility
     __post_extractor: A function to be called just before the metadata is
                     written to either disk, logger or console. The function
                     must return a dict which will be added to the info_dict.
@@ -389,6 +389,10 @@ class InfoExtractor:
     section_start:  Start time of the section in seconds
     section_end:    End time of the section in seconds
 
+    The following fields should only be set for storyboards:
+    rows:           Number of rows in each storyboard fragment, as an integer
+    columns:        Number of columns in each storyboard fragment, as an integer
+
     Unless mentioned otherwise, the fields should be Unicode strings.
 
     Unless mentioned otherwise, None is equivalent to absence of information.
@@ -398,7 +402,7 @@ class InfoExtractor:
     There must be a key "entries", which is a list, an iterable, or a PagedList
     object, each element of which is a valid dictionary by this specification.
 
-    Additionally, playlists can have "id", "title", and any other relevent
+    Additionally, playlists can have "id", "title", and any other relevant
     attributes with the same semantics as videos (see above).
 
     It can also have the following optional fields:
@@ -431,14 +435,26 @@ class InfoExtractor:
     title, description etc.
 
 
-    Subclasses of this should define a _VALID_URL regexp and, re-define the
-    _real_extract() and (optionally) _real_initialize() methods.
-    Probably, they should also be added to the list of extractors.
+    Subclasses of this should also be added to the list of extractors and
+    should define a _VALID_URL regexp and re-define the _real_extract() and
+    (optionally) _real_initialize() methods.
 
     Subclasses may also override suitable() if necessary, but ensure the function
     signature is preserved and that this function imports everything it needs
     (except other extractors), so that lazy_extractors works correctly.
 
+    Subclasses can define a list of _EMBED_REGEX, which will be searched for in
+    the HTML of Generic webpages. They may also override _extract_embed_urls
+    or _extract_from_webpage as necessary. While these are normally classmethods,
+    _extract_from_webpage is allowed to be an instance method.
+
+    _extract_from_webpage may raise self.StopExtraction() to stop further
+    processing of the webpage and obtain exclusive rights to it. This is useful
+    when the extractor cannot reliably be matched using just the URL.
+    Eg: invidious/peertube instances
+
+    Embed-only extractors can be defined by setting _VALID_URL = False.
+
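+    Eg: a minimal embed-capable extractor (all names here are illustrative):
+
+        class ExampleEmbedIE(InfoExtractor):
+            _VALID_URL = r'https?://player\.example\.com/embed/(?P<id>\w+)'
+            _EMBED_REGEX = [r'<iframe[^>]+\bsrc=["\'](?P<url>https?://player\.example\.com/embed/\w+)']
+
+            def _real_extract(self, url):
+                video_id = self._match_id(url)
+                webpage = self._download_webpage(url, video_id)
+                return {'id': video_id, 'title': self._og_search_title(webpage)}
+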
     To support username + password (or netrc) login, the extractor must define a
     _NETRC_MACHINE and re-define _perform_login(username, password) and
     (optionally) _initialize_pre_login() methods. The _perform_login method will
@@ -476,6 +492,8 @@ class InfoExtractor:
     _NETRC_MACHINE = None
     IE_DESC = None
     SEARCH_KEY = None
+    _VALID_URL = None
+    _EMBED_REGEX = []
 
     def _login_hint(self, method=NO_DEFAULT, netrc=None):
         password_hint = f'--username and --password, or --netrc ({netrc or self._NETRC_MACHINE}) to provide account credentials'
@@ -499,12 +517,12 @@ def __init__(self, downloader=None):
 
     @classmethod
     def _match_valid_url(cls, url):
+        if cls._VALID_URL is False:
+            return None
         # This does not use has/getattr intentionally - we want to know whether
         # we have cached the regexp for *this* class, whereas getattr would also
         # match the superclass
         if '_VALID_URL_RE' not in cls.__dict__:
-            if '_VALID_URL' not in cls.__dict__:
-                cls._VALID_URL = cls._make_valid_url()
             cls._VALID_URL_RE = re.compile(cls._VALID_URL)
         return cls._VALID_URL_RE.match(url)
 
@@ -648,10 +666,10 @@ def extract(self, url):
                         return None
                     if self._x_forwarded_for_ip:
                         ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
-                    subtitles = ie_result.get('subtitles')
-                    if (subtitles and 'live_chat' in subtitles
-                            and 'no-live-chat' in self.get_param('compat_opts', [])):
-                        del subtitles['live_chat']
+                    subtitles = ie_result.get('subtitles') or {}
+                    if 'no-live-chat' in self.get_param('compat_opts'):
+                        for lang in ('live_chat', 'comments', 'danmaku'):
+                            subtitles.pop(lang, None)
                     return ie_result
                 except GeoRestrictedError as e:
                     if self.__maybe_fake_ip_and_retry(e.countries):
@@ -703,7 +721,7 @@ def cookiejar(self):
         return self._downloader.cookiejar
 
     def _initialize_pre_login(self):
-        """ Intialization before login. Redefine in subclasses."""
+        """ Initialization before login. Redefine in subclasses."""
         pass
 
     def _perform_login(self, username, password):
@@ -737,14 +755,14 @@ def __can_accept_status_code(err, expected_status):
         else:
             return err.code in variadic(expected_status)
 
-    def _create_request(self, url_or_request, data=None, headers={}, query={}):
+    def _create_request(self, url_or_request, data=None, headers=None, query=None):
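+        # NB: headers/query default to None rather than {} so that calls never share a mutable default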
         if isinstance(url_or_request, urllib.request.Request):
             return update_Request(url_or_request, data=data, headers=headers, query=query)
         if query:
             url_or_request = update_url_query(url_or_request, query)
-        return sanitized_Request(url_or_request, data, headers)
+        return sanitized_Request(url_or_request, data, headers or {})
 
-    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}, expected_status=None):
+    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers=None, query=None, expected_status=None):
         """
         Return the response handle.
 
@@ -772,8 +790,8 @@ def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fa
         # geo unrestricted country. We will do so once we encounter any
         # geo restriction error.
         if self._x_forwarded_for_ip:
-            if 'X-Forwarded-For' not in headers:
-                headers['X-Forwarded-For'] = self._x_forwarded_for_ip
+            headers = (headers or {}).copy()
+            headers.setdefault('X-Forwarded-For', self._x_forwarded_for_ip)
 
         try:
             return self._downloader.urlopen(self._create_request(url_or_request, data, headers, query))
@@ -834,7 +852,7 @@ def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=
         """
 
         # Strip hashes from the URL (#1038)
-        if isinstance(url_or_request, (compat_str, str)):
+        if isinstance(url_or_request, str):
             url_or_request = url_or_request.partition('#')[0]
 
         urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
@@ -931,39 +949,37 @@ def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errno
 
         return content
 
-    def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True):
+    def __print_error(self, errnote, fatal, video_id, err):
+        if fatal:
+            raise ExtractorError(f'{video_id}: {errnote}', cause=err)
+        elif errnote:
+            self.report_warning(f'{video_id}: {errnote}: {err}')
+
+    def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True, errnote=None):
         if transform_source:
             xml_string = transform_source(xml_string)
         try:
             return compat_etree_fromstring(xml_string.encode('utf-8'))
         except xml.etree.ElementTree.ParseError as ve:
-            errmsg = '%s: Failed to parse XML ' % video_id
-            if fatal:
-                raise ExtractorError(errmsg, cause=ve)
-            else:
-                self.report_warning(errmsg + str(ve))
+            self.__print_error('Failed to parse XML' if errnote is None else errnote, fatal, video_id, ve)
 
-    def _parse_json(self, json_string, video_id, transform_source=None, fatal=True, **parser_kwargs):
+    def _parse_json(self, json_string, video_id, transform_source=None, fatal=True, errnote=None, **parser_kwargs):
         try:
             return json.loads(
                 json_string, cls=LenientJSONDecoder, strict=False, transform_source=transform_source, **parser_kwargs)
         except ValueError as ve:
-            errmsg = f'{video_id}: Failed to parse JSON'
-            if fatal:
-                raise ExtractorError(errmsg, cause=ve)
-            else:
-                self.report_warning(f'{errmsg}: {ve}')
+            self.__print_error('Failed to parse JSON' if errnote is None else errnote, fatal, video_id, ve)
 
-    def _parse_socket_response_as_json(self, data, video_id, transform_source=None, fatal=True):
-        return self._parse_json(
-            data[data.find('{'):data.rfind('}') + 1],
-            video_id, transform_source, fatal)
+    def _parse_socket_response_as_json(self, data, *args, **kwargs):
+        return self._parse_json(data[data.find('{'):data.rfind('}') + 1], *args, **kwargs)
 
     def __create_download_methods(name, parser, note, errnote, return_value):
 
-        def parse(ie, content, *args, **kwargs):
+        def parse(ie, content, *args, errnote=errnote, **kwargs):
             if parser is None:
                 return content
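+            # only an explicit errnote=False is forwarded, to silence the parser's warning;
+            # otherwise each parser falls back to its own default message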
+            if errnote is False:
+                kwargs['errnote'] = errnote
             # parser is fetched by name so subclasses can override it
             return getattr(ie, parser)(content, *args, **kwargs)
 
@@ -975,7 +991,7 @@ def download_handle(self, url_or_request, video_id, note=note, errnote=errnote,
             if res is False:
                 return res
             content, urlh = res
-            return parse(self, content, video_id, transform_source=transform_source, fatal=fatal), urlh
+            return parse(self, content, video_id, transform_source=transform_source, fatal=fatal, errnote=errnote), urlh
 
         def download_content(self, url_or_request, video_id, note=note, errnote=errnote, transform_source=None,
                              fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
@@ -990,7 +1006,7 @@ def download_content(self, url_or_request, video_id, note=note, errnote=errnote,
                     self.report_warning(f'Unable to load request from disk: {e}')
                 else:
                     content = self.__decode_webpage(webpage_bytes, encoding, url_or_request.headers)
-                    return parse(self, content, video_id, transform_source, fatal)
+                    return parse(self, content, video_id, transform_source=transform_source, fatal=fatal, errnote=errnote)
             kwargs = {
                 'note': note,
                 'errnote': errnote,
@@ -1145,10 +1161,12 @@ def url_result(url, ie=None, video_id=None, video_title=None, *, url_transparent
             'url': url,
         }
 
-    def playlist_from_matches(self, matches, playlist_id=None, playlist_title=None, getter=None, ie=None, video_kwargs=None, **kwargs):
-        urls = (self.url_result(self._proto_relative_url(m), ie, **(video_kwargs or {}))
-                for m in orderedSet(map(getter, matches) if getter else matches))
-        return self.playlist_result(urls, playlist_id, playlist_title, **kwargs)
+    @classmethod
+    def playlist_from_matches(cls, matches, playlist_id=None, playlist_title=None,
+                              getter=IDENTITY, ie=None, video_kwargs=None, **kwargs):
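+        # getter defaults to IDENTITY, and de-duplication is lazy, so generator input is consumed on demand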
+        return cls.playlist_result(
+            (cls.url_result(m, ie, **(video_kwargs or {})) for m in orderedSet(map(getter, matches), lazy=True)),
+            playlist_id, playlist_title, **kwargs)
 
     @staticmethod
     def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None, *, multi_video=False, **kwargs):
@@ -1355,12 +1373,20 @@ def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs
     def _dc_search_uploader(self, html):
         return self._html_search_meta('dc.creator', html, 'uploader')
 
-    def _rta_search(self, html):
+    @staticmethod
+    def _rta_search(html):
         # See http://www.rtalabel.org/index.php?content=howtofaq#single
         if re.search(r'(?ix)<meta\s+name="rating"\s+'
                      r'     content="RTA-5042-1996-1400-1577-RTA"',
                      html):
             return 18
+
+        # And then there are the jokers who advertise that they use RTA, but actually don't.
+        AGE_LIMIT_MARKERS = [
+            r'Proudly Labeled <a href="http://www\.rtalabel\.org/" title="Restricted to Adults">RTA</a>',
+        ]
+        if any(re.search(marker, html) for marker in AGE_LIMIT_MARKERS):
+            return 18
         return 0
 
     def _media_rating_search(self, html):
@@ -1427,7 +1453,7 @@ def _search_json_ld(self, html, video_id, expected_type=None, *, fatal=True, def
             return {}
 
     def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
-        if isinstance(json_ld, compat_str):
+        if isinstance(json_ld, str):
             json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
         if not json_ld:
             return {}
@@ -1508,7 +1534,7 @@ def extract_video_object(e):
                 'url': url_or_none(e.get('contentUrl')),
                 'title': unescapeHTML(e.get('name')),
                 'description': unescapeHTML(e.get('description')),
-                'thumbnails': [{'url': url}
+                'thumbnails': [{'url': unescapeHTML(url)}
                                for url in variadic(traverse_obj(e, 'thumbnailUrl', 'thumbnailURL'))
                                if url_or_none(url)],
                 'duration': parse_duration(e.get('duration')),
@@ -1517,7 +1543,7 @@ def extract_video_object(e):
                 # both types can have 'name' property(inherited from 'Thing' type). [1]
                 # however some websites are using 'Text' type instead.
                 # 1. https://schema.org/VideoObject
-                'uploader': author.get('name') if isinstance(author, dict) else author if isinstance(author, compat_str) else None,
+                'uploader': author.get('name') if isinstance(author, dict) else author if isinstance(author, str) else None,
                 'filesize': int_or_none(float_or_none(e.get('contentSize'))),
                 'tbr': int_or_none(e.get('bitrate')),
                 'width': int_or_none(e.get('width')),
@@ -1967,14 +1993,9 @@ def http_scheme(self):
             else 'https:')
 
     def _proto_relative_url(self, url, scheme=None):
-        if url is None:
-            return url
-        if url.startswith('//'):
-            if scheme is None:
-                scheme = self.http_scheme()
-            return scheme + url
-        else:
-            return url
+        scheme = scheme or self.http_scheme()
+        assert scheme.endswith(':')
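+        # sanitize_url prefixes protocol-relative URLs, eg: '//cdn.example.com/v' -> 'https://cdn.example.com/v'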
+        return sanitize_url(url, scheme=scheme[:-1])
 
     def _sleep(self, timeout, video_id, msg_template=None):
         if msg_template is None:
@@ -2166,7 +2187,7 @@ def _parse_m3u8_formats_and_subtitles(
         ]), m3u8_doc)
 
         def format_url(url):
-            return url if re.match(r'^https?://', url) else compat_urlparse.urljoin(m3u8_url, url)
+            return url if re.match(r'^https?://', url) else urllib.parse.urljoin(m3u8_url, url)
 
         if self.get_param('hls_split_discontinuity', False):
             def _extract_m3u8_playlist_indices(manifest_url=None, m3u8_doc=None):
@@ -2539,7 +2560,7 @@ def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_para
                     })
                 continue
 
-            src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
+            src_url = src if src.startswith('http') else urllib.parse.urljoin(base, src)
             src_url = src_url.strip()
 
             if proto == 'm3u8' or src_ext == 'm3u8':
@@ -2562,7 +2583,7 @@ def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_para
                         'plugin': 'flowplayer-3.2.0.1',
                     }
                 f4m_url += '&' if '?' in f4m_url else '?'
-                f4m_url += compat_urllib_parse_urlencode(f4m_params)
+                f4m_url += urllib.parse.urlencode(f4m_params)
                 formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
             elif src_ext == 'mpd':
                 formats.extend(self._extract_mpd_formats(
@@ -2827,12 +2848,12 @@ def extract_Initialization(source):
                     base_url = ''
                     for element in (representation, adaptation_set, period, mpd_doc):
                         base_url_e = element.find(_add_ns('BaseURL'))
-                        if base_url_e is not None:
+                        if try_call(lambda: base_url_e.text) is not None:
                             base_url = base_url_e.text + base_url
                             if re.match(r'^https?://', base_url):
                                 break
                     if mpd_base_url and base_url.startswith('/'):
-                        base_url = compat_urlparse.urljoin(mpd_base_url, base_url)
+                        base_url = urllib.parse.urljoin(mpd_base_url, base_url)
                     elif mpd_base_url and not re.match(r'^https?://', base_url):
                         if not mpd_base_url.endswith('/'):
                             mpd_base_url += '/'
@@ -3102,7 +3123,7 @@ def _parse_ism_formats_and_subtitles(self, ism_doc, ism_url, ism_id=None):
                 sampling_rate = int_or_none(track.get('SamplingRate'))
 
                 track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
-                track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
+                track_url_pattern = urllib.parse.urljoin(ism_url, track_url_pattern)
 
                 fragments = []
                 fragment_ctx = {
@@ -3121,7 +3142,7 @@ def _parse_ism_formats_and_subtitles(self, ism_doc, ism_url, ism_id=None):
                         fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
                     for _ in range(fragment_repeat):
                         fragments.append({
-                            'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
+                            'url': re.sub(r'{start[ _]time}', str(fragment_ctx['time']), track_url_pattern),
                             'duration': fragment_ctx['duration'] / stream_timescale,
                         })
                         fragment_ctx['time'] += fragment_ctx['duration']
@@ -3214,7 +3235,7 @@ def _media_formats(src, cur_media_type, type_info=None):
 
         entries = []
         # amp-video and amp-audio are very similar to their HTML5 counterparts
-        # so we wll include them right here (see
+        # so we will include them right here (see
         # https://www.ampproject.org/docs/reference/components/amp-video)
         # For dl8-* tags see https://delight-vr.com/documentation/dl8-video/
         _MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video|audio)'
@@ -3365,7 +3386,7 @@ def _extract_akamai_formats_and_subtitles(self, manifest_url, video_id, hosts={}
         return formats, subtitles
 
     def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
-        query = compat_urlparse.urlparse(url).query
+        query = urllib.parse.urlparse(url).query
         url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
         mobj = re.search(
             r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
@@ -3471,7 +3492,7 @@ def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
                     if not isinstance(track, dict):
                         continue
                     track_kind = track.get('kind')
-                    if not track_kind or not isinstance(track_kind, compat_str):
+                    if not track_kind or not isinstance(track_kind, str):
                         continue
                     if track_kind.lower() not in ('captions', 'subtitles'):
                         continue
@@ -3544,7 +3565,7 @@ def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
                     # Often no height is provided but there is a label in
                     # format like "1080p", "720p SD", or 1080.
                     height = int_or_none(self._search_regex(
-                        r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''),
+                        r'^(\d{3,4})[pP]?(?:\b|$)', str(source.get('label') or ''),
                         'height', default=None))
                 a_format = {
                     'url': source_url,
@@ -3645,11 +3666,18 @@ def get_testcases(cls, include_onlymatching=False):
             t['name'] = cls.ie_key()
             yield t
 
+    @classmethod
+    def get_webpage_testcases(cls):
+        tests = getattr(cls, '_WEBPAGE_TESTS', [])
+        for t in tests:
+            t['name'] = cls.ie_key()
+        return tests
+
     @classproperty
     def age_limit(cls):
         """Get age limit from the testcases"""
         return max(traverse_obj(
-            tuple(cls.get_testcases(include_onlymatching=False)),
+            (*cls.get_testcases(include_onlymatching=False), *cls.get_webpage_testcases()),
             (..., (('playlist', 0), None), 'info_dict', 'age_limit')) or [0])
 
     @classmethod
@@ -3769,11 +3797,13 @@ def geo_verification_headers(self):
             headers['Ytdl-request-proxy'] = geo_verification_proxy
         return headers
 
-    def _generic_id(self, url):
-        return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
+    @staticmethod
+    def _generic_id(url):
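+        # eg: 'https://example.com/path/some%20video.mp4/' -> 'some video'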
+        return urllib.parse.unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
 
-    def _generic_title(self, url):
-        return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
+    @staticmethod
+    def _generic_title(url):
+        return urllib.parse.unquote(os.path.splitext(url_basename(url))[0])
 
     @staticmethod
     def _availability(is_private=None, needs_premium=None, needs_subscription=None, needs_auth=None, is_unlisted=None):
@@ -3818,6 +3848,45 @@ def _yes_playlist(self, playlist_id, video_id, smuggled_data=None, *, playlist_l
         self.to_screen(f'Downloading {playlist_label}{playlist_id} - add --no-playlist to download just the {video_label}{video_id}')
         return True
 
+    @classmethod
+    def extract_from_webpage(cls, ydl, url, webpage):
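+        # classmethods appear as bound methods on the class itself; a plain function
+        # here means _extract_from_webpage was overridden as an instance method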
+        ie = (cls if isinstance(cls._extract_from_webpage, types.MethodType)
+              else ydl.get_info_extractor(cls.ie_key()))
+        for info in ie._extract_from_webpage(url, webpage) or []:
+            # url = None since we do not want to set (webpage/original)_url
+            ydl.add_default_extra_info(info, ie, None)
+            yield info
+
+    @classmethod
+    def _extract_from_webpage(cls, url, webpage):
+        for embed_url in orderedSet(
+                cls._extract_embed_urls(url, webpage) or [], lazy=True):
+            yield cls.url_result(embed_url, cls)
+
+    @classmethod
+    def _extract_embed_urls(cls, url, webpage):
+        """@returns all the embed urls on the webpage"""
+        if '_EMBED_URL_RE' not in cls.__dict__:
+            assert isinstance(cls._EMBED_REGEX, (list, tuple))
+            for idx, regex in enumerate(cls._EMBED_REGEX):
+                assert regex.count('(?P<url>') == 1, \
+                    f'{cls.__name__}._EMBED_REGEX[{idx}] must have exactly 1 url group\n\t{regex}'
+            cls._EMBED_URL_RE = tuple(map(re.compile, cls._EMBED_REGEX))
+
+        for regex in cls._EMBED_URL_RE:
+            for mobj in regex.finditer(webpage):
+                embed_url = urllib.parse.urljoin(url, unescapeHTML(mobj.group('url')))
+                if cls._VALID_URL is False or cls.suitable(embed_url):
+                    yield embed_url
+
+    class StopExtraction(Exception):
+        pass
+
+    @classmethod
+    def _extract_url(cls, webpage):  # TODO: Remove
+        """Only for compatibility with some older extractors"""
+        return next(iter(cls._extract_embed_urls(None, webpage) or []), None)
+
 
 class SearchInfoExtractor(InfoExtractor):
     """
@@ -3828,8 +3897,8 @@ class SearchInfoExtractor(InfoExtractor):
 
     _MAX_RESULTS = float('inf')
 
-    @classmethod
-    def _make_valid_url(cls):
+    @classproperty
+    def _VALID_URL(cls):
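+        # eg: with _SEARCH_KEY = 'ytsearch', this matches 'ytsearch:abc', 'ytsearch5:abc' and 'ytsearchall:abc'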
         return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
 
     def _real_extract(self, query):