[extractor] Deprecate `_sort_formats`
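Note: this patch is part of yt-dlp's deprecation of InfoExtractor._sort_formats(); extractors now just return their formats and the core sorts them. A minimal sketch of the resulting extractor pattern, using a hypothetical ExampleIE that is not part of this patch:

    from yt_dlp.extractor.common import InfoExtractor

    class ExampleIE(InfoExtractor):
        # Hypothetical URL pattern, for illustration only
        _VALID_URL = r'https?://example\.invalid/video/(?P<id>\d+)'

        def _real_extract(self, url):
            video_id = self._match_id(url)
            formats = [
                {'format_id': 'low', 'url': f'https://example.invalid/{video_id}/360.mp4', 'height': 360},
                {'format_id': 'high', 'url': f'https://example.invalid/{video_id}/1080.mp4', 'height': 1080},
            ]
            # No self._sort_formats(formats) call any more; yt-dlp sorts the
            # returned formats itself (see the removal in the diff below).
            return {'id': video_id, 'title': video_id, 'formats': formats}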
diff --git a/yt_dlp/extractor/mediasite.py b/yt_dlp/extractor/mediasite.py
index c18b16eb3fcb9e010aa495e3bd8b3271992340db..fe549c49fb430b6ca462dfceb6c2355df4103c3f 100644
--- a/yt_dlp/extractor/mediasite.py
+++ b/yt_dlp/extractor/mediasite.py
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
 import re
 import json
 
@@ -14,8 +11,9 @@
     float_or_none,
     mimetype2ext,
     str_or_none,
+    try_call,
     try_get,
-    unescapeHTML,
+    smuggle_url,
     unsmuggle_url,
     url_or_none,
     urljoin,
@@ -26,7 +24,8 @@
 
 
 class MediasiteIE(InfoExtractor):
-    _VALID_URL = r'(?xi)https?://[^/]+/Mediasite/(?:Play|Showcase/(?:default|livebroadcast)/Presentation)/(?P<id>%s)(?P<query>\?[^#]+|)' % _ID_RE
+    _VALID_URL = r'(?xi)https?://[^/]+/Mediasite/(?:Play|Showcase/[^/#?]+/Presentation)/(?P<id>%s)(?P<query>\?[^#]+|)' % _ID_RE
+    _EMBED_REGEX = [r'(?xi)<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:(?:https?:)?//[^/]+)?/Mediasite/Play/%s(?:\?.*?)?)\1' % _ID_RE]
     _TESTS = [
         {
             'url': 'https://hitsmediaweb.h-its.org/mediasite/Play/2db6c271681e4f199af3c60d1f82869b1d',
@@ -114,17 +113,60 @@ class MediasiteIE(InfoExtractor):
         5: 'video3',
     }
 
-    @staticmethod
-    def _extract_urls(webpage):
-        return [
-            unescapeHTML(mobj.group('url'))
-            for mobj in re.finditer(
-                r'(?xi)<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:(?:https?:)?//[^/]+)?/Mediasite/Play/%s(?:\?.*?)?)\1' % _ID_RE,
-                webpage)]
+    @classmethod
+    def _extract_embed_urls(cls, url, webpage):
+        for embed_url in super()._extract_embed_urls(url, webpage):
+            yield smuggle_url(embed_url, {'UrlReferrer': url})
+
+    def __extract_slides(self, *, stream_id, snum, Stream, duration, images):
+        slide_base_url = Stream['SlideBaseUrl']
+
+        fname_template = Stream['SlideImageFileNameTemplate']
+        if fname_template != 'slide_{0:D4}.jpg':
+            self.report_warning('Unusual slide file name template; report a bug if slide downloading fails')
+        fname_template = re.sub(r'\{0:D([0-9]+)\}', r'{0:0\1}', fname_template)
+
+        fragments = []
+        for i, slide in enumerate(Stream['Slides']):
+            if i == 0:
+                if slide['Time'] > 0:
+                    default_slide = images.get('DefaultSlide')
+                    if default_slide is None:
+                        default_slide = images.get('DefaultStreamImage')
+                    if default_slide is not None:
+                        default_slide = default_slide['ImageFilename']
+                    if default_slide is not None:
+                        fragments.append({
+                            'path': default_slide,
+                            'duration': slide['Time'] / 1000,
+                        })
+
+            next_time = try_call(
+                lambda: Stream['Slides'][i + 1]['Time'],
+                lambda: duration,
+                lambda: slide['Time'],
+                expected_type=(int, float))
+
+            fragments.append({
+                'path': fname_template.format(slide.get('Number', i + 1)),
+                'duration': (next_time - slide['Time']) / 1000
+            })
+
+        return {
+            'format_id': '%s-%u.slides' % (stream_id, snum),
+            'ext': 'mhtml',
+            'url': slide_base_url,
+            'protocol': 'mhtml',
+            'acodec': 'none',
+            'vcodec': 'none',
+            'format_note': 'Slides',
+            'fragments': fragments,
+            'fragment_base_url': slide_base_url,
+        }
 
     def _real_extract(self, url):
         url, data = unsmuggle_url(url, {})
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         resource_id = mobj.group('id')
         query = mobj.group('query')
 
@@ -198,10 +240,15 @@ def _real_extract(self, url):
                         'ext': mimetype2ext(VideoUrl.get('MimeType')),
                     })
 
-            # TODO: if Stream['HasSlideContent']:
-            # synthesise an MJPEG video stream '%s-%u.slides' % (stream_type, snum)
-            # from Stream['Slides']
-            # this will require writing a custom downloader...
+            if Stream.get('HasSlideContent', False):
+                images = player_options['PlayerLayoutOptions']['Images']
+                stream_formats.append(self.__extract_slides(
+                    stream_id=stream_id,
+                    snum=snum,
+                    Stream=Stream,
+                    duration=presentation.get('Duration'),
+                    images=images,
+                ))
 
             # disprefer 'secondary' streams
             if stream_type != 0:
@@ -217,8 +264,6 @@ def _real_extract(self, url):
                 })
             formats.extend(stream_formats)
 
-        self._sort_formats(formats)
-
         # XXX: Presentation['Presenters']
         # XXX: Presentation['Transcript']
 
@@ -276,7 +321,7 @@ class MediasiteCatalogIE(InfoExtractor):
     }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         mediasite_url = mobj.group('url')
         catalog_id = mobj.group('catalog_id')
         current_folder_id = mobj.group('current_folder_id') or catalog_id
@@ -352,7 +397,7 @@ class MediasiteNamedCatalogIE(InfoExtractor):
     }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         mediasite_url = mobj.group('url')
         catalog_name = mobj.group('catalog_name')
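
For reference, a standalone sketch of the slide handling added in __extract_slides above: the .NET-style file name template is rewritten into a Python str.format() template, and each slide becomes an mhtml fragment that lasts until the next slide starts (or until the end of the presentation for the last one). The sample stream dict below is made up; the field names follow the JSON the extractor reads.

    import re

    def slide_fragments(stream, duration_ms):
        # Mediasite reports a .NET-style template such as 'slide_{0:D4}.jpg';
        # rewrite it into a Python format spec ('slide_{0:04}.jpg').
        template = re.sub(r'\{0:D([0-9]+)\}', r'{0:0\1}', stream['SlideImageFileNameTemplate'])
        slides = stream['Slides']
        fragments = []
        for i, slide in enumerate(slides):
            # A slide is shown until the next one starts, or until the end of
            # the presentation for the last slide; 'Time' is in milliseconds.
            next_time = slides[i + 1]['Time'] if i + 1 < len(slides) else duration_ms
            fragments.append({
                'path': template.format(slide.get('Number', i + 1)),
                'duration': (next_time - slide['Time']) / 1000,
            })
        return fragments

    print(slide_fragments(
        {'SlideImageFileNameTemplate': 'slide_{0:D4}.jpg',
         'Slides': [{'Number': 1, 'Time': 0}, {'Number': 2, 'Time': 30000}]},
        duration_ms=90000))
    # [{'path': 'slide_0001.jpg', 'duration': 30.0}, {'path': 'slide_0002.jpg', 'duration': 60.0}]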