jfr.im git - yt-dlp.git/blobdiff - yt_dlp/extractor/reddit.py
[extractor/adobepass] Handle `Charter_Direct` MSO as `Spectrum` (#6824)
[yt-dlp.git] / yt_dlp / extractor / reddit.py
index aabc8dba9b8d3a5111e97ceeac05c5e3f8cb334d..9dba3eca8f58b83e8f7d0ad933265a6096f735de 100644 (file)
@@ -1,20 +1,20 @@
 import random
-from urllib.parse import urlparse
+import urllib.parse
 
 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
-    int_or_none,
     float_or_none,
+    int_or_none,
+    traverse_obj,
     try_get,
     unescapeHTML,
     url_or_none,
-    traverse_obj
 )
 
 
 class RedditIE(InfoExtractor):
-    _VALID_URL = r'https?://(?P<subdomain>[^/]+\.)?reddit(?:media)?\.com/r/(?P<slug>[^/]+/comments/(?P<id>[^/?#&]+))'
+    _VALID_URL = r'https?://(?P<subdomain>[^/]+\.)?reddit(?:media)?\.com/(?P<slug>(?:r|user)/[^/]+/comments/(?P<id>[^/?#&]+))'
     _TESTS = [{
         'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
         'info_dict': {
@@ -32,10 +32,83 @@ class RedditIE(InfoExtractor):
             'dislike_count': int,
             'comment_count': int,
             'age_limit': 0,
+            'channel_id': 'videos',
+        },
+        'params': {
+            'skip_download': True,
+        },
+    }, {
+        # 1080p fallback format
+        'url': 'https://www.reddit.com/r/aww/comments/90bu6w/heat_index_was_110_degrees_so_we_offered_him_a/',
+        'md5': '8b5902cfda3006bf90faea7adf765a49',
+        'info_dict': {
+            'id': 'gyh95hiqc0b11',
+            'ext': 'mp4',
+            'display_id': '90bu6w',
+            'title': 'Heat index was 110 degrees so we offered him a cold drink. He went for a full body soak instead',
+            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
+            'thumbnails': 'count:7',
+            'timestamp': 1532051078,
+            'upload_date': '20180720',
+            'uploader': 'FootLoosePickleJuice',
+            'duration': 14,
+            'like_count': int,
+            'dislike_count': int,
+            'comment_count': int,
+            'age_limit': 0,
+            'channel_id': 'aww',
+        },
+    }, {
+        # User post
+        'url': 'https://www.reddit.com/user/creepyt0es/comments/nip71r/i_plan_to_make_more_stickers_and_prints_check/',
+        'info_dict': {
+            'id': 'zasobba6wp071',
+            'ext': 'mp4',
+            'display_id': 'nip71r',
+            'title': 'I plan to make more stickers and prints! Check them out on my Etsy! Or get them through my Patreon. Links below.',
+            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
+            'thumbnails': 'count:5',
+            'timestamp': 1621709093,
+            'upload_date': '20210522',
+            'uploader': 'creepyt0es',
+            'duration': 6,
+            'like_count': int,
+            'dislike_count': int,
+            'comment_count': int,
+            'age_limit': 0,
+            'channel_id': 'u_creepyt0es',
         },
         'params': {
             'skip_download': True,
         },
+    }, {
+        # videos embedded in reddit text post
+        'url': 'https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/',
+        'playlist_count': 2,
+        'info_dict': {
+            'id': 'wzqkxp',
+            'title': 'md5:72d3d19402aa11eff5bd32fc96369b37',
+        },
+    }, {
+        # crossposted reddit-hosted media
+        'url': 'https://www.reddit.com/r/dumbfuckers_club/comments/zjjw82/cringe/',
+        'md5': '746180895c7b75a9d6b05341f507699a',
+        'info_dict': {
+            'id': 'a1oneun6pa5a1',
+            'ext': 'mp4',
+            'display_id': 'zjjw82',
+            'title': 'Cringe',
+            'uploader': 'Otaku-senpai69420',
+            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
+            'upload_date': '20221212',
+            'timestamp': 1670812309,
+            'duration': 16,
+            'like_count': int,
+            'dislike_count': int,
+            'comment_count': int,
+            'age_limit': 0,
+            'channel_id': 'dumbfuckers_club',
+        },
     }, {
         'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
         'only_matching': True,
@@ -75,17 +148,13 @@ def _real_extract(self, url):
 
         self._set_cookie('.reddit.com', 'reddit_session', self._gen_session_id())
         self._set_cookie('.reddit.com', '_options', '%7B%22pref_quarantine_optin%22%3A%20true%7D')
-        data = self._download_json(f'https://{subdomain}reddit.com/r/{slug}/.json', video_id, fatal=False)
+        data = self._download_json(f'https://{subdomain}reddit.com/{slug}/.json', video_id, fatal=False)
         if not data:
             # Fall back to old.reddit.com in case the requested subdomain fails
-            data = self._download_json(f'https://old.reddit.com/r/{slug}/.json', video_id)
+            data = self._download_json(f'https://old.reddit.com/{slug}/.json', video_id)
         data = data[0]['data']['children'][0]['data']
         video_url = data['url']
 
-        # Avoid recursing into the same reddit URL
-        if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:
-            raise ExtractorError('No media found', expected=True)
-
         over_18 = data.get('over_18')
         if over_18 is True:
             age_limit = 18
@@ -122,14 +191,42 @@ def add_thumbnail(src):
             'thumbnails': thumbnails,
             'timestamp': float_or_none(data.get('created_utc')),
             'uploader': data.get('author'),
+            'channel_id': data.get('subreddit'),
             'like_count': int_or_none(data.get('ups')),
             'dislike_count': int_or_none(data.get('downs')),
             'comment_count': int_or_none(data.get('num_comments')),
             'age_limit': age_limit,
         }
 
+        parsed_url = urllib.parse.urlparse(video_url)
+
+        # Check for embeds in text posts, or else raise to avoid recursing into the same reddit URL
+        if 'reddit.com' in parsed_url.netloc and f'/{video_id}/' in parsed_url.path:
+            entries = []
+            for media in traverse_obj(data, ('media_metadata', ...), expected_type=dict):
+                if not media.get('id') or media.get('e') != 'RedditVideo':
+                    continue
+                formats = []
+                if media.get('hlsUrl'):
+                    formats.extend(self._extract_m3u8_formats(
+                        unescapeHTML(media['hlsUrl']), video_id, 'mp4', m3u8_id='hls', fatal=False))
+                if media.get('dashUrl'):
+                    formats.extend(self._extract_mpd_formats(
+                        unescapeHTML(media['dashUrl']), video_id, mpd_id='dash', fatal=False))
+                if formats:
+                    entries.append({
+                        'id': media['id'],
+                        'display_id': video_id,
+                        'formats': formats,
+                        **info,
+                    })
+            if entries:
+                return self.playlist_result(entries, video_id, info.get('title'))
+            raise ExtractorError('No media found', expected=True)
+
         # Check if media is hosted on reddit:
-        reddit_video = traverse_obj(data, (('media', 'secure_media'), 'reddit_video'), get_all=False)
+        reddit_video = traverse_obj(data, (
+            (None, ('crosspost_parent_list', ...)), ('secure_media', 'media'), 'reddit_video'), get_all=False)
         if reddit_video:
             playlist_urls = [
                 try_get(reddit_video, lambda x: unescapeHTML(x[y]))
@@ -145,12 +242,21 @@ def add_thumbnail(src):
             dash_playlist_url = playlist_urls[0] or f'https://v.redd.it/{video_id}/DASHPlaylist.mpd'
             hls_playlist_url = playlist_urls[1] or f'https://v.redd.it/{video_id}/HLSPlaylist.m3u8'
 
-            formats = self._extract_m3u8_formats(
-                hls_playlist_url, display_id, 'mp4',
-                entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
+            formats = [{
+                'url': unescapeHTML(reddit_video['fallback_url']),
+                'height': int_or_none(reddit_video.get('height')),
+                'width': int_or_none(reddit_video.get('width')),
+                'tbr': int_or_none(reddit_video.get('bitrate_kbps')),
+                'acodec': 'none',
+                'vcodec': 'h264',
+                'ext': 'mp4',
+                'format_id': 'fallback',
+                'format_note': 'DASH video, mp4_dash',
+            }]
+            formats.extend(self._extract_m3u8_formats(
+                hls_playlist_url, display_id, 'mp4', m3u8_id='hls', fatal=False))
             formats.extend(self._extract_mpd_formats(
                 dash_playlist_url, display_id, mpd_id='dash', fatal=False))
-            self._sort_formats(formats)
 
             return {
                 **info,
@@ -160,7 +266,6 @@ def add_thumbnail(src):
                 'duration': int_or_none(reddit_video.get('duration')),
             }
 
-        parsed_url = urlparse(video_url)
         if parsed_url.netloc == 'v.redd.it':
             self.raise_no_formats('This video is processing', expected=True, video_id=video_id)
             return {