import random
import urllib.parse

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
    traverse_obj,
    try_get,
    unescapeHTML,
    url_or_none,
)


class RedditIE(InfoExtractor):
    _VALID_URL = r'https?://(?P<subdomain>[^/]+\.)?reddit(?:media)?\.com/r/(?P<slug>[^/]+/comments/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'display_id': '6rrwyj',
            'title': 'That small heart attack.',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:4',
            'timestamp': 1501941939,
            'upload_date': '20170805',
            'uploader': 'Antw87',
            'duration': 12,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # 1080p fallback format
        'url': 'https://www.reddit.com/r/aww/comments/90bu6w/heat_index_was_110_degrees_so_we_offered_him_a/',
        'md5': '8b5902cfda3006bf90faea7adf765a49',
        'info_dict': {
            'id': 'gyh95hiqc0b11',
            'ext': 'mp4',
            'display_id': '90bu6w',
            'title': 'Heat index was 110 degrees so we offered him a cold drink. He went for a full body soak instead',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:7',
            'timestamp': 1532051078,
            'upload_date': '20180720',
            'uploader': 'FootLoosePickleJuice',
            'duration': 14,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
        },
    }, {
        # videos embedded in reddit text post
        'url': 'https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/',
        'playlist_count': 2,
        'info_dict': {
            'id': 'wzqkxp',
            'title': 'md5:72d3d19402aa11eff5bd32fc96369b37',
        },
    }, {
        # crossposted reddit-hosted media
        'url': 'https://www.reddit.com/r/dumbfuckers_club/comments/zjjw82/cringe/',
        'md5': '746180895c7b75a9d6b05341f507699a',
        'info_dict': {
            'id': 'a1oneun6pa5a1',
            'ext': 'mp4',
            'display_id': 'zjjw82',
            'title': 'Cringe',
            'uploader': 'Otaku-senpai69420',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'upload_date': '20221212',
            'timestamp': 1670812309,
            'duration': 16,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
        },
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
        'only_matching': True,
    }, {
        # imgur
        'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # imgur @ old reddit
        'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # streamable
        'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
        'only_matching': True,
    }, {
        # youtube
        'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
        'only_matching': True,
    }, {
        # reddit video @ nm reddit
        'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',
        'only_matching': True,
    }, {
        'url': 'https://www.redditmedia.com/r/serbia/comments/pu9wbx/ako_vu%C4%8Di%C4%87_izgubi_izbore_ja_%C4%87u_da_crknem/',
        'only_matching': True,
    }]

    @staticmethod
    def _gen_session_id():
        # Random 16-character lowercase hex string, used as a throwaway
        # 'reddit_session' cookie value in _real_extract
        id_length = 16
        rand_max = 1 << (id_length * 4)
        return '%0.*x' % (id_length, random.randrange(rand_max))

    def _real_extract(self, url):
        subdomain, slug, video_id = self._match_valid_url(url).group('subdomain', 'slug', 'id')

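        # Spoof a logged-in session: a random 'reddit_session' value plus the
        # URL-encoded '{"pref_quarantine_optin": true}' preference is intended
        # to let the .json endpoint serve quarantined/age-gated posts as well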
        self._set_cookie('.reddit.com', 'reddit_session', self._gen_session_id())
        self._set_cookie('.reddit.com', '_options', '%7B%22pref_quarantine_optin%22%3A%20true%7D')
        data = self._download_json(f'https://{subdomain}reddit.com/r/{slug}/.json', video_id, fatal=False)
        if not data:
            # Fall back to old.reddit.com in case the requested subdomain fails
            data = self._download_json(f'https://old.reddit.com/r/{slug}/.json', video_id)
        data = data[0]['data']['children'][0]['data']
        video_url = data['url']

        over_18 = data.get('over_18')
        if over_18 is True:
            age_limit = 18
        elif over_18 is False:
            age_limit = 0
        else:
            age_limit = None

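        # Reddit's preview data exposes each image as a full-size 'source'
        # plus a list of scaled-down 'resolutions'; collect all of them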
        thumbnails = []

        def add_thumbnail(src):
            if not isinstance(src, dict):
                return
            thumbnail_url = url_or_none(src.get('url'))
            if not thumbnail_url:
                return
            thumbnails.append({
                'url': unescapeHTML(thumbnail_url),
                'width': int_or_none(src.get('width')),
                'height': int_or_none(src.get('height')),
            })

        for image in try_get(data, lambda x: x['preview']['images']) or []:
            if not isinstance(image, dict):
                continue
            add_thumbnail(image.get('source'))
            resolutions = image.get('resolutions')
            if isinstance(resolutions, list):
                for resolution in resolutions:
                    add_thumbnail(resolution)

        info = {
            'title': data.get('title'),
            'thumbnails': thumbnails,
            'timestamp': float_or_none(data.get('created_utc')),
            'uploader': data.get('author'),
            'like_count': int_or_none(data.get('ups')),
            'dislike_count': int_or_none(data.get('downs')),
            'comment_count': int_or_none(data.get('num_comments')),
            'age_limit': age_limit,
        }

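        # data['url'] decides how the post is handled: a text post with
        # embedded videos, a reddit-hosted (v.redd.it) video, a still-processing
        # upload, or media on an external host that other extractors can handle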
        parsed_url = urllib.parse.urlparse(video_url)

        # Check for embeds in text posts, or else raise to avoid recursing into the same reddit URL
        if 'reddit.com' in parsed_url.netloc and f'/{video_id}/' in parsed_url.path:
            entries = []
            for media in traverse_obj(data, ('media_metadata', ...), expected_type=dict):
                if not media.get('id') or media.get('e') != 'RedditVideo':
                    continue
                formats = []
                if media.get('hlsUrl'):
                    formats.extend(self._extract_m3u8_formats(
                        unescapeHTML(media['hlsUrl']), video_id, 'mp4', m3u8_id='hls', fatal=False))
                if media.get('dashUrl'):
                    formats.extend(self._extract_mpd_formats(
                        unescapeHTML(media['dashUrl']), video_id, mpd_id='dash', fatal=False))
                if formats:
                    entries.append({
                        'id': media['id'],
                        'display_id': video_id,
                        'formats': formats,
                        **info,
                    })
            if entries:
                return self.playlist_result(entries, video_id, info.get('title'))
            raise ExtractorError('No media found', expected=True)

        # Check if media is hosted on reddit (also look at crosspost parents):
        reddit_video = traverse_obj(data, (
            (None, ('crosspost_parent_list', ...)), ('secure_media', 'media'), 'reddit_video'), get_all=False)
        if reddit_video:
            playlist_urls = [
                try_get(reddit_video, lambda x: unescapeHTML(x[y]))
                for y in ('dash_url', 'hls_url')
            ]

            # Update video_id: the actual media id is the v.redd.it slug;
            # keep the post id as display_id
            display_id = video_id
            video_id = self._search_regex(
                r'https?://v\.redd\.it/(?P<id>[^/?#&]+)', reddit_video['fallback_url'],
                'video_id', default=display_id)

            dash_playlist_url = playlist_urls[0] or f'https://v.redd.it/{video_id}/DASHPlaylist.mpd'
            hls_playlist_url = playlist_urls[1] or f'https://v.redd.it/{video_id}/HLSPlaylist.m3u8'

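            # The fallback_url is treated as a video-only MP4 ('acodec': 'none');
            # audio comes from the HLS/DASH manifests extracted below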
            formats = [{
                'url': unescapeHTML(reddit_video['fallback_url']),
                'height': int_or_none(reddit_video.get('height')),
                'width': int_or_none(reddit_video.get('width')),
                'tbr': int_or_none(reddit_video.get('bitrate_kbps')),
                'acodec': 'none',
                'vcodec': 'h264',
                'ext': 'mp4',
                'format_id': 'fallback',
                'format_note': 'DASH video, mp4_dash',
            }]
            formats.extend(self._extract_m3u8_formats(
                hls_playlist_url, display_id, 'mp4', m3u8_id='hls', fatal=False))
            formats.extend(self._extract_mpd_formats(
                dash_playlist_url, display_id, mpd_id='dash', fatal=False))

            return {
                **info,
                'id': video_id,
                'display_id': display_id,
                'formats': formats,
                'duration': int_or_none(reddit_video.get('duration')),
            }

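        # A v.redd.it link with no reddit_video data usually means the upload
        # is still being transcoded; return metadata only, with no formats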
        if parsed_url.netloc == 'v.redd.it':
            self.raise_no_formats('This video is processing', expected=True, video_id=video_id)
            return {
                **info,
                'id': parsed_url.path.split('/')[1],
                'display_id': video_id,
            }

        # Not hosted on reddit, must continue extraction
        return {
            **info,
            'display_id': video_id,
            '_type': 'url_transparent',
            'url': video_url,
        }
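

# Rough usage sketch (illustrative only, not part of the extractor): RedditIE
# is normally selected automatically by yt_dlp.YoutubeDL when a matching URL
# is passed in, so direct instantiation is rarely needed:
#
#   from yt_dlp import YoutubeDL
#
#   with YoutubeDL({'skip_download': True}) as ydl:
#       info = ydl.extract_info(
#           'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
#           download=False)
#       print(info['id'], info.get('title'))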