import urllib.parse

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
    traverse_obj,
    try_get,
    unescapeHTML,
    urlencode_postdata,
    url_or_none,
)


class RedditIE(InfoExtractor):
    _NETRC_MACHINE = 'reddit'
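    # Matches posts on any reddit.com subdomain (www, old, nm, ...) and on
    # redditmedia.com, with an optional r/<subreddit>/ or user/<name>/ prefix
    # before the comments/<id> segment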
    _VALID_URL = r'https?://(?P<host>(?:\w+\.)?reddit(?:media)?\.com)/(?P<slug>(?:(?:r|user)/[^/]+/)?comments/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'display_id': '6rrwyj',
            'title': 'That small heart attack.',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:4',
            'timestamp': 1501941939,
            'upload_date': '20170805',
            'uploader': 'Antw87',
            'duration': 12,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
            'channel_id': 'videos',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # 1080p fallback format
        'url': 'https://www.reddit.com/r/aww/comments/90bu6w/heat_index_was_110_degrees_so_we_offered_him_a/',
        'md5': '8b5902cfda3006bf90faea7adf765a49',
        'info_dict': {
            'id': 'gyh95hiqc0b11',
            'ext': 'mp4',
            'display_id': '90bu6w',
            'title': 'Heat index was 110 degrees so we offered him a cold drink. He went for a full body soak instead',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:7',
            'timestamp': 1532051078,
            'upload_date': '20180720',
            'uploader': 'FootLoosePickleJuice',
            'duration': 14,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
            'channel_id': 'aww',
        },
    }, {
        # User post
        'url': 'https://www.reddit.com/user/creepyt0es/comments/nip71r/i_plan_to_make_more_stickers_and_prints_check/',
        'info_dict': {
            'id': 'zasobba6wp071',
            'ext': 'mp4',
            'display_id': 'nip71r',
            'title': 'I plan to make more stickers and prints! Check them out on my Etsy! Or get them through my Patreon. Links below.',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:5',
            'timestamp': 1621709093,
            'upload_date': '20210522',
            'uploader': 'creepyt0es',
            'duration': 6,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
            'channel_id': 'u_creepyt0es',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # videos embedded in reddit text post
        'url': 'https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/',
        'playlist_count': 2,
        'info_dict': {
            'id': 'wzqkxp',
            'title': 'md5:72d3d19402aa11eff5bd32fc96369b37',
        },
    }, {
        # crossposted reddit-hosted media
        'url': 'https://www.reddit.com/r/dumbfuckers_club/comments/zjjw82/cringe/',
        'md5': '746180895c7b75a9d6b05341f507699a',
        'info_dict': {
            'id': 'a1oneun6pa5a1',
            'ext': 'mp4',
            'display_id': 'zjjw82',
            'title': 'Cringe',
            'uploader': 'Otaku-senpai69420',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'upload_date': '20221212',
            'timestamp': 1670812309,
            'duration': 16,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
            'channel_id': 'dumbfuckers_club',
        },
    }, {
        # post link without subreddit
        'url': 'https://www.reddit.com/comments/124pp33',
        'md5': '15eec9d828adcef4468b741a7e45a395',
        'info_dict': {
            'id': 'antsenjc2jqa1',
            'ext': 'mp4',
            'display_id': '124pp33',
            'title': 'Harmless prank of some old friends',
            'uploader': 'Dudezila',
            'channel_id': 'ContagiousLaughter',
            'duration': 17,
            'upload_date': '20230328',
            'timestamp': 1680012043,
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'age_limit': 0,
            'comment_count': int,
            'dislike_count': int,
            'like_count': int,
        },
    }, {
        # quarantined subreddit post
        'url': 'https://old.reddit.com/r/GenZedong/comments/12fujy3/based_hasan/',
        'md5': '3156ea69e3c1f1b6259683c5abd36e71',
        'info_dict': {
            'id': '8bwtclfggpsa1',
            'ext': 'mp4',
            'display_id': '12fujy3',
            'title': 'Based Hasan?',
            'uploader': 'KingNigelXLII',
            'channel_id': 'GenZedong',
            'duration': 16,
            'upload_date': '20230408',
            'timestamp': 1680979138,
            'age_limit': 0,
            'comment_count': int,
            'dislike_count': int,
            'like_count': int,
        },
        'skip': 'Requires account that has opted-in to the GenZedong subreddit',
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
        'only_matching': True,
    }, {
        # imgur
        'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # imgur @ old reddit
        'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # streamable
        'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
        'only_matching': True,
    }, {
        # youtube
        'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
        'only_matching': True,
    }, {
        # reddit video @ nm reddit
        'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',
        'only_matching': True,
    }, {
        'url': 'https://www.redditmedia.com/r/serbia/comments/pu9wbx/ako_vu%C4%8Di%C4%87_izgubi_izbore_ja_%C4%87u_da_crknem/',
        'only_matching': True,
    }]

    def _perform_login(self, username, password):
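        # Reddit refuses password logins outright when it decides a captcha is
        # needed, so probe for that first and fail early with a clear message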
        captcha = self._download_json(
            'https://www.reddit.com/api/requires_captcha/login.json', None,
            'Checking login requirement')['required']
        if captcha:
            raise ExtractorError('Reddit is requiring captcha before login', expected=True)
        login = self._download_json(
            f'https://www.reddit.com/api/login/{username}', None, data=urlencode_postdata({
                'op': 'login-main',
                'user': username,
                'passwd': password,
                'api_type': 'json',
            }), note='Logging in', errnote='Login request failed')
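        # The legacy API reports failures as [code, message, field] triples
        # under json.errors; surface the human-readable messages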
        errors = '; '.join(traverse_obj(login, ('json', 'errors', ..., 1)))
        if errors:
            raise ExtractorError(f'Unable to login, Reddit API says {errors}', expected=True)
        elif not traverse_obj(login, ('json', 'data', 'cookie', {str})):
            raise ExtractorError('Unable to login, no cookie was returned')

    def _real_extract(self, url):
        host, slug, video_id = self._match_valid_url(url).group('host', 'slug', 'id')

        data = self._download_json(
            f'https://{host}/{slug}/.json', video_id, fatal=False, expected_status=403)
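        # The first request is non-fatal; if it returned nothing, retry once
        # via the alternate frontend (www <-> old)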
        if not data:
            fallback_host = 'old.reddit.com' if host != 'old.reddit.com' else 'www.reddit.com'
            self.to_screen(f'{host} request failed, retrying with {fallback_host}')
            data = self._download_json(
                f'https://{fallback_host}/{slug}/.json', video_id, expected_status=403)

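        # A 403 JSON body means access is restricted; distinguish quarantined
        # and private subreddits so the login prompt is actionable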
        if traverse_obj(data, 'error') == 403:
            reason = data.get('reason')
            if reason == 'quarantined':
                self.raise_login_required('Quarantined subreddit; an account that has opted in is required')
            elif reason == 'private':
                self.raise_login_required('Private subreddit; an account that has been approved is required')
            else:
                raise ExtractorError(f'HTTP Error 403 Forbidden; reason given: {reason}')

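        # The .json endpoint returns a list of listings; the first child of
        # the first listing is the post itself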
        data = data[0]['data']['children'][0]['data']
        video_url = data['url']

        over_18 = data.get('over_18')
        if over_18 is True:
            age_limit = 18
        elif over_18 is False:
            age_limit = 0
        else:
            age_limit = None

        thumbnails = []

        def add_thumbnail(src):
            if not isinstance(src, dict):
                return
            thumbnail_url = url_or_none(src.get('url'))
            if not thumbnail_url:
                return
            thumbnails.append({
                'url': unescapeHTML(thumbnail_url),
                'width': int_or_none(src.get('width')),
                'height': int_or_none(src.get('height')),
                'http_headers': {'Accept': '*/*'},
            })

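        # Collect the full-size source image and every scaled-down rendition
        # reddit generated for the post preview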
        for image in try_get(data, lambda x: x['preview']['images']) or []:
            if not isinstance(image, dict):
                continue
            add_thumbnail(image.get('source'))
            resolutions = image.get('resolutions')
            if isinstance(resolutions, list):
                for resolution in resolutions:
                    add_thumbnail(resolution)

        info = {
            'title': data.get('title'),
            'thumbnails': thumbnails,
            'timestamp': float_or_none(data.get('created_utc')),
            'uploader': data.get('author'),
            'channel_id': data.get('subreddit'),
            'like_count': int_or_none(data.get('ups')),
            'dislike_count': int_or_none(data.get('downs')),
            'comment_count': int_or_none(data.get('num_comments')),
            'age_limit': age_limit,
        }

        parsed_url = urllib.parse.urlparse(video_url)

        # Check for embeds in text posts, or else raise to avoid recursing into the same reddit URL
        if 'reddit.com' in parsed_url.netloc and f'/{video_id}/' in parsed_url.path:
            entries = []
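            # media_metadata describes attachments in text/gallery posts; only
            # RedditVideo entries carry playable streams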
            for media in traverse_obj(data, ('media_metadata', ...), expected_type=dict):
                if not media.get('id') or media.get('e') != 'RedditVideo':
                    continue
                formats = []
                if media.get('hlsUrl'):
                    formats.extend(self._extract_m3u8_formats(
                        unescapeHTML(media['hlsUrl']), video_id, 'mp4', m3u8_id='hls', fatal=False))
                if media.get('dashUrl'):
                    formats.extend(self._extract_mpd_formats(
                        unescapeHTML(media['dashUrl']), video_id, mpd_id='dash', fatal=False))
                if formats:
                    entries.append({
                        'id': media['id'],
                        'display_id': video_id,
                        'formats': formats,
                        **info,
                    })
            if entries:
                return self.playlist_result(entries, video_id, info.get('title'))
            raise ExtractorError('No media found', expected=True)

        # Check if media is hosted on reddit:
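        # (crossposts keep the media on the parent post, hence the
        # crosspost_parent_list branch in the traversal)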
        reddit_video = traverse_obj(data, (
            (None, ('crosspost_parent_list', ...)), ('secure_media', 'media'), 'reddit_video'), get_all=False)
        if reddit_video:
            playlist_urls = [
                try_get(reddit_video, lambda x: unescapeHTML(x[y]))
                for y in ('dash_url', 'hls_url')
            ]

            # Update video_id
            display_id = video_id
            video_id = self._search_regex(
                r'https?://v\.redd\.it/(?P<id>[^/?#&]+)', reddit_video['fallback_url'],
                'video_id', default=display_id)

            dash_playlist_url = playlist_urls[0] or f'https://v.redd.it/{video_id}/DASHPlaylist.mpd'
            hls_playlist_url = playlist_urls[1] or f'https://v.redd.it/{video_id}/HLSPlaylist.m3u8'

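            # fallback_url is a progressive MP4 that the extractor treats as
            # video-only (acodec 'none'); audio comes from the HLS/DASH
            # manifests extracted below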
            formats = [{
                'url': unescapeHTML(reddit_video['fallback_url']),
                'height': int_or_none(reddit_video.get('height')),
                'width': int_or_none(reddit_video.get('width')),
                'tbr': int_or_none(reddit_video.get('bitrate_kbps')),
                'acodec': 'none',
                'vcodec': 'h264',
                'ext': 'mp4',
                'format_id': 'fallback',
                'format_note': 'DASH video, mp4_dash',
            }]
            hls_fmts, subtitles = self._extract_m3u8_formats_and_subtitles(
                hls_playlist_url, display_id, 'mp4', m3u8_id='hls', fatal=False)
            formats.extend(hls_fmts)
            dash_fmts, dash_subs = self._extract_mpd_formats_and_subtitles(
                dash_playlist_url, display_id, mpd_id='dash', fatal=False)
            formats.extend(dash_fmts)
            self._merge_subtitles(dash_subs, target=subtitles)

            return {
                **info,
                'id': video_id,
                'display_id': display_id,
                'formats': formats,
                'subtitles': subtitles,
                'duration': int_or_none(reddit_video.get('duration')),
            }

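        # A bare v.redd.it link with no reddit_video data means the upload is
        # still transcoding; report no formats but keep the metadata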
        if parsed_url.netloc == 'v.redd.it':
            self.raise_no_formats('This video is processing', expected=True, video_id=video_id)
            return {
                **info,
                'id': parsed_url.path.split('/')[1],
                'display_id': video_id,
            }

        # Not hosted on reddit; defer extraction to the matching extractor
        # via url_transparent so this post's metadata is kept
        return {
            **info,
            'display_id': video_id,
            '_type': 'url_transparent',
            'url': video_url,
        }
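
# A minimal usage sketch, illustrative only and not part of the extractor: in
# the upstream yt-dlp package this class is registered automatically, so a
# matching URL passed to YoutubeDL routes here (URL taken from _TESTS above):
#
#   from yt_dlp import YoutubeDL
#   with YoutubeDL({'skip_download': True}) as ydl:
#       info = ydl.extract_info(
#           'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/')
#       print(info['id'], info.get('title'))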