3 from .common
import InfoExtractor
class RedditIE(InfoExtractor):
    """Extractor for media in reddit comment-thread posts.

    Handles reddit-hosted video (v.redd.it DASH/HLS), media embedded in
    text posts, and crossposted reddit-hosted media, as exercised by the
    test cases and the extraction code below.
    """
    # Machine name used for .netrc credential lookup by _perform_login.
    _NETRC_MACHINE = 'reddit'
    # Matches comment-thread URLs on any reddit subdomain (www, old, nm, ...)
    # and on redditmedia.com; captures the host, the path slug (optionally
    # prefixed with r/<sub>/ or user/<name>/), and the post id.
    _VALID_URL = r'https?://(?P<host>(?:\w+\.)?reddit(?:media)?\.com)/(?P<slug>(?:(?:r|user)/[^/]+/)?comments/(?P<id>[^/?#&]+))'
22 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
26 'display_id': '6rrwyj',
27 'title': 'That small heart attack.',
28 'thumbnail': r
're:^https?://.*\.(?:jpg|png)',
29 'thumbnails': 'count:4',
30 'timestamp': 1501941939,
31 'upload_date': '20170805',
38 'channel_id': 'videos',
41 'skip_download': True,
44 # 1080p fallback format
45 'url': 'https://www.reddit.com/r/aww/comments/90bu6w/heat_index_was_110_degrees_so_we_offered_him_a/',
46 'md5': '8b5902cfda3006bf90faea7adf765a49',
48 'id': 'gyh95hiqc0b11',
50 'display_id': '90bu6w',
51 'title': 'Heat index was 110 degrees so we offered him a cold drink. He went for a full body soak instead',
52 'thumbnail': r
're:^https?://.*\.(?:jpg|png)',
53 'thumbnails': 'count:7',
54 'timestamp': 1532051078,
55 'upload_date': '20180720',
56 'uploader': 'FootLoosePickleJuice',
66 'url': 'https://www.reddit.com/user/creepyt0es/comments/nip71r/i_plan_to_make_more_stickers_and_prints_check/',
68 'id': 'zasobba6wp071',
70 'display_id': 'nip71r',
71 'title': 'I plan to make more stickers and prints! Check them out on my Etsy! Or get them through my Patreon. Links below.',
72 'thumbnail': r
're:^https?://.*\.(?:jpg|png)',
73 'thumbnails': 'count:5',
74 'timestamp': 1621709093,
75 'upload_date': '20210522',
76 'uploader': 'creepyt0es',
82 'channel_id': 'u_creepyt0es',
85 'skip_download': True,
88 # videos embedded in reddit text post
89 'url': 'https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/',
93 'title': 'md5:72d3d19402aa11eff5bd32fc96369b37',
96 # crossposted reddit-hosted media
97 'url': 'https://www.reddit.com/r/dumbfuckers_club/comments/zjjw82/cringe/',
98 'md5': '746180895c7b75a9d6b05341f507699a',
100 'id': 'a1oneun6pa5a1',
102 'display_id': 'zjjw82',
104 'uploader': 'Otaku-senpai69420',
105 'thumbnail': r
're:^https?://.*\.(?:jpg|png)',
106 'upload_date': '20221212',
107 'timestamp': 1670812309,
110 'dislike_count': int,
111 'comment_count': int,
113 'channel_id': 'dumbfuckers_club',
116 # post link without subreddit
117 'url': 'https://www.reddit.com/comments/124pp33',
118 'md5': '15eec9d828adcef4468b741a7e45a395',
120 'id': 'antsenjc2jqa1',
122 'display_id': '124pp33',
123 'title': 'Harmless prank of some old friends',
124 'uploader': 'Dudezila',
125 'channel_id': 'ContagiousLaughter',
127 'upload_date': '20230328',
128 'timestamp': 1680012043,
129 'thumbnail': r
're:^https?://.*\.(?:jpg|png)',
131 'comment_count': int,
132 'dislike_count': int,
136 # quarantined subreddit post
137 'url': 'https://old.reddit.com/r/GenZedong/comments/12fujy3/based_hasan/',
138 'md5': '3156ea69e3c1f1b6259683c5abd36e71',
140 'id': '8bwtclfggpsa1',
142 'display_id': '12fujy3',
143 'title': 'Based Hasan?',
144 'uploader': 'KingNigelXLII',
145 'channel_id': 'GenZedong',
147 'upload_date': '20230408',
148 'timestamp': 1680979138,
150 'comment_count': int,
151 'dislike_count': int,
154 'skip': 'Requires account that has opted-in to the GenZedong subreddit',
156 # subtitles in HLS manifest
157 'url': 'https://www.reddit.com/r/Unexpected/comments/1cl9h0u/the_insurance_claim_will_be_interesting/',
159 'id': 'a2mdj5d57qyc1',
161 'display_id': '1cl9h0u',
162 'title': 'The insurance claim will be interesting',
163 'uploader': 'darrenpauli',
164 'channel_id': 'Unexpected',
166 'upload_date': '20240506',
167 'timestamp': 1714966382,
169 'comment_count': int,
170 'dislike_count': int,
172 'subtitles': {'en': 'mincount:1'}
,
175 'skip_download': True,
178 # subtitles from caption-url
179 'url': 'https://www.reddit.com/r/soccer/comments/1cxwzso/tottenham_1_0_newcastle_united_james_maddison_31/',
181 'id': 'xbmj4t3igy1d1',
183 'display_id': '1cxwzso',
184 'title': 'Tottenham [1] - 0 Newcastle United - James Maddison 31\'',
185 'uploader': 'Woodstovia',
186 'channel_id': 'soccer',
188 'upload_date': '20240522',
189 'timestamp': 1716373798,
191 'comment_count': int,
192 'dislike_count': int,
194 'subtitles': {'en': 'mincount:1'}
,
197 'skip_download': True,
198 'writesubtitles': True,
201 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
202 'only_matching': True,
205 'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
206 'only_matching': True,
209 'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
210 'only_matching': True,
213 'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
214 'only_matching': True,
217 'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
218 'only_matching': True,
220 # reddit video @ nm reddit
221 'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',
222 'only_matching': True,
224 'url': 'https://www.redditmedia.com/r/serbia/comments/pu9wbx/ako_vu%C4%8Di%C4%87_izgubi_izbore_ja_%C4%87u_da_crknem/',
225 'only_matching': True,
228 def _perform_login(self
, username
, password
):
229 captcha
= self
._download
_json
(
230 'https://www.reddit.com/api/requires_captcha/login.json', None,
231 'Checking login requirement')['required']
233 raise ExtractorError('Reddit is requiring captcha before login', expected
=True)
234 login
= self
._download
_json
(
235 f
'https://www.reddit.com/api/login/{username}', None, data
=urlencode_postdata({
240 }), note
='Logging in', errnote
='Login request failed')
241 errors
= '; '.join(traverse_obj(login
, ('json', 'errors', ..., 1)))
243 raise ExtractorError(f
'Unable to login, Reddit API says {errors}', expected
=True)
244 elif not traverse_obj(login
, ('json', 'data', 'cookie', {str}
)):
245 raise ExtractorError('Unable to login, no cookie was returned')
247 def _get_subtitles(self
, video_id
):
248 # Fallback if there were no subtitles provided by DASH or HLS manifests
249 caption_url
= f
'https://v.redd.it/{video_id}/wh_ben_en.vtt'
250 if self
._is
_valid
_url
(caption_url
, video_id
, item
='subtitles'):
251 return {'en': [{'url': caption_url}
]}
253 def _real_extract(self
, url
):
254 host
, slug
, video_id
= self
._match
_valid
_url
(url
).group('host', 'slug', 'id')
256 data
= self
._download
_json
(
257 f
'https://{host}/{slug}/.json', video_id
, fatal
=False, expected_status
=403)
259 fallback_host
= 'old.reddit.com' if host
!= 'old.reddit.com' else 'www.reddit.com'
260 self
.to_screen(f
'{host} request failed, retrying with {fallback_host}')
261 data
= self
._download
_json
(
262 f
'https://{fallback_host}/{slug}/.json', video_id
, expected_status
=403)
264 if traverse_obj(data
, 'error') == 403:
265 reason
= data
.get('reason')
266 if reason
== 'quarantined':
267 self
.raise_login_required('Quarantined subreddit; an account that has opted in is required')
268 elif reason
== 'private':
269 self
.raise_login_required('Private subreddit; an account that has been approved is required')
271 raise ExtractorError(f
'HTTP Error 403 Forbidden; reason given: {reason}')
273 data
= data
[0]['data']['children'][0]['data']
274 video_url
= data
['url']
276 over_18
= data
.get('over_18')
279 elif over_18
is False:
286 def add_thumbnail(src
):
287 if not isinstance(src
, dict):
289 thumbnail_url
= url_or_none(src
.get('url'))
290 if not thumbnail_url
:
293 'url': unescapeHTML(thumbnail_url
),
294 'width': int_or_none(src
.get('width')),
295 'height': int_or_none(src
.get('height')),
296 'http_headers': {'Accept': '*/*'}
,
299 for image
in try_get(data
, lambda x
: x
['preview']['images']) or []:
300 if not isinstance(image
, dict):
302 add_thumbnail(image
.get('source'))
303 resolutions
= image
.get('resolutions')
304 if isinstance(resolutions
, list):
305 for resolution
in resolutions
:
306 add_thumbnail(resolution
)
309 'title': data
.get('title'),
310 'thumbnails': thumbnails
,
311 'timestamp': float_or_none(data
.get('created_utc')),
312 'uploader': data
.get('author'),
313 'channel_id': data
.get('subreddit'),
314 'like_count': int_or_none(data
.get('ups')),
315 'dislike_count': int_or_none(data
.get('downs')),
316 'comment_count': int_or_none(data
.get('num_comments')),
317 'age_limit': age_limit
,
320 parsed_url
= urllib
.parse
.urlparse(video_url
)
322 # Check for embeds in text posts, or else raise to avoid recursing into the same reddit URL
323 if 'reddit.com' in parsed_url
.netloc
and f
'/{video_id}/' in parsed_url
.path
:
325 for media
in traverse_obj(data
, ('media_metadata', ...), expected_type
=dict):
326 if not media
.get('id') or media
.get('e') != 'RedditVideo':
329 if media
.get('hlsUrl'):
330 formats
.extend(self
._extract
_m
3u8_formats
(
331 unescapeHTML(media
['hlsUrl']), video_id
, 'mp4', m3u8_id
='hls', fatal
=False))
332 if media
.get('dashUrl'):
333 formats
.extend(self
._extract
_mpd
_formats
(
334 unescapeHTML(media
['dashUrl']), video_id
, mpd_id
='dash', fatal
=False))
338 'display_id': video_id
,
343 return self
.playlist_result(entries
, video_id
, info
.get('title'))
344 raise ExtractorError('No media found', expected
=True)
346 # Check if media is hosted on reddit:
347 reddit_video
= traverse_obj(data
, (
348 (None, ('crosspost_parent_list', ...)), ('secure_media', 'media'), 'reddit_video'), get_all
=False)
351 try_get(reddit_video
, lambda x
: unescapeHTML(x
[y
]))
352 for y
in ('dash_url', 'hls_url')
356 display_id
= video_id
357 video_id
= self
._search
_regex
(
358 r
'https?://v\.redd\.it/(?P<id>[^/?#&]+)', reddit_video
['fallback_url'],
359 'video_id', default
=display_id
)
361 dash_playlist_url
= playlist_urls
[0] or f
'https://v.redd.it/{video_id}/DASHPlaylist.mpd'
362 hls_playlist_url
= playlist_urls
[1] or f
'https://v.redd.it/{video_id}/HLSPlaylist.m3u8'
363 qs
= traverse_obj(parse_qs(hls_playlist_url
), {
364 'f': ('f', 0, {lambda x: ','.join([x, 'subsAll']) if x else 'hd,subsAll'}
),
366 hls_playlist_url
= update_url_query(hls_playlist_url
, qs
)
369 'url': unescapeHTML(reddit_video
['fallback_url']),
370 'height': int_or_none(reddit_video
.get('height')),
371 'width': int_or_none(reddit_video
.get('width')),
372 'tbr': int_or_none(reddit_video
.get('bitrate_kbps')),
376 'format_id': 'fallback',
377 'format_note': 'DASH video, mp4_dash',
379 hls_fmts
, subtitles
= self
._extract
_m
3u8_formats
_and
_subtitles
(
380 hls_playlist_url
, display_id
, 'mp4', m3u8_id
='hls', fatal
=False)
381 formats
.extend(hls_fmts
)
382 dash_fmts
, dash_subs
= self
._extract
_mpd
_formats
_and
_subtitles
(
383 dash_playlist_url
, display_id
, mpd_id
='dash', fatal
=False)
384 formats
.extend(dash_fmts
)
385 self
._merge
_subtitles
(dash_subs
, target
=subtitles
)
390 'display_id': display_id
,
392 'subtitles': subtitles
or self
.extract_subtitles(video_id
),
393 'duration': int_or_none(reddit_video
.get('duration')),
396 if parsed_url
.netloc
== 'v.redd.it':
397 self
.raise_no_formats('This video is processing', expected
=True, video_id
=video_id
)
400 'id': parsed_url
.path
.split('/')[1],
401 'display_id': video_id
,
404 # Not hosted on reddit, must continue extraction
407 'display_id': video_id
,
408 '_type': 'url_transparent',