import random
import urllib.parse

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
    traverse_obj,
    try_get,
    unescapeHTML,
    url_or_none,
)


class RedditIE(InfoExtractor):
    _VALID_URL = r'https?://(?P<subdomain>[^/]+\.)?reddit(?:media)?\.com/(?P<slug>(?:r|user)/[^/]+/comments/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'display_id': '6rrwyj',
            'title': 'That small heart attack.',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:4',
            'timestamp': 1501941939,
            'upload_date': '20170805',
            'uploader': 'Antw87',
            'duration': 12,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
            'channel_id': 'videos',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # 1080p fallback format
        'url': 'https://www.reddit.com/r/aww/comments/90bu6w/heat_index_was_110_degrees_so_we_offered_him_a/',
        'md5': '8b5902cfda3006bf90faea7adf765a49',
        'info_dict': {
            'id': 'gyh95hiqc0b11',
            'ext': 'mp4',
            'display_id': '90bu6w',
            'title': 'Heat index was 110 degrees so we offered him a cold drink. He went for a full body soak instead',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:7',
            'timestamp': 1532051078,
            'upload_date': '20180720',
            'uploader': 'FootLoosePickleJuice',
            'duration': 14,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
            'channel_id': 'aww',
        },
    }, {
        # User post
        'url': 'https://www.reddit.com/user/creepyt0es/comments/nip71r/i_plan_to_make_more_stickers_and_prints_check/',
        'info_dict': {
            'id': 'zasobba6wp071',
            'ext': 'mp4',
            'display_id': 'nip71r',
            'title': 'I plan to make more stickers and prints! Check them out on my Etsy! Or get them through my Patreon. Links below.',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:5',
            'timestamp': 1621709093,
            'upload_date': '20210522',
            'uploader': 'creepyt0es',
            'duration': 6,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
            'channel_id': 'u_creepyt0es',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # videos embedded in reddit text post
        'url': 'https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/',
        'playlist_count': 2,
        'info_dict': {
            'id': 'wzqkxp',
            'title': 'md5:72d3d19402aa11eff5bd32fc96369b37',
        },
    }, {
        # crossposted reddit-hosted media
        'url': 'https://www.reddit.com/r/dumbfuckers_club/comments/zjjw82/cringe/',
        'md5': '746180895c7b75a9d6b05341f507699a',
        'info_dict': {
            'id': 'a1oneun6pa5a1',
            'ext': 'mp4',
            'display_id': 'zjjw82',
            'title': 'Cringe',
            'uploader': 'Otaku-senpai69420',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'upload_date': '20221212',
            'timestamp': 1670812309,
            'duration': 16,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
            'channel_id': 'dumbfuckers_club',
        },
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
        'only_matching': True,
    }, {
        # imgur
        'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # imgur @ old reddit
        'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # streamable
        'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
        'only_matching': True,
    }, {
        # youtube
        'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
        'only_matching': True,
    }, {
        # reddit video @ nm reddit
        'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',
        'only_matching': True,
    }, {
        'url': 'https://www.redditmedia.com/r/serbia/comments/pu9wbx/ako_vu%C4%8Di%C4%87_izgubi_izbore_ja_%C4%87u_da_crknem/',
        'only_matching': True,
    }]

    @staticmethod
    def _gen_session_id():
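        # Descriptive note added for clarity (not from the original author):
        # this returns a random 16-digit lowercase hex string. '%0.*x' reads
        # the minimum digit count from the first argument, so the result is
        # always zero-padded to id_length hex digits.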
        id_length = 16
        rand_max = 1 << (id_length * 4)
        return '%0.*x' % (id_length, random.randrange(rand_max))

    def _real_extract(self, url):
        subdomain, slug, video_id = self._match_valid_url(url).group('subdomain', 'slug', 'id')

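        # Interpretive comments, added here for clarity: planting a random
        # `reddit_session` value plus `_options` (the URL-encoded JSON
        # {"pref_quarantine_optin": true}) appears to get past the
        # quarantined-subreddit interstitial, and appending /.json to the
        # post URL makes reddit return the listing as JSON rather than HTML.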
        self._set_cookie('.reddit.com', 'reddit_session', self._gen_session_id())
        self._set_cookie('.reddit.com', '_options', '%7B%22pref_quarantine_optin%22%3A%20true%7D')
        data = self._download_json(f'https://{subdomain}reddit.com/{slug}/.json', video_id, fatal=False)
        if not data:
            # Fall back to old.reddit.com in case the requested subdomain fails
            data = self._download_json(f'https://old.reddit.com/{slug}/.json', video_id)
        data = data[0]['data']['children'][0]['data']
        video_url = data['url']

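        # `over_18` is tri-state: leave age_limit as None when the field is
        # absent so the rating is reported as unknown rather than all-ages.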
        over_18 = data.get('over_18')
        if over_18 is True:
            age_limit = 18
        elif over_18 is False:
            age_limit = 0
        else:
            age_limit = None

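        # Each entry of `preview.images` has a full-size `source` plus scaled
        # `resolutions` variants; the API HTML-escapes these URLs, hence the
        # unescapeHTML() in add_thumbnail() below.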
        thumbnails = []

        def add_thumbnail(src):
            if not isinstance(src, dict):
                return
            thumbnail_url = url_or_none(src.get('url'))
            if not thumbnail_url:
                return
            thumbnails.append({
                'url': unescapeHTML(thumbnail_url),
                'width': int_or_none(src.get('width')),
                'height': int_or_none(src.get('height')),
            })

        for image in try_get(data, lambda x: x['preview']['images']) or []:
            if not isinstance(image, dict):
                continue
            add_thumbnail(image.get('source'))
            resolutions = image.get('resolutions')
            if isinstance(resolutions, list):
                for resolution in resolutions:
                    add_thumbnail(resolution)

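        # Metadata shared by every return path below; `created_utc` is an
        # epoch timestamp in seconds.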
        info = {
            'title': data.get('title'),
            'thumbnails': thumbnails,
            'timestamp': float_or_none(data.get('created_utc')),
            'uploader': data.get('author'),
            'channel_id': data.get('subreddit'),
            'like_count': int_or_none(data.get('ups')),
            'dislike_count': int_or_none(data.get('downs')),
            'comment_count': int_or_none(data.get('num_comments')),
            'age_limit': age_limit,
        }

        parsed_url = urllib.parse.urlparse(video_url)

        # Check for embeds in text posts, or else raise to avoid recursing into the same reddit URL
        if 'reddit.com' in parsed_url.netloc and f'/{video_id}/' in parsed_url.path:
            entries = []
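            # `media_metadata` maps media ids to descriptors; entries whose
            # `e` field is 'RedditVideo' carry HTML-escaped hlsUrl/dashUrl
            # manifest URLs.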
            for media in traverse_obj(data, ('media_metadata', ...), expected_type=dict):
                if not media.get('id') or media.get('e') != 'RedditVideo':
                    continue
                formats = []
                if media.get('hlsUrl'):
                    formats.extend(self._extract_m3u8_formats(
                        unescapeHTML(media['hlsUrl']), video_id, 'mp4', m3u8_id='hls', fatal=False))
                if media.get('dashUrl'):
                    formats.extend(self._extract_mpd_formats(
                        unescapeHTML(media['dashUrl']), video_id, mpd_id='dash', fatal=False))
                if formats:
                    entries.append({
                        'id': media['id'],
                        'display_id': video_id,
                        'formats': formats,
                        **info,
                    })
            if entries:
                return self.playlist_result(entries, video_id, info.get('title'))
            raise ExtractorError('No media found', expected=True)

        # Check if media is hosted on reddit:
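        # (Added note) The traversal checks the post itself and, failing that,
        # each entry of `crosspost_parent_list`, reading `reddit_video` from
        # either `secure_media` or `media`; get_all=False keeps the first match.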
        reddit_video = traverse_obj(data, (
            (None, ('crosspost_parent_list', ...)), ('secure_media', 'media'), 'reddit_video'), get_all=False)
        if reddit_video:
            playlist_urls = [
                try_get(reddit_video, lambda x: unescapeHTML(x[y]))
                for y in ('dash_url', 'hls_url')
            ]

            # Update video_id
            display_id = video_id
            video_id = self._search_regex(
                r'https?://v\.redd\.it/(?P<id>[^/?#&]+)', reddit_video['fallback_url'],
                'video_id', default=display_id)

            dash_playlist_url = playlist_urls[0] or f'https://v.redd.it/{video_id}/DASHPlaylist.mpd'
            hls_playlist_url = playlist_urls[1] or f'https://v.redd.it/{video_id}/HLSPlaylist.m3u8'

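            # `fallback_url` is a direct mp4 on v.redd.it, treated here as
            # video-only ('acodec': 'none'); audio is available through the
            # HLS/DASH formats extracted below.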
            formats = [{
                'url': unescapeHTML(reddit_video['fallback_url']),
                'height': int_or_none(reddit_video.get('height')),
                'width': int_or_none(reddit_video.get('width')),
                'tbr': int_or_none(reddit_video.get('bitrate_kbps')),
                'acodec': 'none',
                'vcodec': 'h264',
                'ext': 'mp4',
                'format_id': 'fallback',
                'format_note': 'DASH video, mp4_dash',
            }]
            formats.extend(self._extract_m3u8_formats(
                hls_playlist_url, display_id, 'mp4', m3u8_id='hls', fatal=False))
            formats.extend(self._extract_mpd_formats(
                dash_playlist_url, display_id, mpd_id='dash', fatal=False))

            return {
                **info,
                'id': video_id,
                'display_id': display_id,
                'formats': formats,
                'duration': int_or_none(reddit_video.get('duration')),
            }

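        # A bare v.redd.it link with no `reddit_video` data usually means the
        # upload is still being transcoded, so report it as processing instead
        # of failing hard.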
        if parsed_url.netloc == 'v.redd.it':
            self.raise_no_formats('This video is processing', expected=True, video_id=video_id)
            return {
                **info,
                'id': parsed_url.path.split('/')[1],
                'display_id': video_id,
            }

        # Not hosted on reddit, must continue extraction
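        # url_transparent defers to whichever extractor matches video_url
        # (e.g. YouTube, Imgur, Streamable) while keeping the metadata
        # collected above.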
        return {
            **info,
            'display_id': video_id,
            '_type': 'url_transparent',
            'url': video_url,
        }