import random
from urllib.parse import urlparse

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
    traverse_obj,
    try_get,
    unescapeHTML,
    url_or_none,
)


class RedditIE(InfoExtractor):
    _VALID_URL = r'https?://(?P<subdomain>[^/]+\.)?reddit(?:media)?\.com/r/(?P<slug>[^/]+/comments/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'display_id': '6rrwyj',
            'title': 'That small heart attack.',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:4',
            'timestamp': 1501941939,
            'upload_date': '20170805',
            'uploader': 'Antw87',
            'duration': 12,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
        'only_matching': True,
    }, {
        # imgur
        'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # imgur @ old reddit
        'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # streamable
        'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
        'only_matching': True,
    }, {
        # youtube
        'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
        'only_matching': True,
    }, {
        # reddit video @ nm reddit
        'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',
        'only_matching': True,
    }, {
        'url': 'https://www.redditmedia.com/r/serbia/comments/pu9wbx/ako_vu%C4%8Di%C4%87_izgubi_izbore_ja_%C4%87u_da_crknem/',
        'only_matching': True,
    }]

    @staticmethod
    def _gen_session_id():
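        # No login is performed; the cookie set in _real_extract() only has to
        # look like a session id, so a random 16-hex-digit value suffices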
        id_length = 16
        rand_max = 1 << (id_length * 4)  # each hex digit encodes 4 bits
        return '%0.*x' % (id_length, random.randrange(rand_max))

    def _real_extract(self, url):
        subdomain, slug, video_id = self._match_valid_url(url).group('subdomain', 'slug', 'id')

        self._set_cookie('.reddit.com', 'reddit_session', self._gen_session_id())
        self._set_cookie('.reddit.com', '_options', '%7B%22pref_quarantine_optin%22%3A%20true%7D')
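        # '_options' is the URL-encoded JSON {"pref_quarantine_optin": true},
        # which opts the anonymous request into quarantined subreddits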
        data = self._download_json(f'https://{subdomain}reddit.com/r/{slug}/.json', video_id, fatal=False)
        if not data:
            # Fall back to old.reddit.com in case the requested subdomain fails
            data = self._download_json(f'https://old.reddit.com/r/{slug}/.json', video_id)
        data = data[0]['data']['children'][0]['data']
        video_url = data['url']

        # Avoid recursing into the same reddit URL
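        # (a text or crosspost submission can have a 'url' pointing back at
        # this very comments page; returning it as 'url_transparent' below
        # would re-invoke this extractor indefinitely)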
        if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:
            raise ExtractorError('No media found', expected=True)

        over_18 = data.get('over_18')
        if over_18 is True:
            age_limit = 18
        elif over_18 is False:
            age_limit = 0
        else:
            age_limit = None

        thumbnails = []

        def add_thumbnail(src):
            if not isinstance(src, dict):
                return
            thumbnail_url = url_or_none(src.get('url'))
            if not thumbnail_url:
                return
            thumbnails.append({
                'url': unescapeHTML(thumbnail_url),
                'width': int_or_none(src.get('width')),
                'height': int_or_none(src.get('height')),
            })

        for image in try_get(data, lambda x: x['preview']['images']) or []:
            if not isinstance(image, dict):
                continue
            add_thumbnail(image.get('source'))
            resolutions = image.get('resolutions')
            if isinstance(resolutions, list):
                for resolution in resolutions:
                    add_thumbnail(resolution)

        info = {
            'title': data.get('title'),
            'thumbnails': thumbnails,
            'timestamp': float_or_none(data.get('created_utc')),
            'uploader': data.get('author'),
            'like_count': int_or_none(data.get('ups')),
            'dislike_count': int_or_none(data.get('downs')),
            'comment_count': int_or_none(data.get('num_comments')),
            'age_limit': age_limit,
        }

        # Check if media is hosted on reddit:
        reddit_video = traverse_obj(data, (('media', 'secure_media'), 'reddit_video'), get_all=False)
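        # (the path branches over both 'media' and 'secure_media';
        # get_all=False returns the first branch containing 'reddit_video')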
        if reddit_video:
            playlist_urls = [
                try_get(reddit_video, lambda x: unescapeHTML(x[y]))
                for y in ('dash_url', 'hls_url')
            ]

            # Update video_id: prefer the v.redd.it media id as the canonical
            # id and keep the reddit post id as display_id
            display_id = video_id
            video_id = self._search_regex(
                r'https?://v\.redd\.it/(?P<id>[^/?#&]+)', reddit_video['fallback_url'],
                'video_id', default=display_id)

            dash_playlist_url = playlist_urls[0] or f'https://v.redd.it/{video_id}/DASHPlaylist.mpd'
            hls_playlist_url = playlist_urls[1] or f'https://v.redd.it/{video_id}/HLSPlaylist.m3u8'
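            # (v.redd.it serves manifests at these predictable paths, so they
            # cover API responses that omit dash_url/hls_url)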

            formats = self._extract_m3u8_formats(
                hls_playlist_url, display_id, 'mp4',
                entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
            formats.extend(self._extract_mpd_formats(
                dash_playlist_url, display_id, mpd_id='dash', fatal=False))
            self._sort_formats(formats)

            return {
                **info,
                'id': video_id,
                'display_id': display_id,
                'formats': formats,
                'duration': int_or_none(reddit_video.get('duration')),
            }

        parsed_url = urlparse(video_url)
        if parsed_url.netloc == 'v.redd.it':
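            # A v.redd.it link without reddit_video metadata appears to mean
            # the upload has not finished transcoding yet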
            self.raise_no_formats('This video is processing', expected=True, video_id=video_id)
            return {
                **info,
                'id': parsed_url.path.split('/')[1],
                'display_id': video_id,
            }

        # Not hosted on reddit; let whichever extractor matches the URL
        # continue the extraction
        return {
            **info,
            'display_id': video_id,
            '_type': 'url_transparent',
            'url': video_url,
        }