]> jfr.im git - yt-dlp.git/blob - yt_dlp/extractor/reddit.py
[reddit] bugfix for 8e3fd7e034cdd54972d13394821cd9e55e1c3735
[yt-dlp.git] / yt_dlp / extractor / reddit.py
1 import random
2
3 from .common import InfoExtractor
4 from ..utils import (
5 ExtractorError,
6 int_or_none,
7 float_or_none,
8 try_get,
9 unescapeHTML,
10 url_or_none,
11 )
12
13
class RedditIE(InfoExtractor):
    _VALID_URL = r'https?://v\.redd\.it/(?P<id>[^/?#&]+)'
    _TEST = {
        # from https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/
        'url': 'https://v.redd.it/zv89llsvexdz',
        'md5': '0a070c53eba7ec4534d95a5a1259e253',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'zv89llsvexdz',
        },
        'params': {
            'format': 'bestvideo',
        },
    }

    def _real_extract(self, url):
        """Extract available formats for a bare v.redd.it video id.

        Fetches both the HLS playlist and the DASH manifest for the id;
        each fetch is best-effort (fatal=False) so that one missing
        manifest does not abort the extraction.
        """
        video_id = self._match_id(url)

        formats = self._extract_m3u8_formats(
            f'https://v.redd.it/{video_id}/HLSPlaylist.m3u8', video_id,
            'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
        formats += self._extract_mpd_formats(
            f'https://v.redd.it/{video_id}/DASHPlaylist.mpd', video_id,
            mpd_id='dash', fatal=False)
        self._sort_formats(formats)

        # v.redd.it itself exposes no metadata, so the id doubles as the title.
        return {
            'id': video_id,
            'title': video_id,
            'formats': formats,
        }
48
49
class RedditRIE(InfoExtractor):
    _VALID_URL = r'https?://(?P<subdomain>[^/]+\.)?reddit(?:media)?\.com/r/(?P<slug>[^/]+/comments/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'That small heart attack.',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:4',
            'timestamp': 1501941939,
            'upload_date': '20170805',
            'uploader': 'Antw87',
            'duration': 12,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
        },
        'params': {
            'format': 'bestvideo',
            'skip_download': True,
        },
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
        'only_matching': True,
    }, {
        # imgur
        'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # imgur @ old reddit
        'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # streamable
        'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
        'only_matching': True,
    }, {
        # youtube
        'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
        'only_matching': True,
    }, {
        # reddit video @ nm reddit
        'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',
        'only_matching': True,
    }, {
        'url': 'https://www.redditmedia.com/r/serbia/comments/pu9wbx/ako_vu%C4%8Di%C4%87_izgubi_izbore_ja_%C4%87u_da_crknem/',
        'only_matching': True,
    }]

    @staticmethod
    def _gen_session_id():
        """Return 16 random lowercase hex digits, shaped like a reddit session cookie."""
        # 16 hex digits cover 64 bits; zero-pad so short draws keep the width.
        return format(random.randrange(1 << 64), '016x')

    def _real_extract(self, url):
        """Resolve a reddit comments page to the media it links to.

        Downloads the post's ``.json`` listing, collects preview
        thumbnails and basic metadata, and hands the linked media URL
        off as a ``url_transparent`` result so the appropriate extractor
        handles the actual media.
        """
        subdomain, slug, video_id = self._match_valid_url(url).group(
            'subdomain', 'slug', 'id')

        # Fake a session and the quarantine opt-in preference; without them
        # reddit refuses to serve some posts' JSON.
        self._set_cookie('.reddit.com', 'reddit_session', self._gen_session_id())
        self._set_cookie('.reddit.com', '_options', '%7B%22pref_quarantine_optin%22%3A%20true%7D')

        data = self._download_json(
            f'https://{subdomain}reddit.com/r/{slug}/.json', video_id, fatal=False)
        if not data:
            # Fall back to old.reddit.com in case the requested subdomain fails
            data = self._download_json(
                f'https://old.reddit.com/r/{slug}/.json', video_id)
        data = data[0]['data']['children'][0]['data']
        video_url = data['url']

        # Avoid recursing into the same reddit URL
        if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:
            raise ExtractorError('No media found', expected=True)

        # over_18 is a tri-state: True/False from the API, or absent.
        flagged_adult = data.get('over_18')
        age_limit = 18 if flagged_adult is True else 0 if flagged_adult is False else None

        thumbnails = []

        def add_thumbnail(candidate):
            # Accept only dicts carrying a usable URL; width/height are optional.
            if not isinstance(candidate, dict):
                return
            thumbnail_url = url_or_none(candidate.get('url'))
            if not thumbnail_url:
                return
            thumbnails.append({
                'url': unescapeHTML(thumbnail_url),
                'width': int_or_none(candidate.get('width')),
                'height': int_or_none(candidate.get('height')),
            })

        # Each preview image has one full-size 'source' plus scaled 'resolutions'.
        for image in try_get(data, lambda x: x['preview']['images']) or []:
            if not isinstance(image, dict):
                continue
            candidates = [image.get('source')]
            scaled = image.get('resolutions')
            if isinstance(scaled, list):
                candidates += scaled
            for candidate in candidates:
                add_thumbnail(candidate)

        return {
            '_type': 'url_transparent',
            'url': video_url,
            'title': data.get('title'),
            'thumbnails': thumbnails,
            'timestamp': float_or_none(data.get('created_utc')),
            'uploader': data.get('author'),
            # The duration lives under media or secure_media depending on the post.
            'duration': int_or_none(try_get(
                data,
                (lambda x: x['media']['reddit_video']['duration'],
                 lambda x: x['secure_media']['reddit_video']['duration']))),
            'like_count': int_or_none(data.get('ups')),
            'dislike_count': int_or_none(data.get('downs')),
            'comment_count': int_or_none(data.get('num_comments')),
            'age_limit': age_limit,
        }