# yt_dlp/extractor/reddit.py — Reddit extractor (recovered from a web-viewer scrape)
import random

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
    try_get,
    unescapeHTML,
    url_or_none,
)
class RedditIE(InfoExtractor):
    """Extractor for bare v.redd.it media URLs.

    The v.redd.it host serves only the media manifests; no title or other
    metadata is available there, so the video id doubles as the title.
    """
    _VALID_URL = r'https?://v\.redd\.it/(?P<id>[^/?#&]+)'
    _TEST = {
        # from https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/
        'url': 'https://v.redd.it/zv89llsvexdz',
        'md5': '0a070c53eba7ec4534d95a5a1259e253',
        # NOTE(review): info_dict structure reconstructed from the truncated
        # scrape — verify 'id'/'ext' against upstream yt-dlp.
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'zv89llsvexdz',
        },
        'params': {
            'format': 'bestvideo',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Fetch both manifests non-fatally: either one may be absent for a
        # given video, but at least one normally resolves.
        formats = self._extract_m3u8_formats(
            'https://v.redd.it/%s/HLSPlaylist.m3u8' % video_id, video_id,
            'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
        formats.extend(self._extract_mpd_formats(
            'https://v.redd.it/%s/DASHPlaylist.mpd' % video_id, video_id,
            mpd_id='dash', fatal=False))

        self._sort_formats(formats)

        # Restored: the scraped copy was missing the return statement, so
        # extraction would have yielded None.
        return {
            'id': video_id,
            'title': video_id,
            'formats': formats,
        }
class RedditRIE(InfoExtractor):
    """Extractor for reddit.com comment-page URLs.

    Downloads the post's JSON representation and hands the embedded media
    URL off to other extractors via a url_transparent result, enriched
    with title, thumbnails, timestamps and vote/comment counts.
    """
    _VALID_URL = r'https?://(?P<subdomain>[^/]+\.)?reddit(?:media)?\.com/r/(?P<slug>[^/]+/comments/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
        # NOTE(review): info_dict reconstructed from a truncated scrape —
        # 'id'/'ext' assumed from the companion RedditIE test; confirm
        # against upstream yt-dlp.
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'That small heart attack.',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:4',
            'timestamp': 1501941939,
            'upload_date': '20170805',
        },
        'params': {
            'format': 'bestvideo',
            'skip_download': True,
        },
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
        'only_matching': True,
    }, {
        'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
        'only_matching': True,
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
        'only_matching': True,
    }, {
        # reddit video @ nm reddit
        'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',
        'only_matching': True,
    }, {
        'url': 'https://www.redditmedia.com/r/serbia/comments/pu9wbx/ako_vu%C4%8Di%C4%87_izgubi_izbore_ja_%C4%87u_da_crknem/',
        'only_matching': True,
    }]

    @staticmethod
    def _gen_session_id():
        """Return a random 16-char lowercase-hex session id.

        Reddit quarantined/NSFW content requires a reddit_session cookie;
        any well-formed random value is accepted.
        """
        # Restored: id_length was dropped by the scrape but is referenced
        # two lines below.
        id_length = 16
        rand_max = 1 << (id_length * 4)
        return '%0.*x' % (id_length, random.randrange(rand_max))

    def _real_extract(self, url):
        subdomain, slug, video_id = self._match_valid_url(url).group('subdomain', 'slug', 'id')

        # Fake session cookies opt us into quarantined/NSFW posts.
        self._set_cookie('.reddit.com', 'reddit_session', self._gen_session_id())
        self._set_cookie('.reddit.com', '_options', '%7B%22pref_quarantine_optin%22%3A%20true%7D')
        data = self._download_json(
            f'https://{subdomain}reddit.com/r/{slug}/.json', video_id, fatal=False)
        if not data:
            # Fall back to old.reddit.com in case the requested subdomain fails
            data = self._download_json(
                f'https://old.reddit.com/r/{slug}/.json', video_id)
        data = data[0]['data']['children'][0]['data']
        video_url = data['url']

        # Avoid recursing into the same reddit URL
        if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:
            raise ExtractorError('No media found', expected=True)

        # over_18 may be True, False, or absent; only a definite value
        # yields an age_limit.
        over_18 = data.get('over_18')
        if over_18 is True:
            age_limit = 18
        elif over_18 is False:
            age_limit = 0
        else:
            age_limit = None

        thumbnails = []

        def add_thumbnail(src):
            # Append one thumbnail entry, tolerating malformed preview data.
            if not isinstance(src, dict):
                return
            thumbnail_url = url_or_none(src.get('url'))
            if not thumbnail_url:
                return
            thumbnails.append({
                # Reddit HTML-escapes URLs inside its JSON (&amp; etc.).
                'url': unescapeHTML(thumbnail_url),
                'width': int_or_none(src.get('width')),
                'height': int_or_none(src.get('height')),
            })

        for image in try_get(data, lambda x: x['preview']['images']) or []:
            if not isinstance(image, dict):
                continue
            add_thumbnail(image.get('source'))
            resolutions = image.get('resolutions')
            if isinstance(resolutions, list):
                for resolution in resolutions:
                    add_thumbnail(resolution)

        return {
            '_type': 'url_transparent',
            'url': video_url,
            'title': data.get('title'),
            'thumbnails': thumbnails,
            'timestamp': float_or_none(data.get('created_utc')),
            'uploader': data.get('author'),
            'duration': int_or_none(try_get(
                data,
                (lambda x: x['media']['reddit_video']['duration'],
                 lambda x: x['secure_media']['reddit_video']['duration']))),
            'like_count': int_or_none(data.get('ups')),
            'dislike_count': int_or_none(data.get('downs')),
            'comment_count': int_or_none(data.get('num_comments')),
            'age_limit': age_limit,
        }