# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
    try_get,
    unescapeHTML,
    url_or_none,
)
class RedditIE(InfoExtractor):
    """Extract directly-hosted Reddit videos (v.redd.it).

    v.redd.it serves bare media with no metadata endpoint, so the video id
    doubles as the title.  Formats come from the HLS and DASH manifests that
    Reddit publishes at fixed, id-derived URLs.
    """
    _VALID_URL = r'https?://v\.redd\.it/(?P<id>[^/?#&]+)'
    _TEST = {
        # from https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/
        'url': 'https://v.redd.it/zv89llsvexdz',
        'md5': '0a070c53eba7ec4534d95a5a1259e253',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'zv89llsvexdz',
        },
        'params': {
            'format': 'bestvideo',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Both manifests are fetched with fatal=False: either one may be
        # absent for a given video, and one successful manifest is enough.
        formats = self._extract_m3u8_formats(
            'https://v.redd.it/%s/HLSPlaylist.m3u8' % video_id, video_id,
            'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)

        formats.extend(self._extract_mpd_formats(
            'https://v.redd.it/%s/DASHPlaylist.mpd' % video_id, video_id,
            mpd_id='dash', fatal=False))

        self._sort_formats(formats)

        return {
            'id': video_id,
            # No metadata endpoint exists for bare v.redd.it links, so the
            # id is the best available title.
            'title': video_id,
            'formats': formats,
        }
class RedditRIE(InfoExtractor):
    """Extract media referenced by a reddit.com comments page.

    Fetches the post's JSON (``<post url>/.json``), pulls out the linked
    media URL, thumbnails and post metadata, and delegates actual media
    extraction via a ``url_transparent`` result so any supported hoster
    (v.redd.it, imgur, streamable, youtube, ...) is handled by its own
    extractor.
    """
    _VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'That small heart attack.',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:4',
            'timestamp': 1501941939,
            'upload_date': '20170805',
        },
        'params': {
            'format': 'bestvideo',
            'skip_download': True,
        },
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
        'only_matching': True,
    }, {
        'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
        'only_matching': True,
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
        'only_matching': True,
    }, {
        # reddit video @ nm reddit
        'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # The 'url' group strips any trailing slug so the /.json endpoint
        # is hit at the canonical comments URL; 'id' is the post id.
        mobj = re.match(self._VALID_URL, url)
        url, video_id = mobj.group('url', 'id')

        data = self._download_json(
            url + '/.json', video_id)[0]['data']['children'][0]['data']

        video_url = data['url']

        # Avoid recursing into the same reddit URL: a self-post whose link
        # points back at its own comments page would loop forever.
        if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:
            raise ExtractorError('No media found', expected=True)

        # over_18 is a tri-state: True -> adult, False -> all ages,
        # missing/None -> unknown (leave age_limit unset).
        over_18 = data.get('over_18')
        if over_18 is True:
            age_limit = 18
        elif over_18 is False:
            age_limit = 0
        else:
            age_limit = None

        thumbnails = []

        def add_thumbnail(src):
            # src is one preview entry: {'url': ..., 'width': ..., 'height': ...}
            if not isinstance(src, dict):
                return
            thumbnail_url = url_or_none(src.get('url'))
            if not thumbnail_url:
                return
            thumbnails.append({
                # Reddit HTML-escapes URLs inside its JSON (&amp; etc.)
                'url': unescapeHTML(thumbnail_url),
                'width': int_or_none(src.get('width')),
                'height': int_or_none(src.get('height')),
            })

        # Collect the full-size source plus every downscaled resolution.
        for image in try_get(data, lambda x: x['preview']['images']) or []:
            if not isinstance(image, dict):
                continue
            add_thumbnail(image.get('source'))
            resolutions = image.get('resolutions')
            if isinstance(resolutions, list):
                for resolution in resolutions:
                    add_thumbnail(resolution)

        return {
            '_type': 'url_transparent',
            'url': video_url,
            'title': data.get('title'),
            'thumbnails': thumbnails,
            'timestamp': float_or_none(data.get('created_utc')),
            'uploader': data.get('author'),
            # Native reddit videos report duration under media or
            # secure_media depending on the post; try both.
            'duration': int_or_none(try_get(
                data,
                (lambda x: x['media']['reddit_video']['duration'],
                 lambda x: x['secure_media']['reddit_video']['duration']))),
            'like_count': int_or_none(data.get('ups')),
            'dislike_count': int_or_none(data.get('downs')),
            'comment_count': int_or_none(data.get('num_comments')),
            'age_limit': age_limit,
        }