# coding: utf-8
# Source: yt-dlp — yt_dlp/extractor/reddit.py (mirrored via jfr.im git blob view)
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
    try_get,
    unescapeHTML,
    url_or_none,
)
class RedditIE(InfoExtractor):
    """Extractor for bare v.redd.it video URLs.

    Reddit hosts video at v.redd.it/<id> and serves both an HLS and a
    DASH manifest at well-known paths derived from the id; this extractor
    probes both and merges the resulting formats.
    """
    _VALID_URL = r'https?://v\.redd\.it/(?P<id>[^/?#&]+)'
    _TEST = {
        # from https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/
        'url': 'https://v.redd.it/zv89llsvexdz',
        'md5': '0a070c53eba7ec4534d95a5a1259e253',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            # v.redd.it carries no metadata, so the id doubles as the title
            'title': 'zv89llsvexdz',
        },
        'params': {
            'format': 'bestvideo',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Both manifests live at fixed paths under the video id; each probe
        # is fatal=False so a missing variant degrades gracefully instead of
        # aborting the whole extraction.
        formats = self._extract_m3u8_formats(
            'https://v.redd.it/%s/HLSPlaylist.m3u8' % video_id, video_id,
            'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)

        formats.extend(self._extract_mpd_formats(
            'https://v.redd.it/%s/DASHPlaylist.mpd' % video_id, video_id,
            mpd_id='dash', fatal=False))

        self._sort_formats(formats)

        # No title metadata is available at this endpoint, so reuse the id.
        return {
            'id': video_id,
            'title': video_id,
            'formats': formats,
        }
class RedditRIE(InfoExtractor):
    """Extractor for reddit.com comment-page URLs.

    Fetches the post's JSON (by appending '/.json' to the post URL), pulls
    metadata and thumbnails from it, and delegates the actual media URL to
    whatever extractor matches it via a 'url_transparent' result.
    """
    _VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'That small heart attack.',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:4',
            'timestamp': 1501941939,
            'upload_date': '20170805',
            'uploader': 'Antw87',
            'duration': 12,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
        },
        'params': {
            'format': 'bestvideo',
            'skip_download': True,
        },
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
        'only_matching': True,
    }, {
        # imgur
        'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # streamable
        'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
        'only_matching': True,
    }, {
        # youtube
        'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
        'only_matching': True,
    }, {
        # reddit video @ nm reddit
        'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        # 'url' group is the canonical post URL without trailing path junk;
        # it is what we append '/.json' to below.
        url, video_id = mobj.group('url', 'id')

        video_id = self._match_id(url)

        # Reddit serves the post payload as JSON at <post-url>/.json; the
        # post itself is the first child of the first listing.
        data = self._download_json(
            url + '/.json', video_id)[0]['data']['children'][0]['data']

        video_url = data['url']

        # Avoid recursing into the same reddit URL
        if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:
            raise ExtractorError('No media found', expected=True)

        # over_18 is a tri-state: True -> 18+, False -> all ages,
        # missing/None -> unknown (leave age_limit unset).
        over_18 = data.get('over_18')
        if over_18 is True:
            age_limit = 18
        elif over_18 is False:
            age_limit = 0
        else:
            age_limit = None

        thumbnails = []

        def add_thumbnail(src):
            # Append one thumbnail entry; silently skip malformed sources.
            if not isinstance(src, dict):
                return
            thumbnail_url = url_or_none(src.get('url'))
            if not thumbnail_url:
                return
            thumbnails.append({
                # URLs in the preview payload are HTML-escaped (&amp;)
                'url': unescapeHTML(thumbnail_url),
                'width': int_or_none(src.get('width')),
                'height': int_or_none(src.get('height')),
            })

        # Each preview image has one full-size 'source' plus a list of
        # smaller 'resolutions'; collect them all.
        for image in try_get(data, lambda x: x['preview']['images']) or []:
            if not isinstance(image, dict):
                continue
            add_thumbnail(image.get('source'))
            resolutions = image.get('resolutions')
            if isinstance(resolutions, list):
                for resolution in resolutions:
                    add_thumbnail(resolution)

        # url_transparent: let the extractor matching video_url do the
        # heavy lifting while we supply the reddit-side metadata.
        return {
            '_type': 'url_transparent',
            'url': video_url,
            'title': data.get('title'),
            'thumbnails': thumbnails,
            'timestamp': float_or_none(data.get('created_utc')),
            'uploader': data.get('author'),
            # native reddit video duration may live under 'media' or
            # 'secure_media' depending on the post
            'duration': int_or_none(try_get(
                data,
                (lambda x: x['media']['reddit_video']['duration'],
                 lambda x: x['secure_media']['reddit_video']['duration']))),
            'like_count': int_or_none(data.get('ups')),
            'dislike_count': int_or_none(data.get('downs')),
            'comment_count': int_or_none(data.get('num_comments')),
            'age_limit': age_limit,
        }