]> jfr.im git - yt-dlp.git/blame - youtube_dl/extractor/motherless.py
[motherless] Add support for groups
[yt-dlp.git] / youtube_dl / extractor / motherless.py
CommitLineData
a69969ee
TJ
1from __future__ import unicode_literals
2
3import datetime
4import re
5
6from .common import InfoExtractor
45283afd 7from ..compat import compat_urlparse
78ff59d0 8from ..utils import (
5c0a5718 9 ExtractorError,
45283afd
MW
10 InAdvancePagedList,
11 orderedSet,
8efd06aa 12 str_to_int,
78ff59d0
PP
13 unified_strdate,
14)
a69969ee
TJ
15
16
class MotherlessIE(InfoExtractor):
    # Matches plain video pages and videos reached through a group path
    # (/g/<group>/<ID>); only the uppercase alphanumeric ID is captured.
    _VALID_URL = r'https?://(?:www\.)?motherless\.com/(?:g/[a-z0-9_]+/)?(?P<id>[A-Z0-9]+)'
    _TESTS = [
        {
            'url': 'http://motherless.com/AC3FFE1',
            'md5': '310f62e325a9fafe64f68c0bccb6e75f',
            'info_dict': {
                'id': 'AC3FFE1',
                'ext': 'mp4',
                'title': 'Fucked in the ass while playing PS3',
                'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'],
                'upload_date': '20100913',
                'uploader_id': 'famouslyfuckedup',
                'thumbnail': r're:http://.*\.jpg',
                'age_limit': 18,
            },
        },
        {
            'url': 'http://motherless.com/532291B',
            'md5': 'bc59a6b47d1f958e61fbd38a4d31b131',
            'info_dict': {
                'id': '532291B',
                'ext': 'mp4',
                'title': 'Amazing girl playing the omegle game, PERFECT!',
                'categories': ['Amateur', 'webcam', 'omegle', 'pink', 'young', 'masturbate', 'teen',
                               'game', 'hairy'],
                'upload_date': '20140622',
                'uploader_id': 'Sulivana7x',
                'thumbnail': r're:http://.*\.jpg',
                'age_limit': 18,
            },
            'skip': '404',
        },
        {
            'url': 'http://motherless.com/g/cosplay/633979F',
            'md5': '0b2a43f447a49c3e649c93ad1fafa4a0',
            'info_dict': {
                'id': '633979F',
                'ext': 'mp4',
                'title': 'Turtlette',
                'categories': ['superheroine heroine superher'],
                'upload_date': '20140827',
                'uploader_id': 'shade0230',
                'thumbnail': r're:http://.*\.jpg',
                'age_limit': 18,
            },
        },
        {
            # no keywords
            'url': 'http://motherless.com/8B4BBC1',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        """Scrape a single motherless.com video page into an info dict."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Guard clauses: removed and friends-only videos are reported as
        # expected errors rather than extraction failures.
        missing_markers = (
            '<title>404 - MOTHERLESS.COM<',
            ">The page you're looking for cannot be found.<",
        )
        if any(marker in webpage for marker in missing_markers):
            raise ExtractorError('Video %s does not exist' % video_id, expected=True)
        if '>The content you are trying to view is for friends only.' in webpage:
            raise ExtractorError('Video %s is for friends only' % video_id, expected=True)

        # Mandatory fields first — each of these raises if its pattern is
        # missing, in the same order the page is probed.
        title = self._html_search_regex(
            r'id="view-upload-title">\s+([^<]+)<', webpage, 'title')
        media_url = self._html_search_regex(
            r'setup\(\{\s+"file".+: "([^"]+)",', webpage, 'video URL')
        age_limit = self._rta_search(webpage)

        views = str_to_int(self._html_search_regex(
            r'<strong>Views</strong>\s+([^<]+)<',
            webpage, 'view count', fatal=False))
        favorites = str_to_int(self._html_search_regex(
            r'<strong>Favorited</strong>\s+([^<]+)<',
            webpage, 'like count', fatal=False))

        uploaded = self._html_search_regex(
            r'<strong>Uploaded</strong>\s+([^<]+)<', webpage, 'upload date')
        if 'Ago' in uploaded:
            # Relative timestamps ("N days Ago") are resolved against the
            # current local clock.
            days_back = int(re.search(r'([0-9]+)', uploaded).group(1))
            upload_date = (
                datetime.datetime.now() - datetime.timedelta(days=days_back)
            ).strftime('%Y%m%d')
        else:
            upload_date = unified_strdate(uploaded)

        # Comments are not parsed individually; the page is only counted.
        num_comments = webpage.count('class="media-comment-contents"')
        member_id = self._html_search_regex(
            r'"thumb-member-username">\s+<a href="/m/([^"]+)"',
            webpage, 'uploader_id')

        categories = self._html_search_meta('keywords', webpage, default=None)
        if categories:
            categories = [category.strip() for category in categories.split(',')]

        return {
            'id': video_id,
            'title': title,
            'upload_date': upload_date,
            'uploader_id': member_id,
            'thumbnail': self._og_search_thumbnail(webpage),
            'categories': categories,
            'view_count': views,
            'like_count': favorites,
            'comment_count': num_comments,
            'age_limit': age_limit,
            'url': media_url,
        }
45283afd
MW
120
121
class MotherlessGroupIE(InfoExtractor):
    # FIX: the pattern must be a raw string — the original plain string
    # contained the invalid escape sequence '\.' (DeprecationWarning since
    # Python 3.6).
    _VALID_URL = r'https?://(?:www\.)?motherless\.com/gv?/(?P<id>[a-z0-9_]+)'
    _TESTS = [{
        'url': 'http://motherless.com/g/movie_scenes',
        'info_dict': {
            'id': 'movie_scenes',
            'title': 'Movie Scenes',
            'description': 'Hot and sexy scenes from "regular" movies... '
                           'Beautiful actresses fully nude... A looot of '
                           'skin! :)Enjoy!',
        },
        'playlist_mincount': 662,
    }, {
        'url': 'http://motherless.com/gv/sex_must_be_funny',
        'info_dict': {
            'id': 'sex_must_be_funny',
            'title': 'Sex must be funny',
            'description': 'Sex can be funny. Wide smiles,laugh, games, fun of '
                           'any kind!'
        },
        'playlist_mincount': 9,
    }]

    @classmethod
    def suitable(cls, url):
        # Single-video URLs under /g/<group>/<ID> are handled by MotherlessIE;
        # defer to it so this extractor only claims group index pages.
        return (False if MotherlessIE.suitable(url)
                else super(MotherlessGroupIE, cls).suitable(url))

    def _extract_entries(self, webpage, base):
        """Return url_result entries for every video thumbnail on one group page."""
        return [
            self.url_result(
                compat_urlparse.urljoin(base, video_path),
                MotherlessIE.ie_key(), video_title=title)
            for video_path, title in orderedSet(re.findall(
                r'href="/([^"]+)"[^>]+>\s+<img[^>]+alt="[^-]+-\s([^"]+)"',
                webpage))
        ]

    def _real_extract(self, url):
        group_id = self._match_id(url)
        # Always fetch the /gv/ (video-only) listing regardless of whether the
        # user gave a /g/ or /gv/ URL.
        page_url = compat_urlparse.urljoin(url, '/gv/%s' % group_id)
        webpage = self._download_webpage(page_url, group_id)
        title = self._search_regex(
            r'<title>([\w\s]+\w)\s+-', webpage, 'title', fatal=False)
        description = self._html_search_meta(
            'description', webpage, fatal=False)
        # FIX: a single-page group has no "NEXT" link; default to one page
        # instead of raising and failing the whole extraction.
        page_count = self._int(self._search_regex(
            r'(\d+)</(?:a|span)><(?:a|span)[^>]+>\s*NEXT',
            webpage, 'page_count', default='1'), 'page_count')
        PAGE_SIZE = 80

        def _get_page(idx):
            # Pages are 1-based in the site's query string.
            webpage = self._download_webpage(
                page_url, group_id, query={'page': idx + 1},
                note='Downloading page %d/%d' % (idx + 1, page_count)
            )
            for entry in self._extract_entries(webpage, url):
                yield entry

        # Page count is known up front, so pages can be fetched lazily on demand.
        playlist = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)

        return {
            '_type': 'playlist',
            'id': group_id,
            'title': title,
            'description': description,
            'entries': playlist
        }