]> jfr.im git - yt-dlp.git/blob - yt_dlp/extractor/toypics.py
[misc] Add `hatch`, `ruff`, `pre-commit` and improve dev docs (#7409)
[yt-dlp.git] / yt_dlp / extractor / toypics.py
1 import re
2
3 from .common import InfoExtractor
4
5
class ToypicsIE(InfoExtractor):
    _WORKING = False
    IE_DESC = 'Toypics video'
    _VALID_URL = r'https?://videos\.toypics\.net/view/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://videos.toypics.net/view/514/chancebulged,-2-1/',
        'md5': '16e806ad6d6f58079d210fe30985e08b',
        'info_dict': {
            'id': '514',
            'ext': 'mp4',
            'title': "Chance-Bulge'd, 2",
            'age_limit': 18,
            'uploader': 'kidsune',
        }
    }

    def _real_extract(self, url):
        """Extract a single Toypics video from its /view/<id> page."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The media URLs live in standard HTML5 <video>/<source> tags,
        # so the generic HTML5 parser yields the formats directly.
        media_entry = self._parse_html5_media_entries(
            url, webpage, video_id)[0]

        return {
            'id': video_id,
            'formats': media_entry['formats'],
            'title': self._html_search_regex([
                r'<h1[^>]+class=["\']view-video-title[^>]+>([^<]+)</h',
                r'<title>([^<]+) - Toypics</title>',
            ], webpage, 'title'),
            # Uploader is best-effort: absent markup must not abort extraction.
            'uploader': self._html_search_regex(
                r'More videos from <strong>([^<]+)</strong>', webpage,
                'uploader', fatal=False),
            # Site hosts adult content, hence the fixed age limit.
            'age_limit': 18,
        }
45
46
class ToypicsUserIE(InfoExtractor):
    _WORKING = False
    IE_DESC = 'Toypics user profile'
    _VALID_URL = r'https?://videos\.toypics\.net/(?!view)(?P<id>[^/?#&]+)'
    _TEST = {
        'url': 'http://videos.toypics.net/Mikey',
        'info_dict': {
            'id': 'Mikey',
        },
        'playlist_mincount': 19,
    }

    def _real_extract(self, url):
        """Build a playlist of all public videos on a user's profile.

        Walks the paginated "public videos" listing and collects every
        /view/ link, delegating each entry to ToypicsIE.
        """
        username = self._match_id(url)

        profile_page = self._download_webpage(
            url, username, note='Retrieving profile page')

        # Total public-video count as displayed in the profile tab label;
        # fatal because pagination cannot proceed without it.
        video_count = int(self._search_regex(
            r'public/">Public Videos \(([0-9]+)\)</a></li>', profile_page,
            'video count'))

        PAGE_SIZE = 8
        # Ceiling division. The previous `(video_count + PAGE_SIZE + 1)`
        # variant overcounted whenever video_count % 8 was 0 or 7,
        # downloading a needless extra (empty) listing page.
        page_count = (video_count + PAGE_SIZE - 1) // PAGE_SIZE

        urls = []
        for n in range(1, page_count + 1):
            lpage_url = url + '/public/%d' % n
            lpage = self._download_webpage(
                lpage_url, username,
                note='Downloading page %d/%d' % (n, page_count))
            urls.extend(
                re.findall(
                    r'<div[^>]+class=["\']preview[^>]+>\s*<a[^>]+href="(https?://videos\.toypics\.net/view/[^"]+)"',
                    lpage))

        return {
            '_type': 'playlist',
            'id': username,
            'entries': [{
                '_type': 'url',
                'url': eurl,
                'ie_key': 'Toypics',
            } for eurl in urls],
        }