]> jfr.im git - yt-dlp.git/blob - yt_dlp/extractor/teamtreehouse.py
[extractor] Add `_perform_login` function (#2943)
[yt-dlp.git] / yt_dlp / extractor / teamtreehouse.py
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5
6 from .common import InfoExtractor
7 from ..utils import (
8 clean_html,
9 determine_ext,
10 ExtractorError,
11 float_or_none,
12 get_element_by_class,
13 get_element_by_id,
14 parse_duration,
15 remove_end,
16 urlencode_postdata,
17 urljoin,
18 )
19
20
class TeamTreeHouseIE(InfoExtractor):
    """Extractor for teamtreehouse.com /library/ pages.

    A library URL can point at three kinds of page:
      * a single video  -> extracted directly from the HTML5 <video> element;
      * a workshop      -> a <ul id="workshop-videos"> list of video links;
      * a course        -> stage markup fetched from a separate syllabus URL.
    Workshops and courses are returned as playlists of url_transparent
    entries that re-enter this extractor per video.
    """
    _VALID_URL = r'https?://(?:www\.)?teamtreehouse\.com/library/(?P<id>[^/]+)'
    _TESTS = [{
        # Course
        'url': 'https://teamtreehouse.com/library/introduction-to-user-authentication-in-php',
        'info_dict': {
            'id': 'introduction-to-user-authentication-in-php',
            'title': 'Introduction to User Authentication in PHP',
            'description': 'md5:405d7b4287a159b27ddf30ca72b5b053',
        },
        'playlist_mincount': 24,
    }, {
        # WorkShop
        'url': 'https://teamtreehouse.com/library/deploying-a-react-app',
        'info_dict': {
            'id': 'deploying-a-react-app',
            'title': 'Deploying a React App',
            'description': 'md5:10a82e3ddff18c14ac13581c9b8e5921',
        },
        'playlist_mincount': 4,
    }, {
        # Video
        'url': 'https://teamtreehouse.com/library/application-overview-2',
        'info_dict': {
            'id': 'application-overview-2',
            'ext': 'mp4',
            'title': 'Application Overview',
            'description': 'md5:4b0a234385c27140a4378de5f1e15127',
        },
        'expected_warnings': ['This is just a preview'],
    }]
    _NETRC_MACHINE = 'teamtreehouse'

    def _perform_login(self, username, password):
        """Log in to teamtreehouse.com with the given credentials.

        Raises ExtractorError (expected=True) with the site's own error
        text when the login form reports a failure.
        """
        # Fetch the signin page first so the hidden form fields
        # (CSRF token etc.) can be replayed with the credentials.
        signin_page = self._download_webpage(
            'https://teamtreehouse.com/signin',
            None, 'Downloading signin page')
        data = self._form_hidden_inputs('new_user_session', signin_page)
        data.update({
            'user_session[email]': username,
            'user_session[password]': password,
        })
        # A successful login renders no element with class "error-message";
        # if one is present, surface its cleaned text to the user.
        error_message = get_element_by_class('error-message', self._download_webpage(
            'https://teamtreehouse.com/person_session',
            None, 'Logging in', data=urlencode_postdata(data)))
        if error_message:
            raise ExtractorError(clean_html(error_message), expected=True)

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        title = self._html_search_meta(['og:title', 'twitter:title'], webpage)
        description = self._html_search_meta(
            ['description', 'og:description', 'twitter:description'], webpage)
        entries = self._parse_html5_media_entries(url, webpage, display_id)
        if entries:
            # Single-video page: take the first HTML5 media entry.
            info = entries[0]

            # Subtitle URLs often carry no usable extension; default to srt.
            for subtitles in info.get('subtitles', {}).values():
                for subtitle in subtitles:
                    subtitle['ext'] = determine_ext(subtitle['url'], 'srt')

            is_preview = 'data-preview="true"' in webpage
            if is_preview:
                self.report_warning(
                    'This is just a preview. You need to be signed in with a Basic account to download the entire video.', display_id)
                # Previews are truncated to 30 seconds by the site.
                duration = 30
            else:
                # data-duration is in milliseconds, hence scale 1000.
                # default=None so that, when the attribute is missing, we
                # fall back to the visible "video-duration" element below
                # instead of raising inside _search_regex.
                duration = float_or_none(self._search_regex(
                    r'data-duration="(\d+)"', webpage, 'duration',
                    default=None), 1000)
                if not duration:
                    duration = parse_duration(get_element_by_id(
                        'video-duration', webpage))

            info.update({
                'id': display_id,
                'title': title,
                'description': description,
                'duration': duration,
            })
            return info
        else:
            # Playlist page (workshop or course).
            def extract_urls(html, extract_info=None):
                # Collect every linked page as a url_transparent entry so
                # metadata such as chapter info merges with the video's own.
                for path in re.findall(r'<a[^>]+href="([^"]+)"', html):
                    page_url = urljoin(url, path)
                    entry = {
                        '_type': 'url_transparent',
                        'id': self._match_id(page_url),
                        'url': page_url,
                        # Fixed from 'id_key': that key is not a recognized
                        # info-dict field; 'ie_key' pins this extractor for
                        # the child URL.
                        'ie_key': self.ie_key(),
                    }
                    if extract_info:
                        entry.update(extract_info)
                    entries.append(entry)

            workshop_videos = self._search_regex(
                r'(?s)<ul[^>]+id="workshop-videos"[^>]*>(.+?)</ul>',
                webpage, 'workshop videos', default=None)
            if workshop_videos:
                extract_urls(workshop_videos)
            else:
                # Course: the stage list lives on a separate page whose URL
                # is carried in the syllabus container's data-url attribute.
                stages_path = self._search_regex(
                    r'(?s)<div[^>]+id="syllabus-stages"[^>]+data-url="([^"]+)"',
                    webpage, 'stages path')
                if stages_path:
                    stages_page = self._download_webpage(
                        urljoin(url, stages_path), display_id, 'Downloading stages page')
                    # Each <h2> names a chapter; the following <ul> holds
                    # that chapter's step links.
                    for chapter_number, (chapter, steps_list) in enumerate(re.findall(r'(?s)<h2[^>]*>\s*(.+?)\s*</h2>.+?<ul[^>]*>(.+?)</ul>', stages_page), 1):
                        extract_urls(steps_list, {
                            'chapter': chapter,
                            'chapter_number': chapter_number,
                        })
                title = remove_end(title, ' Course')

            return self.playlist_result(
                entries, display_id, title, description)