4 from .common
import InfoExtractor
class LinuxAcademyIE(InfoExtractor):
    # Matches both a single lesson page (populating the chapter_id and
    # lesson_id groups) and a whole course/module page (populating the
    # course_id group); exactly one alternative matches for a given URL.
    _VALID_URL = r'''(?x)
                    https?://
                        (?:www\.)?linuxacademy\.com/cp/
                        (?:
                            courses/lesson/course/(?P<chapter_id>\d+)/lesson/(?P<lesson_id>\d+)|
                            modules/view/id/(?P<course_id>\d+)
                        )
                    '''
    _TESTS = [{
        # Single lesson URL -> one video
        'url': 'https://linuxacademy.com/cp/courses/lesson/course/7971/lesson/2/module/675',
        'info_dict': {
            'id': '7971-2',
            'ext': 'mp4',
            'title': 'What Is Data Science',
            'description': 'md5:c574a3c20607144fb36cb65bdde76c99',
            'timestamp': int,  # The timestamp and upload date changes
            'upload_date': r're:\d+',
            'duration': 304,
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'Requires Linux Academy account credentials',
    }, {
        'url': 'https://linuxacademy.com/cp/courses/lesson/course/1498/lesson/2',
        'only_matching': True,
    }, {
        # Course/module URL -> playlist of lessons
        'url': 'https://linuxacademy.com/cp/modules/view/id/154',
        'info_dict': {
            'id': '154',
            'title': 'AWS Certified Cloud Practitioner',
            'description': 'md5:a68a299ca9bb98d41cca5abc4d4ce22c',
            'duration': 28835,
        },
        'playlist_count': 41,
        'skip': 'Requires Linux Academy account credentials',
    }, {
        'url': 'https://linuxacademy.com/cp/modules/view/id/39',
        'info_dict': {
            'id': '39',
            'title': 'Red Hat Certified Systems Administrator - RHCSA (EX200) Exam Prep (legacy)',
            'description': 'md5:0f1d3369e90c3fb14a79813b863c902f',
            'duration': 89280,
        },
        'playlist_count': 73,
        'skip': 'Requires Linux Academy account credentials',
    }]

    # Hosted-login endpoint and OAuth client parameters consumed by
    # _perform_login below.
    _AUTHORIZE_URL = 'https://login.linuxacademy.com/authorize'
    _ORIGIN_URL = 'https://linuxacademy.com'
    _CLIENT_ID = 'KaWxNn1C2Gc7n83W9OFeXltd8Utb5vvx'
    # Machine name used for ~/.netrc credential lookup
    _NETRC_MACHINE = 'linuxacademy'
    def _perform_login(self, username, password):
        """Log in through Linux Academy's hosted login service.

        Walks the authorize -> usernamepassword/login -> login/callback
        request sequence (an Auth0-style cross-origin login flow, judging by
        the client_id/tenant/connection parameters — TODO confirm), then hits
        the site's token-validation endpoint so later requests are
        authenticated via session cookies.

        Raises ExtractorError (expected=True) when the service rejects the
        credentials with HTTP 401.
        """
        def random_string():
            # 32 characters from the URL-unreserved set, used for the OAuth
            # `state` and `nonce` parameters of the authorize request.
            return ''.join(random.choice(
                '0123456789ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvwxyz-._~') for _ in range(32))

        webpage, urlh = self._download_webpage_handle(
            self._AUTHORIZE_URL, None, 'Downloading authorize page', query={
                'client_id': self._CLIENT_ID,
                'response_type': 'token id_token',
                'response_mode': 'web_message',
                'redirect_uri': self._ORIGIN_URL,
                'scope': 'openid email user_impersonation profile',
                'audience': self._ORIGIN_URL,
                'state': random_string(),
                'nonce': random_string(),
            })

        # The authorize page embeds base64-encoded login parameters inside an
        # atob(...) call; decode them to seed the login POST payload.
        login_data = self._parse_json(
            self._search_regex(
                r'atob\(\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
                'login info', group='value'), None,
            transform_source=lambda x: compat_b64decode(x).decode('utf-8'))

        login_data.update({
            'client_id': self._CLIENT_ID,
            'redirect_uri': self._ORIGIN_URL,
            'tenant': 'lacausers',
            'connection': 'Username-Password-ACG-Proxy',
            'username': username,
            'password': password,
            'sso': 'true',
        })

        # Final URL of the authorize request (after redirects); the login and
        # callback requests require it as their Referer.
        login_state_url = urlh.geturl()

        try:
            login_page = self._download_webpage(
                'https://login.linuxacademy.com/usernamepassword/login', None,
                'Downloading login page', data=json.dumps(login_data).encode(),
                headers={
                    'Content-Type': 'application/json',
                    'Origin': 'https://login.linuxacademy.com',
                    'Referer': login_state_url,
                })
        except ExtractorError as e:
            # A 401 response carries a JSON body explaining the failure;
            # surface that message to the user instead of a raw HTTP error.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                error = self._parse_json(e.cause.read(), None)
                message = error.get('description') or error['code']
                raise ExtractorError(
                    '%s said: %s' % (self.IE_NAME, message), expected=True)
            raise

        # Complete the callback leg by re-submitting the hidden form fields
        # returned by the login page.
        callback_page, urlh = self._download_webpage_handle(
            'https://login.linuxacademy.com/login/callback', None,
            'Downloading callback page',
            data=urlencode_postdata(self._hidden_inputs(login_page)),
            headers={
                'Content-Type': 'application/x-www-form-urlencoded',
                'Origin': 'https://login.linuxacademy.com',
                'Referer': login_state_url,
            })

        # The access token normally arrives in the callback URL; fall back to
        # the authorizationResponse JS object embedded in the page body.
        access_token = self._search_regex(
            r'access_token=([^=&]+)', urlh.geturl(),
            'access token', default=None)
        if not access_token:
            access_token = self._parse_json(
                self._search_regex(
                    r'authorizationResponse\s*=\s*({.+?})\s*;', callback_page,
                    'authorization response'), None,
                transform_source=js_to_json)['response']['access_token']

        # Exchange the token for authenticated site cookies.
        self._download_webpage(
            'https://linuxacademy.com/cp/login/tokenValidateLogin/token/%s'
            % access_token, None, 'Downloading token validation page')
154 def _real_extract(self, url):
155 mobj = self._match_valid_url(url)
156 chapter_id, lecture_id, course_id = mobj.group('chapter_id
', 'lesson_id
', 'course_id
')
157 item_id = course_id if course_id else '%s-%s' % (chapter_id, lecture_id)
159 webpage = self._download_webpage(url, item_id)
163 module = self._parse_json(
165 r'window\
.module\s
*=\s
*({(?:(?!}
;)[^
"]|"([^
"]|\\")*")+})\s*;', webpage, 'module'),
168 chapter_number = None
171 for item in module['items']:
172 if not isinstance(item, dict):
176 return (try_get(item, lambda x: x['type'][key], compat_str) or '').lower()
177 type_fields = (type_field('name'), type_field('slug'))
178 # Move to next module section
179 if 'section' in type_fields:
180 chapter = item.get('course_name')
181 chapter_id = item.get('course_module')
182 chapter_number = 1 if not chapter_number else chapter_number + 1
185 if 'lesson' not in type_fields:
187 lesson_url = urljoin(url, item.get('url'))
190 title = item.get('title') or item.get('lesson_name')
191 description = item.get('md_desc') or clean_html(item.get('description')) or clean_html(item.get('text'))
193 '_type': 'url_transparent',
195 'ie_key': LinuxAcademyIE.ie_key(),
197 'description': description,
198 'timestamp': unified_timestamp(item.get('date')) or unified_timestamp(item.get('created_on')),
199 'duration': parse_duration(item.get('duration')),
201 'chapter_id': chapter_id,
202 'chapter_number': chapter_number,
208 'title': module.get('title'),
209 'description': module.get('md_desc') or clean_html(module.get('desc')),
210 'duration': parse_duration(module.get('duration')),
214 m3u8_url = self._parse_json(
216 r'player\.playlist\s*=\s*(\[.+?\])\s*;', webpage, 'playlist'),
218 formats = self._extract_m3u8_formats(
219 m3u8_url, item_id, 'mp4', entry_protocol='m3u8_native',
225 lesson = self._parse_json(
227 (r'window\.lesson\s*=\s*({.+?})\s*;',
228 r'player\.lesson\s*=\s*({.+?})\s*;'),
229 webpage, 'lesson', default='{}'), item_id, fatal=False)
232 'title': lesson.get('lesson_name'),
233 'description': lesson.get('md_desc') or clean_html(lesson.get('desc')),
234 'timestamp': unified_timestamp(lesson.get('date')) or unified_timestamp(lesson.get('created_on')),
235 'duration': parse_duration(lesson.get('duration')),
237 if not info.get('title'):
238 info['title'] = self._search_regex(
239 (r'>Lecture\s*:\s*(?P<value>[^<]+)',
240 r'lessonName\s*=\s*(["\'])(?P
<value
>(?
:(?
!\
1).)+)\
1'), webpage,
241 'title
', group='value
')