]> jfr.im git - yt-dlp.git/blame - yt_dlp/extractor/packtpub.py
[extractor/nebula] Add nebula.tv (#4918)
[yt-dlp.git] / yt_dlp / extractor / packtpub.py
CommitLineData
c56ad5c9 1import json
74539995
S
2
3from .common import InfoExtractor
5d0968f0 4from ..compat import (
25d71fb0 5 # compat_str,
5d0968f0
RA
6 compat_HTTPError,
7)
74539995
S
8from ..utils import (
9 clean_html,
10 ExtractorError,
25d71fb0
RA
11 # remove_end,
12 str_or_none,
74539995
S
13 strip_or_none,
14 unified_timestamp,
25d71fb0 15 # urljoin,
74539995
S
16)
17
18
class PacktPubBaseIE(InfoExtractor):
    """Base class holding constants shared by the PacktPub extractors."""
    # CDN endpoint serving static per-product JSON (toc, summary, ...).
    _STATIC_PRODUCTS_BASE = 'https://static.packt-cdn.com/products/'
23
class PacktPubIE(PacktPubBaseIE):
    """Extractor for a single PacktPub course video.

    Supports both the legacy www.packtpub.com/mapt and the newer
    subscription.packtpub.com URL layouts.
    """
    _VALID_URL = r'https?://(?:(?:www\.)?packtpub\.com/mapt|subscription\.packtpub\.com)/video/[^/]+/(?P<course_id>\d+)/(?P<chapter_id>[^/]+)/(?P<id>[^/]+)(?:/(?P<display_id>[^/?&#]+))?'

    _TESTS = [{
        'url': 'https://www.packtpub.com/mapt/video/web-development/9781787122215/20528/20530/Project+Intro',
        'md5': '1e74bd6cfd45d7d07666f4684ef58f70',
        'info_dict': {
            'id': '20530',
            'ext': 'mp4',
            'title': 'Project Intro',
            'thumbnail': r're:(?i)^https?://.*\.jpg',
            'timestamp': 1490918400,
            'upload_date': '20170331',
        },
    }, {
        'url': 'https://subscription.packtpub.com/video/web_development/9781787122215/20528/20530/project-intro',
        'only_matching': True,
    }, {
        'url': 'https://subscription.packtpub.com/video/programming/9781838988906/p1/video1_1/business-card-project',
        'only_matching': True,
    }]
    _NETRC_MACHINE = 'packtpub'
    # Bearer access token set by _perform_login(); None when anonymous.
    _TOKEN = None

    def _perform_login(self, username, password):
        """Exchange credentials for an API access token.

        Re-raises credential failures (HTTP 400/401/404) as an expected
        ExtractorError carrying the service-provided error message.
        """
        try:
            self._TOKEN = self._download_json(
                'https://services.packtpub.com/auth-v1/users/tokens', None,
                'Downloading Authorization Token', data=json.dumps({
                    'username': username,
                    'password': password,
                }).encode())['data']['access']
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError) and e.cause.code in (400, 401, 404):
                message = self._parse_json(e.cause.read().decode(), None)['message']
                raise ExtractorError(message, expected=True)
            raise

    def _real_extract(self, url):
        course_id, chapter_id, video_id, display_id = self._match_valid_url(url).groups()

        headers = {}
        if self._TOKEN:
            headers['Authorization'] = 'Bearer ' + self._TOKEN
        try:
            video_url = self._download_json(
                'https://services.packtpub.com/products-v1/products/%s/%s/%s' % (course_id, chapter_id, video_id), video_id,
                'Downloading JSON video', headers=headers)['data']
        except ExtractorError as e:
            # HTTP 400 here signals a subscription-locked video rather than
            # a malformed request.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
                self.raise_login_required('This video is locked')
            raise

        # TODO: obtain title/thumbnail/timestamp metadata without duplicating
        # the course request already performed by PacktPubCourseIE.
        return {
            'id': video_id,
            'url': video_url,
            # Fall back to the URL slug when no display name is present.
            'title': display_id or video_id,
        }
97
98
class PacktPubCourseIE(PacktPubBaseIE):
    """Playlist extractor for a whole PacktPub video course."""
    _VALID_URL = r'(?P<url>https?://(?:(?:www\.)?packtpub\.com/mapt|subscription\.packtpub\.com)/video/[^/]+/(?P<id>\d+))'
    _TESTS = [{
        'url': 'https://www.packtpub.com/mapt/video/web-development/9781787122215',
        'info_dict': {
            'id': '9781787122215',
            'title': 'Learn Nodejs by building 12 projects [Video]',
            'description': 'md5:489da8d953f416e51927b60a1c7db0aa',
        },
        'playlist_count': 90,
    }, {
        'url': 'https://subscription.packtpub.com/video/web_development/9781787122215',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Single-video URLs are handled by PacktPubIE; match only bare
        # course URLs here.
        return False if PacktPubIE.suitable(url) else super().suitable(url)

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        url, course_id = mobj.group('url', 'id')

        # The table of contents is required; summary metadata is best-effort.
        course = self._download_json(
            self._STATIC_PRODUCTS_BASE + '%s/toc' % course_id, course_id)
        metadata = self._download_json(
            self._STATIC_PRODUCTS_BASE + '%s/summary' % course_id,
            course_id, fatal=False) or {}

        entries = []
        for chapter_num, chapter in enumerate(course['chapters'], 1):
            chapter_id = str_or_none(chapter.get('id'))
            sections = chapter.get('sections')
            if not chapter_id or not isinstance(sections, list):
                continue
            chapter_info = {
                'chapter': chapter.get('title'),
                'chapter_number': chapter_num,
                'chapter_id': chapter_id,
            }
            for section in sections:
                section_id = str_or_none(section.get('id'))
                # Skip non-video sections (e.g. text or quiz content).
                if not section_id or section.get('contentType') != 'video':
                    continue
                entry = {
                    '_type': 'url_transparent',
                    'url': '/'.join([url, chapter_id, section_id]),
                    'title': strip_or_none(section.get('title')),
                    'description': clean_html(section.get('summary')),
                    'thumbnail': metadata.get('coverImage'),
                    'timestamp': unified_timestamp(metadata.get('publicationDate')),
                    'ie_key': PacktPubIE.ie_key(),
                }
                entry.update(chapter_info)
                entries.append(entry)

        return self.playlist_result(
            entries, course_id, metadata.get('title'),
            clean_html(metadata.get('about')))