# coding: utf-8
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    try_get,
    url_or_none,
    urlencode_postdata,
)

class HiDiveIE(InfoExtractor):
    """Extractor for hidive.com streams (requires an authenticated account)."""

    _VALID_URL = r'https?://(?:www\.)?hidive\.com/stream/(?P<id>(?P<title>[^/]+)/(?P<key>[^/?#&]+))'
    # Using X-Forwarded-For results in 403 HTTP error for HLS fragments,
    # so disabling geo bypass completely
    _GEO_BYPASS = False
    _NETRC_MACHINE = 'hidive'
    _LOGIN_URL = 'https://www.hidive.com/account/login'

    _TESTS = [{
        'url': 'https://www.hidive.com/stream/the-comic-artist-and-his-assistants/s01e001',
        'info_dict': {
            'id': 'the-comic-artist-and-his-assistants/s01e001',
            'ext': 'mp4',
            'title': 'the-comic-artist-and-his-assistants/s01e001',
            'series': 'the-comic-artist-and-his-assistants',
            'season_number': 1,
            'episode_number': 1,
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'Requires Authentication',
    }]

    def _perform_login(self, username, password):
        """Log in by scraping the login form's hidden inputs and replaying it
        with the user's credentials filled in."""
        webpage = self._download_webpage(self._LOGIN_URL, None)
        form = self._search_regex(
            r'(?s)<form[^>]+action="/account/login"[^>]*>(.+?)</form>',
            webpage, 'login form')
        # Hidden inputs carry CSRF/session tokens the endpoint expects back.
        data = self._hidden_inputs(form)
        data.update({
            'Email': username,
            'Password': password,
        })
        self._download_webpage(
            self._LOGIN_URL, None, 'Logging in', data=urlencode_postdata(data))

    def _call_api(self, video_id, title, key, data=None, **kwargs):
        """POST to the play/settings endpoint and return its JSON (or {} on
        a falsy response). *data* holds extra form fields to merge in.

        Note: default is None rather than a mutable {} to avoid the shared
        mutable-default-argument pitfall.
        """
        data = {
            **(data or {}),
            'Title': title,
            'Key': key,
            'PlayerId': 'f4f895ce1ca713ba263b91caeb1daa2d08904783',
        }
        return self._download_json(
            'https://www.hidive.com/play/settings', video_id,
            data=urlencode_postdata(data), **kwargs) or {}

    def _extract_subtitles_from_rendition(self, rendition, subtitles, parsed_urls):
        """Collect caption files from one rendition into *subtitles*,
        deduplicating URLs via the shared *parsed_urls* set (mutated in place)."""
        for cc_file in rendition.get('ccFiles', []):
            cc_url = url_or_none(try_get(cc_file, lambda x: x[2]))
            # name is used since we cant distinguish subs with same language code
            cc_lang = try_get(cc_file, (lambda x: x[1].replace(' ', '-').lower(), lambda x: x[0]), str)
            if cc_url not in parsed_urls and cc_lang:
                parsed_urls.add(cc_url)
                subtitles.setdefault(cc_lang, []).append({'url': cc_url})

    def _get_subtitles(self, url, video_id, title, key, parsed_urls):
        """Fetch subtitle tracks for every caption language advertised in the
        stream page, returning a {lang: [{'url': ...}, ...]} mapping."""
        webpage = self._download_webpage(url, video_id, fatal=False) or ''
        subtitles = {}
        for caption in set(re.findall(r'data-captions=\"([^\"]+)\"', webpage)):
            renditions = self._call_api(
                video_id, title, key, {'Captions': caption}, fatal=False,
                note=f'Downloading {caption} subtitle information').get('renditions') or {}
            # Only the rendition payloads matter here; the rendition ids do not.
            for rendition in renditions.values():
                self._extract_subtitles_from_rendition(rendition, subtitles, parsed_urls)
        return subtitles

    def _real_extract(self, url):
        video_id, title, key = self._match_valid_url(url).group('id', 'title', 'key')
        settings = self._call_api(video_id, title, key)

        # Surface server-side playback restrictions as the right error type.
        restriction = settings.get('restrictionReason')
        if restriction == 'RegionRestricted':
            self.raise_geo_restricted()
        if restriction and restriction != 'None':
            raise ExtractorError(
                f'{self.IE_NAME} said: {restriction}', expected=True)

        # parsed_urls starts with {None} so renditions lacking an HLS URL
        # (url_or_none -> None) are skipped by the membership test below.
        formats, parsed_urls = [], {None}
        for rendition_id, rendition in settings['renditions'].items():
            # Rendition ids look like '<audio>_<version>_<extra>'.
            audio, version, extra = rendition_id.split('_')
            m3u8_url = url_or_none(try_get(rendition, lambda x: x['bitrates']['hls']))
            if m3u8_url not in parsed_urls:
                parsed_urls.add(m3u8_url)
                frmt = self._extract_m3u8_formats(
                    m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id=rendition_id, fatal=False)
                for f in frmt:
                    f['language'] = audio
                    f['format_note'] = f'{version}, {extra}'
                formats.extend(frmt)
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': video_id,
            'subtitles': self.extract_subtitles(url, video_id, title, key, parsed_urls),
            'formats': formats,
            'series': title,
            'season_number': int_or_none(
                self._search_regex(r's(\d+)', key, 'season number', default=None)),
            'episode_number': int_or_none(
                self._search_regex(r'e(\d+)', key, 'episode number', default=None)),
            'http_headers': {'Referer': url}
        }