5 from datetime
import datetime
7 from .common
import InfoExtractor
, SearchInfoExtractor
# Root of Rokfin's public (unauthenticated) REST API; all metadata endpoints hang off this.
_API_BASE_URL = 'https://prod-api-v2.production.rokfin.com/api/v2/public/'
26 class RokfinIE(InfoExtractor
):
    # Matches both VOD posts and live streams; the <type> group drives branching in _real_extract
    _VALID_URL = r'https?://(?:www\.)?rokfin\.com/(?P<id>(?P<type>post|stream)/\d+)'
    # Machine name for .netrc credential lookup
    _NETRC_MACHINE = 'rokfin'
    # Keycloak OpenID-Connect endpoint root used for login and token exchange
    _AUTH_BASE = 'https://secure.rokfin.com/auth/realms/rokfin-web/protocol/openid-connect'
    _access_mgmt_tokens = {}  # OAuth 2.0: RFC 6749, Sec. 1.4-5
32 'url': 'https://www.rokfin.com/post/57548/Mitt-Romneys-Crazy-Solution-To-Climate-Change',
36 'title': 'Mitt Romney\'s Crazy Solution To Climate Change',
37 'thumbnail': r
're:https://img\.production\.rokfin\.com/.+',
38 'upload_date': '20211023',
39 'timestamp': 1634998029,
40 'channel': 'Jimmy Dore',
42 'channel_url': 'https://rokfin.com/TheJimmyDoreShow',
44 'availability': 'public',
45 'live_status': 'not_live',
50 'url': 'https://rokfin.com/post/223/Julian-Assange-Arrested-Streaming-In-Real-Time',
54 'title': 'Julian Assange Arrested: Streaming In Real Time',
55 'thumbnail': r
're:https://img\.production\.rokfin\.com/.+',
56 'upload_date': '20190412',
57 'timestamp': 1555052644,
58 'channel': 'Ron Placone',
60 'channel_url': 'https://rokfin.com/RonPlacone',
61 'availability': 'public',
62 'live_status': 'not_live',
65 'tags': ['FreeThinkingMedia^', 'RealProgressives^'],
68 'url': 'https://www.rokfin.com/stream/10543/Its-A-Crazy-Mess-Regional-Director-Blows-Whistle-On-Pfizers-Vaccine-Trial-Data',
72 'title': '"It\'s A Crazy Mess" Regional Director Blows Whistle On Pfizer\'s Vaccine Trial Data',
73 'thumbnail': r
're:https://img\.production\.rokfin\.com/.+',
74 'description': 'md5:324ce2d3e3b62e659506409e458b9d8e',
75 'channel': 'Ryan Cristián',
77 'channel_url': 'https://rokfin.com/TLAVagabond',
78 'availability': 'public',
81 'live_status': 'was_live',
82 'timestamp': 1635874720,
83 'release_timestamp': 1635874720,
84 'release_date': '20211102',
85 'upload_date': '20211102',
88 'tags': ['FreeThinkingMedia^'],
92 def _real_extract(self
, url
):
93 video_id
, video_type
= self
._match
_valid
_url
(url
).group('id', 'type')
94 metadata
= self
._download
_json
_using
_access
_token
(f
'{_API_BASE_URL}{video_id}', video_id
)
96 scheduled
= unified_timestamp(metadata
.get('scheduledAt'))
97 live_status
= ('was_live' if metadata
.get('stoppedAt')
98 else 'is_upcoming' if scheduled
99 else 'is_live' if video_type
== 'stream'
102 video_url
= traverse_obj(metadata
, 'url', ('content', 'contentUrl'), expected_type
=url_or_none
)
103 formats
, subtitles
= [{'url': video_url}
] if video_url
else [], {}
104 if determine_ext(video_url
) == 'm3u8':
105 formats
, subtitles
= self
._extract
_m
3u8_formats
_and
_subtitles
(
106 video_url
, video_id
, fatal
=False, live
=live_status
== 'is_live')
109 if traverse_obj(metadata
, 'premiumPlan', 'premium'):
110 self
.raise_login_required('This video is only available to premium users', True, method
='cookies')
112 self
.raise_no_formats(
113 f
'Stream is offline; sheduled for {datetime.fromtimestamp(scheduled).strftime("%Y-%m-%d %H:%M:%S")}',
114 video_id
=video_id
, expected
=True)
115 self
._sort
_formats
(formats
)
117 uploader
= traverse_obj(metadata
, ('createdBy', 'username'), ('creator', 'username'))
118 timestamp
= (scheduled
or float_or_none(metadata
.get('postedAtMilli'), 1000)
119 or unified_timestamp(metadata
.get('creationDateTime')))
123 'subtitles': subtitles
,
124 'title': str_or_none(traverse_obj(metadata
, 'title', ('content', 'contentTitle'))),
125 'duration': float_or_none(traverse_obj(metadata
, ('content', 'duration'))),
126 'thumbnail': url_or_none(traverse_obj(metadata
, 'thumbnail', ('content', 'thumbnailUrl1'))),
127 'description': str_or_none(traverse_obj(metadata
, 'description', ('content', 'contentDescription'))),
128 'like_count': int_or_none(metadata
.get('likeCount')),
129 'dislike_count': int_or_none(metadata
.get('dislikeCount')),
130 'channel': str_or_none(traverse_obj(metadata
, ('createdBy', 'name'), ('creator', 'name'))),
131 'channel_id': traverse_obj(metadata
, ('createdBy', 'id'), ('creator', 'id')),
132 'channel_url': url_or_none(f
'https://rokfin.com/{uploader}') if uploader
else None,
133 'timestamp': timestamp
,
134 'release_timestamp': timestamp
if live_status
!= 'not_live' else None,
135 'tags': traverse_obj(metadata
, ('tags', ..., 'title'), expected_type
=str_or_none
),
136 'live_status': live_status
,
137 'availability': self
._availability
(
138 needs_premium
=bool(traverse_obj(metadata
, 'premiumPlan', 'premium')),
139 is_private
=False, needs_subscription
=False, needs_auth
=False, is_unlisted
=False),
140 # 'comment_count': metadata.get('numComments'), # Data provided by website is wrong
141 '__post_extractor': self
.extract_comments(video_id
) if video_type
== 'post' else None,
144 def _get_comments(self
, video_id
):
146 for page_n
in itertools
.count():
147 raw_comments
= self
._download
_json
(
148 f
'{_API_BASE_URL}comment?postId={video_id[5:]}&page={page_n}&size=50',
149 video_id
, note
=f
'Downloading viewer comments page {page_n + 1}{format_field(pages_total, template=" of %s")}',
152 for comment
in raw_comments
.get('content') or []:
154 'text': str_or_none(comment
.get('comment')),
155 'author': str_or_none(comment
.get('name')),
156 'id': comment
.get('commentId'),
157 'author_id': comment
.get('userId'),
159 'like_count': int_or_none(comment
.get('numLikes')),
160 'dislike_count': int_or_none(comment
.get('numDislikes')),
161 'timestamp': unified_timestamp(comment
.get('postedAt'))
164 pages_total
= int_or_none(raw_comments
.get('totalPages')) or None
165 is_last
= raw_comments
.get('last')
166 if not raw_comments
.get('content') or is_last
or (page_n
> pages_total
if pages_total
else is_last
is not False):
169 def _perform_login(self
, username
, password
):
170 # https://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth (Sec. 3.1)
171 login_page
= self
._download
_webpage
(
172 f
'{self._AUTH_BASE}/auth?client_id=web&redirect_uri=https%3A%2F%2Frokfin.com%2Ffeed&response_mode=fragment&response_type=code&scope=openid',
173 None, note
='loading login page', errnote
='error loading login page')
174 authentication_point_url
= unescapeHTML(self
._search
_regex
(
175 r
'<form\s+[^>]+action\s*=\s*"(https://secure\.rokfin\.com/auth/realms/rokfin-web/login-actions/authenticate\?[^"]+)"',
176 login_page
, name
='Authentication URL'))
178 resp_body
= self
._download
_webpage
(
179 authentication_point_url
, None, note
='logging in', fatal
=False, expected_status
=404,
180 data
=urlencode_postdata({'username': username, 'password': password, 'rememberMe': 'off', 'credentialId': ''}
))
181 if not self
._authentication
_active
():
182 if re
.search(r
'(?i)(invalid\s+username\s+or\s+password)', resp_body
or ''):
183 raise ExtractorError('invalid username/password', expected
=True)
184 raise ExtractorError('Login failed')
186 urlh
= self
._request
_webpage
(
187 f
'{self._AUTH_BASE}/auth', None,
188 note
='granting user authorization', errnote
='user authorization rejected by Rokfin',
192 'redirect_uri': 'https://rokfin.com/silent-check-sso.html',
193 'response_mode': 'fragment',
194 'response_type': 'code',
197 self
._access
_mgmt
_tokens
= self
._download
_json
(
198 f
'{self._AUTH_BASE}/token', None,
199 note
='getting access credentials', errnote
='error getting access credentials',
200 data
=urlencode_postdata({
201 'code': urllib
.parse
.parse_qs(urllib
.parse
.urldefrag(urlh
.geturl()).fragment
).get('code')[0],
203 'grant_type': 'authorization_code',
204 'redirect_uri': 'https://rokfin.com/silent-check-sso.html'
207 def _authentication_active(self
):
209 {'KEYCLOAK_IDENTITY', 'KEYCLOAK_IDENTITY_LEGACY', 'KEYCLOAK_SESSION', 'KEYCLOAK_SESSION_LEGACY'}
210 - set(self
._get
_cookies
(self
._AUTH
_BASE
)))
212 def _get_auth_token(self
):
213 return try_get(self
._access
_mgmt
_tokens
, lambda x
: ' '.join([x
['token_type'], x
['access_token']]))
215 def _download_json_using_access_token(self
, url_or_request
, video_id
, headers
={}, query={}
):
216 assert 'authorization' not in headers
217 headers
= headers
.copy()
218 auth_token
= self
._get
_auth
_token
()
219 refresh_token
= self
._access
_mgmt
_tokens
.get('refresh_token')
221 headers
['authorization'] = auth_token
223 json_string
, urlh
= self
._download
_webpage
_handle
(
224 url_or_request
, video_id
, headers
=headers
, query
=query
, expected_status
=401)
225 if not auth_token
or urlh
.code
!= 401 or refresh_token
is None:
226 return self
._parse
_json
(json_string
, video_id
)
228 self
._access
_mgmt
_tokens
= self
._download
_json
(
229 f
'{self._AUTH_BASE}/token', video_id
,
230 note
='User authorization expired or canceled by Rokfin. Re-authorizing ...', errnote
='Failed to re-authorize',
231 data
=urlencode_postdata({
232 'grant_type': 'refresh_token',
233 'refresh_token': refresh_token
,
236 headers
['authorization'] = self
._get
_auth
_token
()
237 if headers
['authorization'] is None:
238 raise ExtractorError('User authorization lost', expected
=True)
240 return self
._download
_json
(url_or_request
, video_id
, headers
=headers
, query
=query
)
243 class RokfinPlaylistBaseIE(InfoExtractor
):
248 'dead_stream': 'stream',
252 def _get_video_data(self
, metadata
):
253 for content
in metadata
.get('content') or []:
254 media_type
= self
._TYPES
.get(content
.get('mediaType'))
255 video_id
= content
.get('id') if media_type
== 'post' else content
.get('mediaId')
256 if not media_type
or not video_id
:
259 yield self
.url_result(f
'https://rokfin.com/{media_type}/{video_id}', video_id
=f
'{media_type}/{video_id}',
260 video_title
=str_or_none(traverse_obj(content
, ('content', 'contentTitle'))))
263 class RokfinStackIE(RokfinPlaylistBaseIE
):
    IE_NAME = 'rokfin:stack'
    # A "stack" is a user-curated collection of posts/streams
    IE_DESC = 'Rokfin Stacks'
    _VALID_URL = r'https?://(?:www\.)?rokfin\.com/stack/(?P<id>[^/]+)'
268 'url': 'https://www.rokfin.com/stack/271/Tulsi-Gabbard-Portsmouth-Townhall-FULL--Feb-9-2020',
275 def _real_extract(self
, url
):
276 list_id
= self
._match
_id
(url
)
277 return self
.playlist_result(self
._get
_video
_data
(
278 self
._download
_json
(f
'{_API_BASE_URL}stack/{list_id}', list_id
)), list_id
)
281 class RokfinChannelIE(RokfinPlaylistBaseIE
):
    IE_NAME = 'rokfin:channel'
    IE_DESC = 'Rokfin Channels'
    # Any single top-level path segment that is not feed/discover/channels is a channel name
    _VALID_URL = r'https?://(?:www\.)?rokfin\.com/(?!((feed/?)|(discover/?)|(channels/?))$)(?P<id>[^/]+)/?$'
286 'url': 'https://rokfin.com/TheConvoCouch',
287 'playlist_mincount': 100,
290 'title': 'TheConvoCouch - New',
291 'description': 'md5:bb622b1bca100209b91cd685f7847f06',
304 def _real_initialize(self
):
305 self
._validate
_extractor
_args
()
307 def _validate_extractor_args(self
):
308 requested_tabs
= self
._configuration
_arg
('tab', None)
309 if requested_tabs
is not None and (len(requested_tabs
) > 1 or requested_tabs
[0] not in self
._TABS
):
310 raise ExtractorError(f
'Invalid extractor-arg "tab". Must be one of {", ".join(self._TABS)}', expected
=True)
312 def _entries(self
, channel_id
, channel_name
, tab
):
314 for page_n
in itertools
.count(0):
315 if tab
in ('posts', 'top'):
316 data_url
= f
'{_API_BASE_URL}user/{channel_name}/{tab}?page={page_n}&size=50'
318 data_url
= f
'{_API_BASE_URL}post/search/{tab}?page={page_n}&size=50&creator={channel_id}'
319 metadata
= self
._download
_json
(
320 data_url
, channel_name
,
321 note
=f
'Downloading video metadata page {page_n + 1}{format_field(pages_total, template=" of %s")}')
323 yield from self
._get
_video
_data
(metadata
)
324 pages_total
= int_or_none(metadata
.get('totalPages')) or None
325 is_last
= metadata
.get('last')
326 if is_last
or (page_n
> pages_total
if pages_total
else is_last
is not False):
329 def _real_extract(self
, url
):
330 channel_name
= self
._match
_id
(url
)
331 channel_info
= self
._download
_json
(f
'{_API_BASE_URL}user/{channel_name}', channel_name
)
332 channel_id
= channel_info
['id']
333 tab
= self
._configuration
_arg
('tab', default
=['new'])[0]
335 return self
.playlist_result(
336 self
._entries
(channel_id
, channel_name
, self
._TABS
[tab
]),
337 f
'{channel_id}-{tab}', f
'{channel_name} - {tab.title()}', str_or_none(channel_info
.get('description')))
340 class RokfinSearchIE(SearchInfoExtractor
):
    IE_NAME = 'rokfin:search'
    IE_DESC = 'Rokfin Search'
    # Search prefix, used as e.g. 'rkfnsearch5:<query>'
    _SEARCH_KEY = 'rkfnsearch'
345 'video': (('id', 'raw'), 'post'),
346 'audio': (('id', 'raw'), 'post'),
347 'stream': (('content_id', 'raw'), 'stream'),
348 'dead_stream': (('content_id', 'raw'), 'stream'),
349 'stack': (('content_id', 'raw'), 'stack'),
352 'url': 'rkfnsearch5:"zelenko"',
356 'title': '"zelenko"',
    # 'Bearer <key>' value for the search backend; populated by _get_db_access_credentials()
    _db_access_key = None
362 def _real_initialize(self
):
363 self
._db
_url
, self
._db
_access
_key
= self
._downloader
.cache
.load(self
.ie_key(), 'auth', default
=(None, None))
365 self
._get
_db
_access
_credentials
()
367 def _search_results(self
, query
):
369 for page_number
in itertools
.count(1):
370 search_results
= self
._run
_search
_query
(
371 query
, data
={'query': query, 'page': {'size': 100, 'current': page_number}
},
372 note
=f
'Downloading page {page_number}{format_field(total_pages, template=" of ~%s")}')
373 total_pages
= traverse_obj(search_results
, ('meta', 'page', 'total_pages'), expected_type
=int_or_none
)
375 for result
in search_results
.get('results') or []:
376 video_id_key
, video_type
= self
._TYPES
.get(traverse_obj(result
, ('content_type', 'raw')), (None, None))
377 video_id
= traverse_obj(result
, video_id_key
, expected_type
=int_or_none
)
378 if video_id
and video_type
:
379 yield self
.url_result(url
=f
'https://rokfin.com/{video_type}/{video_id}')
380 if not search_results
.get('results'):
383 def _run_search_query(self
, video_id
, data
, **kwargs
):
384 data
= json
.dumps(data
).encode()
385 for attempt
in range(2):
386 search_results
= self
._download
_json
(
387 self
._db
_url
, video_id
, data
=data
, fatal
=(attempt
== 1),
388 headers
={'authorization': self._db_access_key}
, **kwargs
)
390 return search_results
391 self
.write_debug('Updating access credentials')
392 self
._get
_db
_access
_credentials
(video_id
)
394 def _get_db_access_credentials(self
, video_id
=None):
395 auth_data
= {'SEARCH_KEY': None, 'ENDPOINT_BASE': None}
396 notfound_err_page
= self
._download
_webpage
(
397 'https://rokfin.com/discover', video_id
, expected_status
=404, note
='Downloading home page')
398 for js_file_path
in re
.findall(r
'<script\b[^>]*\ssrc\s*=\s*"(/static/js/[^">]+)"', notfound_err_page
):
399 js_content
= self
._download
_webpage
(
400 f
'https://rokfin.com{js_file_path}', video_id
, note
='Downloading JavaScript file', fatal
=False)
401 auth_data
.update(re
.findall(
402 rf
'REACT_APP_({"|".join(auth_data.keys())})\s*:\s*"([^"]+)"', js_content
or ''))
403 if not all(auth_data
.values()):
406 self
._db
_url
= url_or_none(f
'{auth_data["ENDPOINT_BASE"]}/api/as/v1/engines/rokfin-search/search.json')
407 self
._db
_access
_key
= f
'Bearer {auth_data["SEARCH_KEY"]}'
408 self
._downloader
.cache
.store(self
.ie_key(), 'auth', (self
._db
_url
, self
._db
_access
_key
))
410 raise ExtractorError('Unable to extract access credentials')