5 from datetime
import datetime
7 from .common
import InfoExtractor
, SearchInfoExtractor
# Base URL of Rokfin's public v2 REST API; endpoint paths (e.g. 'post/<id>',
# 'stack/<id>', 'user/<name>') are appended directly to this string.
_API_BASE_URL = 'https://prod-api-v2.production.rokfin.com/api/v2/public/'
class RokfinIE(InfoExtractor):
    # Matches single items: rokfin.com/post/<n> (VOD) and rokfin.com/stream/<n> (live/was-live)
    _VALID_URL = r'https?://(?:www\.)?rokfin\.com/(?P<id>(?P<type>post|stream)/\d+)'
    _NETRC_MACHINE = 'rokfin'
    # Keycloak OpenID Connect endpoint base, used for login and token refresh
    _AUTH_BASE = 'https://secure.rokfin.com/auth/realms/rokfin-web/protocol/openid-connect'
    _access_mgmt_tokens = {}  # OAuth 2.0: RFC 6749, Sec. 1.4-5
32 'url': 'https://www.rokfin.com/post/57548/Mitt-Romneys-Crazy-Solution-To-Climate-Change',
36 'title': 'Mitt Romney\'s Crazy Solution To Climate Change',
37 'thumbnail': r
're:https://img\.production\.rokfin\.com/.+',
38 'upload_date': '20211023',
39 'timestamp': 1634998029,
40 'channel': 'Jimmy Dore',
42 'channel_url': 'https://rokfin.com/TheJimmyDoreShow',
43 'availability': 'public',
44 'live_status': 'not_live',
50 'url': 'https://rokfin.com/post/223/Julian-Assange-Arrested-Streaming-In-Real-Time',
54 'title': 'Julian Assange Arrested: Streaming In Real Time',
55 'thumbnail': r
're:https://img\.production\.rokfin\.com/.+',
56 'upload_date': '20190412',
57 'timestamp': 1555052644,
58 'channel': 'Ron Placone',
60 'channel_url': 'https://rokfin.com/RonPlacone',
61 'availability': 'public',
62 'live_status': 'not_live',
65 'tags': ['FreeThinkingMedia^', 'RealProgressives^'],
68 'url': 'https://www.rokfin.com/stream/10543/Its-A-Crazy-Mess-Regional-Director-Blows-Whistle-On-Pfizers-Vaccine-Trial-Data',
72 'title': '"It\'s A Crazy Mess" Regional Director Blows Whistle On Pfizer\'s Vaccine Trial Data',
73 'thumbnail': r
're:https://img\.production\.rokfin\.com/.+',
74 'description': 'md5:324ce2d3e3b62e659506409e458b9d8e',
75 'channel': 'TLAVagabond',
77 'channel_url': 'https://rokfin.com/TLAVagabond',
78 'availability': 'public',
81 'live_status': 'was_live',
82 'timestamp': 1635874720,
83 'release_timestamp': 1635874720,
84 'release_date': '20211102',
85 'upload_date': '20211102',
88 'tags': ['FreeThinkingMedia^'],
92 'url': 'https://rokfin.com/post/126703/Brave-New-World--Aldous-Huxley-DEEPDIVE--Chpts-13--Quite-Frankly--Jay-Dyer',
96 'title': 'Brave New World - Aldous Huxley DEEPDIVE! (Chpts 1-3) - Quite Frankly & Jay Dyer',
97 'thumbnail': r
're:https://img\.production\.rokfin\.com/.+',
98 'channel': 'Jay Dyer',
100 'channel_url': 'https://rokfin.com/jaydyer',
101 'availability': 'premium_only',
102 'live_status': 'not_live',
103 'dislike_count': int,
105 'timestamp': 1678213357,
106 'upload_date': '20230307',
107 'tags': ['FreeThinkingMedia^', 'OpenMind^'],
108 'description': 'md5:cb04e32e68326c9b2b251b297bacff35',
112 'url': 'https://rokfin.com/stream/31332/The-Grayzone-live-on-Nordstream-blame-game',
114 'id': 'stream/31332',
116 'title': 'The Grayzone live on Nordstream blame game',
117 'thumbnail': r
're:https://image\.v\.rokfin\.com/.+',
118 'channel': 'Max Blumenthal',
119 'channel_id': 248902,
120 'channel_url': 'https://rokfin.com/MaxBlumenthal',
121 'availability': 'premium_only',
122 'live_status': 'was_live',
123 'dislike_count': int,
125 'timestamp': 1678475166,
126 'release_timestamp': 1678475166.0,
127 'release_date': '20230310',
128 'upload_date': '20230310',
129 'tags': ['FreeThinkingMedia^'],
133 def _real_extract(self
, url
):
134 video_id
, video_type
= self
._match
_valid
_url
(url
).group('id', 'type')
135 metadata
= self
._download
_json
_using
_access
_token
(f
'{_API_BASE_URL}{video_id}', video_id
)
137 scheduled
= unified_timestamp(metadata
.get('scheduledAt'))
138 live_status
= ('was_live' if metadata
.get('stoppedAt')
139 else 'is_upcoming' if scheduled
140 else 'is_live' if video_type
== 'stream'
143 video_url
= traverse_obj(metadata
, 'url', ('content', 'contentUrl'), expected_type
=url_or_none
)
144 if video_url
in (None, 'fake.m3u8'):
145 video_url
= format_field(self
._search
_regex
(
146 r
'https?://[^/]+/([^/]+)/storyboard.vtt',
147 traverse_obj(metadata
, 'timelineUrl', ('content', 'timelineUrl'), expected_type
=url_or_none
),
148 video_id
, default
=None), None, 'https://stream.v.rokfin.com/%s.m3u8')
150 formats
, subtitles
= [{'url': video_url}
] if video_url
else [], {}
151 if determine_ext(video_url
) == 'm3u8':
152 formats
, subtitles
= self
._extract
_m
3u8_formats
_and
_subtitles
(
153 video_url
, video_id
, fatal
=False, live
=live_status
== 'is_live')
156 if traverse_obj(metadata
, 'premiumPlan', 'premium'):
157 self
.raise_login_required('This video is only available to premium users', True, method
='cookies')
159 self
.raise_no_formats(
160 f
'Stream is offline; scheduled for {datetime.fromtimestamp(scheduled).strftime("%Y-%m-%d %H:%M:%S")}',
161 video_id
=video_id
, expected
=True)
163 uploader
= traverse_obj(metadata
, ('createdBy', 'username'), ('creator', 'username'))
164 timestamp
= (scheduled
or float_or_none(metadata
.get('postedAtMilli'), 1000)
165 or unified_timestamp(metadata
.get('creationDateTime')))
169 'subtitles': subtitles
,
170 'title': str_or_none(traverse_obj(metadata
, 'title', ('content', 'contentTitle'))),
171 'duration': float_or_none(traverse_obj(metadata
, ('content', 'duration'))),
172 'thumbnail': url_or_none(traverse_obj(metadata
, 'thumbnail', ('content', 'thumbnailUrl1'))),
173 'description': str_or_none(traverse_obj(metadata
, 'description', ('content', 'contentDescription'))),
174 'like_count': int_or_none(metadata
.get('likeCount')),
175 'dislike_count': int_or_none(metadata
.get('dislikeCount')),
176 'channel': str_or_none(traverse_obj(metadata
, ('createdBy', 'name'), ('creator', 'name'))),
177 'channel_id': traverse_obj(metadata
, ('createdBy', 'id'), ('creator', 'id')),
178 'channel_url': url_or_none(f
'https://rokfin.com/{uploader}') if uploader
else None,
179 'timestamp': timestamp
,
180 'release_timestamp': timestamp
if live_status
!= 'not_live' else None,
181 'tags': traverse_obj(metadata
, ('tags', ..., 'title'), expected_type
=str_or_none
),
182 'live_status': live_status
,
183 'availability': self
._availability
(
184 needs_premium
=bool(traverse_obj(metadata
, 'premiumPlan', 'premium')),
185 is_private
=False, needs_subscription
=False, needs_auth
=False, is_unlisted
=False),
186 # 'comment_count': metadata.get('numComments'), # Data provided by website is wrong
187 '__post_extractor': self
.extract_comments(video_id
) if video_type
== 'post' else None,
190 def _get_comments(self
, video_id
):
192 for page_n
in itertools
.count():
193 raw_comments
= self
._download
_json
(
194 f
'{_API_BASE_URL}comment?postId={video_id[5:]}&page={page_n}&size=50',
195 video_id
, note
=f
'Downloading viewer comments page {page_n + 1}{format_field(pages_total, None, " of %s")}',
198 for comment
in raw_comments
.get('content') or []:
200 'text': str_or_none(comment
.get('comment')),
201 'author': str_or_none(comment
.get('name')),
202 'id': comment
.get('commentId'),
203 'author_id': comment
.get('userId'),
205 'like_count': int_or_none(comment
.get('numLikes')),
206 'dislike_count': int_or_none(comment
.get('numDislikes')),
207 'timestamp': unified_timestamp(comment
.get('postedAt'))
210 pages_total
= int_or_none(raw_comments
.get('totalPages')) or None
211 is_last
= raw_comments
.get('last')
212 if not raw_comments
.get('content') or is_last
or (page_n
> pages_total
if pages_total
else is_last
is not False):
215 def _perform_login(self
, username
, password
):
216 # https://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth (Sec. 3.1)
217 login_page
= self
._download
_webpage
(
218 f
'{self._AUTH_BASE}/auth?client_id=web&redirect_uri=https%3A%2F%2Frokfin.com%2Ffeed&response_mode=fragment&response_type=code&scope=openid',
219 None, note
='loading login page', errnote
='error loading login page')
220 authentication_point_url
= unescapeHTML(self
._search
_regex
(
221 r
'<form\s+[^>]+action\s*=\s*"(https://secure\.rokfin\.com/auth/realms/rokfin-web/login-actions/authenticate\?[^"]+)"',
222 login_page
, name
='Authentication URL'))
224 resp_body
= self
._download
_webpage
(
225 authentication_point_url
, None, note
='logging in', fatal
=False, expected_status
=404,
226 data
=urlencode_postdata({'username': username, 'password': password, 'rememberMe': 'off', 'credentialId': ''}
))
227 if not self
._authentication
_active
():
228 if re
.search(r
'(?i)(invalid\s+username\s+or\s+password)', resp_body
or ''):
229 raise ExtractorError('invalid username/password', expected
=True)
230 raise ExtractorError('Login failed')
232 urlh
= self
._request
_webpage
(
233 f
'{self._AUTH_BASE}/auth', None,
234 note
='granting user authorization', errnote
='user authorization rejected by Rokfin',
238 'redirect_uri': 'https://rokfin.com/silent-check-sso.html',
239 'response_mode': 'fragment',
240 'response_type': 'code',
243 self
._access
_mgmt
_tokens
= self
._download
_json
(
244 f
'{self._AUTH_BASE}/token', None,
245 note
='getting access credentials', errnote
='error getting access credentials',
246 data
=urlencode_postdata({
247 'code': urllib
.parse
.parse_qs(urllib
.parse
.urldefrag(urlh
.url
).fragment
).get('code')[0],
249 'grant_type': 'authorization_code',
250 'redirect_uri': 'https://rokfin.com/silent-check-sso.html'
253 def _authentication_active(self
):
255 {'KEYCLOAK_IDENTITY', 'KEYCLOAK_IDENTITY_LEGACY', 'KEYCLOAK_SESSION', 'KEYCLOAK_SESSION_LEGACY'}
256 - set(self
._get
_cookies
(self
._AUTH
_BASE
)))
258 def _get_auth_token(self
):
259 return try_get(self
._access
_mgmt
_tokens
, lambda x
: ' '.join([x
['token_type'], x
['access_token']]))
261 def _download_json_using_access_token(self
, url_or_request
, video_id
, headers
={}, query={}
):
262 assert 'authorization' not in headers
263 headers
= headers
.copy()
264 auth_token
= self
._get
_auth
_token
()
265 refresh_token
= self
._access
_mgmt
_tokens
.get('refresh_token')
267 headers
['authorization'] = auth_token
269 json_string
, urlh
= self
._download
_webpage
_handle
(
270 url_or_request
, video_id
, headers
=headers
, query
=query
, expected_status
=401)
271 if not auth_token
or urlh
.status
!= 401 or refresh_token
is None:
272 return self
._parse
_json
(json_string
, video_id
)
274 self
._access
_mgmt
_tokens
= self
._download
_json
(
275 f
'{self._AUTH_BASE}/token', video_id
,
276 note
='User authorization expired or canceled by Rokfin. Re-authorizing ...', errnote
='Failed to re-authorize',
277 data
=urlencode_postdata({
278 'grant_type': 'refresh_token',
279 'refresh_token': refresh_token
,
282 headers
['authorization'] = self
._get
_auth
_token
()
283 if headers
['authorization'] is None:
284 raise ExtractorError('User authorization lost', expected
=True)
286 return self
._download
_json
(url_or_request
, video_id
, headers
=headers
, query
=query
)
289 class RokfinPlaylistBaseIE(InfoExtractor
):
294 'dead_stream': 'stream',
298 def _get_video_data(self
, metadata
):
299 for content
in metadata
.get('content') or []:
300 media_type
= self
._TYPES
.get(content
.get('mediaType'))
301 video_id
= content
.get('id') if media_type
== 'post' else content
.get('mediaId')
302 if not media_type
or not video_id
:
305 yield self
.url_result(f
'https://rokfin.com/{media_type}/{video_id}', video_id
=f
'{media_type}/{video_id}',
306 video_title
=str_or_none(traverse_obj(content
, ('content', 'contentTitle'))))
class RokfinStackIE(RokfinPlaylistBaseIE):
    IE_NAME = 'rokfin:stack'
    IE_DESC = 'Rokfin Stacks'
    # Stack (curated collection) pages: rokfin.com/stack/<id>[/slug]
    _VALID_URL = r'https?://(?:www\.)?rokfin\.com/stack/(?P<id>[^/]+)'
314 'url': 'https://www.rokfin.com/stack/271/Tulsi-Gabbard-Portsmouth-Townhall-FULL--Feb-9-2020',
321 def _real_extract(self
, url
):
322 list_id
= self
._match
_id
(url
)
323 return self
.playlist_result(self
._get
_video
_data
(
324 self
._download
_json
(f
'{_API_BASE_URL}stack/{list_id}', list_id
)), list_id
)
class RokfinChannelIE(RokfinPlaylistBaseIE):
    IE_NAME = 'rokfin:channel'
    IE_DESC = 'Rokfin Channels'
    # Any single top-level path component that is not a known non-channel page
    # (feed/discover/channels) is treated as a channel name.
    _VALID_URL = r'https?://(?:www\.)?rokfin\.com/(?!((feed/?)|(discover/?)|(channels/?))$)(?P<id>[^/]+)/?$'
332 'url': 'https://rokfin.com/TheConvoCouch',
333 'playlist_mincount': 100,
336 'title': 'TheConvoCouch - New',
337 'description': 'md5:bb622b1bca100209b91cd685f7847f06',
    def _real_initialize(self):
        # Fail fast on an invalid "tab" extractor-arg before any network traffic.
        self._validate_extractor_args()
353 def _validate_extractor_args(self
):
354 requested_tabs
= self
._configuration
_arg
('tab', None)
355 if requested_tabs
is not None and (len(requested_tabs
) > 1 or requested_tabs
[0] not in self
._TABS
):
356 raise ExtractorError(f
'Invalid extractor-arg "tab". Must be one of {", ".join(self._TABS)}', expected
=True)
358 def _entries(self
, channel_id
, channel_name
, tab
):
360 for page_n
in itertools
.count(0):
361 if tab
in ('posts', 'top'):
362 data_url
= f
'{_API_BASE_URL}user/{channel_name}/{tab}?page={page_n}&size=50'
364 data_url
= f
'{_API_BASE_URL}post/search/{tab}?page={page_n}&size=50&creator={channel_id}'
365 metadata
= self
._download
_json
(
366 data_url
, channel_name
,
367 note
=f
'Downloading video metadata page {page_n + 1}{format_field(pages_total, None, " of %s")}')
369 yield from self
._get
_video
_data
(metadata
)
370 pages_total
= int_or_none(metadata
.get('totalPages')) or None
371 is_last
= metadata
.get('last')
372 if is_last
or (page_n
> pages_total
if pages_total
else is_last
is not False):
375 def _real_extract(self
, url
):
376 channel_name
= self
._match
_id
(url
)
377 channel_info
= self
._download
_json
(f
'{_API_BASE_URL}user/{channel_name}', channel_name
)
378 channel_id
= channel_info
['id']
379 tab
= self
._configuration
_arg
('tab', default
=['new'])[0]
381 return self
.playlist_result(
382 self
._entries
(channel_id
, channel_name
, self
._TABS
[tab
]),
383 f
'{channel_id}-{tab}', f
'{channel_name} - {tab.title()}', str_or_none(channel_info
.get('description')))
class RokfinSearchIE(SearchInfoExtractor):
    IE_NAME = 'rokfin:search'
    IE_DESC = 'Rokfin Search'
    # Search prefix, e.g. 'rkfnsearch5:"zelenko"' (see test URL below)
    _SEARCH_KEY = 'rkfnsearch'
391 'video': (('id', 'raw'), 'post'),
392 'audio': (('id', 'raw'), 'post'),
393 'stream': (('content_id', 'raw'), 'stream'),
394 'dead_stream': (('content_id', 'raw'), 'stream'),
395 'stack': (('content_id', 'raw'), 'stack'),
398 'url': 'rkfnsearch5:"zelenko"',
402 'title': '"zelenko"',
    # 'Bearer <key>' for the search backend; populated by _get_db_access_credentials()
    _db_access_key = None
408 def _real_initialize(self
):
409 self
._db
_url
, self
._db
_access
_key
= self
.cache
.load(self
.ie_key(), 'auth', default
=(None, None))
411 self
._get
_db
_access
_credentials
()
413 def _search_results(self
, query
):
415 for page_number
in itertools
.count(1):
416 search_results
= self
._run
_search
_query
(
417 query
, data
={'query': query, 'page': {'size': 100, 'current': page_number}
},
418 note
=f
'Downloading page {page_number}{format_field(total_pages, None, " of ~%s")}')
419 total_pages
= traverse_obj(search_results
, ('meta', 'page', 'total_pages'), expected_type
=int_or_none
)
421 for result
in search_results
.get('results') or []:
422 video_id_key
, video_type
= self
._TYPES
.get(traverse_obj(result
, ('content_type', 'raw')), (None, None))
423 video_id
= traverse_obj(result
, video_id_key
, expected_type
=int_or_none
)
424 if video_id
and video_type
:
425 yield self
.url_result(url
=f
'https://rokfin.com/{video_type}/{video_id}')
426 if not search_results
.get('results'):
429 def _run_search_query(self
, video_id
, data
, **kwargs
):
430 data
= json
.dumps(data
).encode()
431 for attempt
in range(2):
432 search_results
= self
._download
_json
(
433 self
._db
_url
, video_id
, data
=data
, fatal
=(attempt
== 1),
434 headers
={'authorization': self._db_access_key}
, **kwargs
)
436 return search_results
437 self
.write_debug('Updating access credentials')
438 self
._get
_db
_access
_credentials
(video_id
)
440 def _get_db_access_credentials(self
, video_id
=None):
441 auth_data
= {'SEARCH_KEY': None, 'ENDPOINT_BASE': None}
442 notfound_err_page
= self
._download
_webpage
(
443 'https://rokfin.com/discover', video_id
, expected_status
=404, note
='Downloading home page')
444 for js_file_path
in re
.findall(r
'<script\b[^>]*\ssrc\s*=\s*"(/static/js/[^">]+)"', notfound_err_page
):
445 js_content
= self
._download
_webpage
(
446 f
'https://rokfin.com{js_file_path}', video_id
, note
='Downloading JavaScript file', fatal
=False)
447 auth_data
.update(re
.findall(
448 rf
'REACT_APP_({"|".join(auth_data.keys())})\s*:\s*"([^"]+)"', js_content
or ''))
449 if not all(auth_data
.values()):
452 self
._db
_url
= url_or_none(f
'{auth_data["ENDPOINT_BASE"]}/api/as/v1/engines/rokfin-search/search.json')
453 self
._db
_access
_key
= f
'Bearer {auth_data["SEARCH_KEY"]}'
454 self
.cache
.store(self
.ie_key(), 'auth', (self
._db
_url
, self
._db
_access
_key
))
456 raise ExtractorError('Unable to extract access credentials')