5 from datetime
import datetime
7 from .common
import InfoExtractor
, SearchInfoExtractor
# Root of Rokfin's public v2 REST API; every metadata endpoint used by the
# extractors below is built by appending a path to this base.
_API_BASE_URL = 'https://prod-api-v2.production.rokfin.com/api/v2/public/'
class RokfinIE(InfoExtractor):
    """Extractor for individual Rokfin posts (VOD) and streams (live)."""

    # <id> deliberately keeps the 'post/NNN' / 'stream/NNN' prefix so API
    # request paths can be built from it directly; <type> distinguishes VOD
    # posts from live streams.
    _VALID_URL = r'https?://(?:www\.)?rokfin\.com/(?P<id>(?P<type>post|stream)/\d+)'
    _NETRC_MACHINE = 'rokfin'
    # Keycloak (OpenID Connect) realm base used for login and token refresh.
    _AUTH_BASE = 'https://secure.rokfin.com/auth/realms/rokfin-web/protocol/openid-connect'
    _access_mgmt_tokens = {}  # OAuth 2.0: RFC 6749, Sec. 1.4-5
32 'url': 'https://www.rokfin.com/post/57548/Mitt-Romneys-Crazy-Solution-To-Climate-Change',
36 'title': 'Mitt Romney\'s Crazy Solution To Climate Change',
37 'thumbnail': r
're:https://img\.production\.rokfin\.com/.+',
38 'upload_date': '20211023',
39 'timestamp': 1634998029,
40 'channel': 'Jimmy Dore',
42 'channel_url': 'https://rokfin.com/TheJimmyDoreShow',
44 'availability': 'public',
45 'live_status': 'not_live',
50 'url': 'https://rokfin.com/post/223/Julian-Assange-Arrested-Streaming-In-Real-Time',
54 'title': 'Julian Assange Arrested: Streaming In Real Time',
55 'thumbnail': r
're:https://img\.production\.rokfin\.com/.+',
56 'upload_date': '20190412',
57 'timestamp': 1555052644,
58 'channel': 'Ron Placone',
60 'channel_url': 'https://rokfin.com/RonPlacone',
61 'availability': 'public',
62 'live_status': 'not_live',
65 'tags': ['FreeThinkingMedia^', 'RealProgressives^'],
68 'url': 'https://www.rokfin.com/stream/10543/Its-A-Crazy-Mess-Regional-Director-Blows-Whistle-On-Pfizers-Vaccine-Trial-Data',
72 'title': '"It\'s A Crazy Mess" Regional Director Blows Whistle On Pfizer\'s Vaccine Trial Data',
73 'thumbnail': r
're:https://img\.production\.rokfin\.com/.+',
74 'description': 'md5:324ce2d3e3b62e659506409e458b9d8e',
75 'channel': 'Ryan Cristián',
77 'channel_url': 'https://rokfin.com/TLAVagabond',
78 'availability': 'public',
81 'live_status': 'was_live',
82 'timestamp': 1635874720,
83 'release_timestamp': 1635874720,
84 'release_date': '20211102',
85 'upload_date': '20211102',
88 'tags': ['FreeThinkingMedia^'],
92 def _real_extract(self
, url
):
93 video_id
, video_type
= self
._match
_valid
_url
(url
).group('id', 'type')
94 metadata
= self
._download
_json
_using
_access
_token
(f
'{_API_BASE_URL}{video_id}', video_id
)
96 scheduled
= unified_timestamp(metadata
.get('scheduledAt'))
97 live_status
= ('was_live' if metadata
.get('stoppedAt')
98 else 'is_upcoming' if scheduled
99 else 'is_live' if video_type
== 'stream'
102 video_url
= traverse_obj(metadata
, 'url', ('content', 'contentUrl'), expected_type
=url_or_none
)
103 formats
, subtitles
= [{'url': video_url}
] if video_url
else [], {}
104 if determine_ext(video_url
) == 'm3u8':
105 formats
, subtitles
= self
._extract
_m
3u8_formats
_and
_subtitles
(
106 video_url
, video_id
, fatal
=False, live
=live_status
== 'is_live')
109 if traverse_obj(metadata
, 'premiumPlan', 'premium'):
110 self
.raise_login_required('This video is only available to premium users', True, method
='cookies')
112 self
.raise_no_formats(
113 f
'Stream is offline; scheduled for {datetime.fromtimestamp(scheduled).strftime("%Y-%m-%d %H:%M:%S")}',
114 video_id
=video_id
, expected
=True)
116 uploader
= traverse_obj(metadata
, ('createdBy', 'username'), ('creator', 'username'))
117 timestamp
= (scheduled
or float_or_none(metadata
.get('postedAtMilli'), 1000)
118 or unified_timestamp(metadata
.get('creationDateTime')))
122 'subtitles': subtitles
,
123 'title': str_or_none(traverse_obj(metadata
, 'title', ('content', 'contentTitle'))),
124 'duration': float_or_none(traverse_obj(metadata
, ('content', 'duration'))),
125 'thumbnail': url_or_none(traverse_obj(metadata
, 'thumbnail', ('content', 'thumbnailUrl1'))),
126 'description': str_or_none(traverse_obj(metadata
, 'description', ('content', 'contentDescription'))),
127 'like_count': int_or_none(metadata
.get('likeCount')),
128 'dislike_count': int_or_none(metadata
.get('dislikeCount')),
129 'channel': str_or_none(traverse_obj(metadata
, ('createdBy', 'name'), ('creator', 'name'))),
130 'channel_id': traverse_obj(metadata
, ('createdBy', 'id'), ('creator', 'id')),
131 'channel_url': url_or_none(f
'https://rokfin.com/{uploader}') if uploader
else None,
132 'timestamp': timestamp
,
133 'release_timestamp': timestamp
if live_status
!= 'not_live' else None,
134 'tags': traverse_obj(metadata
, ('tags', ..., 'title'), expected_type
=str_or_none
),
135 'live_status': live_status
,
136 'availability': self
._availability
(
137 needs_premium
=bool(traverse_obj(metadata
, 'premiumPlan', 'premium')),
138 is_private
=False, needs_subscription
=False, needs_auth
=False, is_unlisted
=False),
139 # 'comment_count': metadata.get('numComments'), # Data provided by website is wrong
140 '__post_extractor': self
.extract_comments(video_id
) if video_type
== 'post' else None,
143 def _get_comments(self
, video_id
):
145 for page_n
in itertools
.count():
146 raw_comments
= self
._download
_json
(
147 f
'{_API_BASE_URL}comment?postId={video_id[5:]}&page={page_n}&size=50',
148 video_id
, note
=f
'Downloading viewer comments page {page_n + 1}{format_field(pages_total, None, " of %s")}',
151 for comment
in raw_comments
.get('content') or []:
153 'text': str_or_none(comment
.get('comment')),
154 'author': str_or_none(comment
.get('name')),
155 'id': comment
.get('commentId'),
156 'author_id': comment
.get('userId'),
158 'like_count': int_or_none(comment
.get('numLikes')),
159 'dislike_count': int_or_none(comment
.get('numDislikes')),
160 'timestamp': unified_timestamp(comment
.get('postedAt'))
163 pages_total
= int_or_none(raw_comments
.get('totalPages')) or None
164 is_last
= raw_comments
.get('last')
165 if not raw_comments
.get('content') or is_last
or (page_n
> pages_total
if pages_total
else is_last
is not False):
168 def _perform_login(self
, username
, password
):
169 # https://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth (Sec. 3.1)
170 login_page
= self
._download
_webpage
(
171 f
'{self._AUTH_BASE}/auth?client_id=web&redirect_uri=https%3A%2F%2Frokfin.com%2Ffeed&response_mode=fragment&response_type=code&scope=openid',
172 None, note
='loading login page', errnote
='error loading login page')
173 authentication_point_url
= unescapeHTML(self
._search
_regex
(
174 r
'<form\s+[^>]+action\s*=\s*"(https://secure\.rokfin\.com/auth/realms/rokfin-web/login-actions/authenticate\?[^"]+)"',
175 login_page
, name
='Authentication URL'))
177 resp_body
= self
._download
_webpage
(
178 authentication_point_url
, None, note
='logging in', fatal
=False, expected_status
=404,
179 data
=urlencode_postdata({'username': username, 'password': password, 'rememberMe': 'off', 'credentialId': ''}
))
180 if not self
._authentication
_active
():
181 if re
.search(r
'(?i)(invalid\s+username\s+or\s+password)', resp_body
or ''):
182 raise ExtractorError('invalid username/password', expected
=True)
183 raise ExtractorError('Login failed')
185 urlh
= self
._request
_webpage
(
186 f
'{self._AUTH_BASE}/auth', None,
187 note
='granting user authorization', errnote
='user authorization rejected by Rokfin',
191 'redirect_uri': 'https://rokfin.com/silent-check-sso.html',
192 'response_mode': 'fragment',
193 'response_type': 'code',
196 self
._access
_mgmt
_tokens
= self
._download
_json
(
197 f
'{self._AUTH_BASE}/token', None,
198 note
='getting access credentials', errnote
='error getting access credentials',
199 data
=urlencode_postdata({
200 'code': urllib
.parse
.parse_qs(urllib
.parse
.urldefrag(urlh
.geturl()).fragment
).get('code')[0],
202 'grant_type': 'authorization_code',
203 'redirect_uri': 'https://rokfin.com/silent-check-sso.html'
206 def _authentication_active(self
):
208 {'KEYCLOAK_IDENTITY', 'KEYCLOAK_IDENTITY_LEGACY', 'KEYCLOAK_SESSION', 'KEYCLOAK_SESSION_LEGACY'}
209 - set(self
._get
_cookies
(self
._AUTH
_BASE
)))
211 def _get_auth_token(self
):
212 return try_get(self
._access
_mgmt
_tokens
, lambda x
: ' '.join([x
['token_type'], x
['access_token']]))
214 def _download_json_using_access_token(self
, url_or_request
, video_id
, headers
={}, query={}
):
215 assert 'authorization' not in headers
216 headers
= headers
.copy()
217 auth_token
= self
._get
_auth
_token
()
218 refresh_token
= self
._access
_mgmt
_tokens
.get('refresh_token')
220 headers
['authorization'] = auth_token
222 json_string
, urlh
= self
._download
_webpage
_handle
(
223 url_or_request
, video_id
, headers
=headers
, query
=query
, expected_status
=401)
224 if not auth_token
or urlh
.code
!= 401 or refresh_token
is None:
225 return self
._parse
_json
(json_string
, video_id
)
227 self
._access
_mgmt
_tokens
= self
._download
_json
(
228 f
'{self._AUTH_BASE}/token', video_id
,
229 note
='User authorization expired or canceled by Rokfin. Re-authorizing ...', errnote
='Failed to re-authorize',
230 data
=urlencode_postdata({
231 'grant_type': 'refresh_token',
232 'refresh_token': refresh_token
,
235 headers
['authorization'] = self
._get
_auth
_token
()
236 if headers
['authorization'] is None:
237 raise ExtractorError('User authorization lost', expected
=True)
239 return self
._download
_json
(url_or_request
, video_id
, headers
=headers
, query
=query
)
class RokfinPlaylistBaseIE(InfoExtractor):
    """Shared listing logic for Rokfin playlist-like pages (stacks, channels)."""

    # Maps the API's 'mediaType' values onto site URL path prefixes.
    _TYPES = {
        'video': 'post',
        'audio': 'post',
        'stream': 'stream',
        'dead_stream': 'stream',
        'stack': 'stack',
    }

    def _get_video_data(self, metadata):
        """Yield url_result entries for every recognized item of a listing page."""
        for content in metadata.get('content') or []:
            media_type = self._TYPES.get(content.get('mediaType'))
            # Posts carry their id directly; other media types reference it
            # through 'mediaId'.
            video_id = content.get('id') if media_type == 'post' else content.get('mediaId')
            if not media_type or not video_id:
                continue

            yield self.url_result(f'https://rokfin.com/{media_type}/{video_id}', video_id=f'{media_type}/{video_id}',
                                  video_title=str_or_none(traverse_obj(content, ('content', 'contentTitle'))))
262 class RokfinStackIE(RokfinPlaylistBaseIE
):
263 IE_NAME
= 'rokfin:stack'
264 IE_DESC
= 'Rokfin Stacks'
265 _VALID_URL
= r
'https?://(?:www\.)?rokfin\.com/stack/(?P<id>[^/]+)'
267 'url': 'https://www.rokfin.com/stack/271/Tulsi-Gabbard-Portsmouth-Townhall-FULL--Feb-9-2020',
    def _real_extract(self, url):
        # A stack is a small curated playlist; its single API endpoint returns
        # the same listing shape that _get_video_data() consumes.
        list_id = self._match_id(url)
        return self.playlist_result(self._get_video_data(
            self._download_json(f'{_API_BASE_URL}stack/{list_id}', list_id)), list_id)
280 class RokfinChannelIE(RokfinPlaylistBaseIE
):
281 IE_NAME
= 'rokfin:channel'
282 IE_DESC
= 'Rokfin Channels'
283 _VALID_URL
= r
'https?://(?:www\.)?rokfin\.com/(?!((feed/?)|(discover/?)|(channels/?))$)(?P<id>[^/]+)/?$'
285 'url': 'https://rokfin.com/TheConvoCouch',
286 'playlist_mincount': 100,
289 'title': 'TheConvoCouch - New',
290 'description': 'md5:bb622b1bca100209b91cd685f7847f06',
    def _real_initialize(self):
        # Fail early on a bad 'tab' extractor-arg, before any network I/O.
        self._validate_extractor_args()
306 def _validate_extractor_args(self
):
307 requested_tabs
= self
._configuration
_arg
('tab', None)
308 if requested_tabs
is not None and (len(requested_tabs
) > 1 or requested_tabs
[0] not in self
._TABS
):
309 raise ExtractorError(f
'Invalid extractor-arg "tab". Must be one of {", ".join(self._TABS)}', expected
=True)
311 def _entries(self
, channel_id
, channel_name
, tab
):
313 for page_n
in itertools
.count(0):
314 if tab
in ('posts', 'top'):
315 data_url
= f
'{_API_BASE_URL}user/{channel_name}/{tab}?page={page_n}&size=50'
317 data_url
= f
'{_API_BASE_URL}post/search/{tab}?page={page_n}&size=50&creator={channel_id}'
318 metadata
= self
._download
_json
(
319 data_url
, channel_name
,
320 note
=f
'Downloading video metadata page {page_n + 1}{format_field(pages_total, None, " of %s")}')
322 yield from self
._get
_video
_data
(metadata
)
323 pages_total
= int_or_none(metadata
.get('totalPages')) or None
324 is_last
= metadata
.get('last')
325 if is_last
or (page_n
> pages_total
if pages_total
else is_last
is not False):
    def _real_extract(self, url):
        # Resolve the channel's numeric id from its name, then page through
        # the tab selected via the 'tab' extractor-arg (default: 'new').
        channel_name = self._match_id(url)
        channel_info = self._download_json(f'{_API_BASE_URL}user/{channel_name}', channel_name)
        channel_id = channel_info['id']
        tab = self._configuration_arg('tab', default=['new'])[0]

        return self.playlist_result(
            self._entries(channel_id, channel_name, self._TABS[tab]),
            f'{channel_id}-{tab}', f'{channel_name} - {tab.title()}', str_or_none(channel_info.get('description')))
339 class RokfinSearchIE(SearchInfoExtractor
):
340 IE_NAME
= 'rokfin:search'
341 IE_DESC
= 'Rokfin Search'
342 _SEARCH_KEY
= 'rkfnsearch'
344 'video': (('id', 'raw'), 'post'),
345 'audio': (('id', 'raw'), 'post'),
346 'stream': (('content_id', 'raw'), 'stream'),
347 'dead_stream': (('content_id', 'raw'), 'stream'),
348 'stack': (('content_id', 'raw'), 'stack'),
351 'url': 'rkfnsearch5:"zelenko"',
355 'title': '"zelenko"',
359 _db_access_key
= None
361 def _real_initialize(self
):
362 self
._db
_url
, self
._db
_access
_key
= self
.cache
.load(self
.ie_key(), 'auth', default
=(None, None))
364 self
._get
_db
_access
_credentials
()
366 def _search_results(self
, query
):
368 for page_number
in itertools
.count(1):
369 search_results
= self
._run
_search
_query
(
370 query
, data
={'query': query, 'page': {'size': 100, 'current': page_number}
},
371 note
=f
'Downloading page {page_number}{format_field(total_pages, None, " of ~%s")}')
372 total_pages
= traverse_obj(search_results
, ('meta', 'page', 'total_pages'), expected_type
=int_or_none
)
374 for result
in search_results
.get('results') or []:
375 video_id_key
, video_type
= self
._TYPES
.get(traverse_obj(result
, ('content_type', 'raw')), (None, None))
376 video_id
= traverse_obj(result
, video_id_key
, expected_type
=int_or_none
)
377 if video_id
and video_type
:
378 yield self
.url_result(url
=f
'https://rokfin.com/{video_type}/{video_id}')
379 if not search_results
.get('results'):
382 def _run_search_query(self
, video_id
, data
, **kwargs
):
383 data
= json
.dumps(data
).encode()
384 for attempt
in range(2):
385 search_results
= self
._download
_json
(
386 self
._db
_url
, video_id
, data
=data
, fatal
=(attempt
== 1),
387 headers
={'authorization': self._db_access_key}
, **kwargs
)
389 return search_results
390 self
.write_debug('Updating access credentials')
391 self
._get
_db
_access
_credentials
(video_id
)
393 def _get_db_access_credentials(self
, video_id
=None):
394 auth_data
= {'SEARCH_KEY': None, 'ENDPOINT_BASE': None}
395 notfound_err_page
= self
._download
_webpage
(
396 'https://rokfin.com/discover', video_id
, expected_status
=404, note
='Downloading home page')
397 for js_file_path
in re
.findall(r
'<script\b[^>]*\ssrc\s*=\s*"(/static/js/[^">]+)"', notfound_err_page
):
398 js_content
= self
._download
_webpage
(
399 f
'https://rokfin.com{js_file_path}', video_id
, note
='Downloading JavaScript file', fatal
=False)
400 auth_data
.update(re
.findall(
401 rf
'REACT_APP_({"|".join(auth_data.keys())})\s*:\s*"([^"]+)"', js_content
or ''))
402 if not all(auth_data
.values()):
405 self
._db
_url
= url_or_none(f
'{auth_data["ENDPOINT_BASE"]}/api/as/v1/engines/rokfin-search/search.json')
406 self
._db
_access
_key
= f
'Bearer {auth_data["SEARCH_KEY"]}'
407 self
.cache
.store(self
.ie_key(), 'auth', (self
._db
_url
, self
._db
_access
_key
))
409 raise ExtractorError('Unable to extract access credentials')