from __future__ import unicode_literals

import itertools
import json
import math
import re

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urllib_parse_unquote,
    compat_urlparse,
)
from ..utils import (
    extract_attributes,
    ExtractorError,
    InAdvancePagedList,
    int_or_none,
    js_to_json,
    parse_iso8601,
    strip_or_none,
    unescapeHTML,
    unified_timestamp,
    url_or_none,
)
class PolskieRadioBaseExtractor(InfoExtractor):
    """Shared helpers for the Polskie Radio family of extractors."""

    def _extract_webpage_player_entries(self, webpage, playlist_id, base_data):
        """Yield one entry dict per unique ``data-media`` player embedded in *webpage*.

        *base_data* supplies common fields (e.g. title, timestamp, thumbnail)
        that are copied into every entry before media-specific values
        overwrite them.
        """
        media_urls = set()

        for data_media in re.findall(r'<[^>]+data-media="?({[^>]+})"?', webpage):
            media = self._parse_json(
                data_media, playlist_id, transform_source=unescapeHTML, fatal=False)
            # Skip players with no file URL or no description to use as a title.
            if not media.get('file') or not media.get('desc'):
                continue
            media_url = self._proto_relative_url(media['file'])
            # The same recording may be embedded several times; emit it once.
            if media_url in media_urls:
                continue
            media_urls.add(media_url)
            entry = base_data.copy()
            entry.update({
                'id': compat_str(media['id']),
                'url': media_url,
                'duration': int_or_none(media.get('length')),
                # 'audio' provider means there is no video track.
                'vcodec': 'none' if media.get('provider') == 'audio' else None,
            })
            entry_title = compat_urllib_parse_unquote(media['desc'])
            if entry_title:
                entry['title'] = entry_title
            yield entry
class PolskieRadioIE(PolskieRadioBaseExtractor):
    """Extract an article page (one or more audio recordings) from polskieradio.pl."""
    _VALID_URL = r'https?://(?:www\.)?polskieradio(?:24)?\.pl/\d+/\d+/Artykul/(?P<id>[0-9]+)'
    _TESTS = [{  # Old-style single broadcast.
        'url': 'http://www.polskieradio.pl/7/5102/Artykul/1587943,Prof-Andrzej-Nowak-o-historii-nie-da-sie-myslec-beznamietnie',
        'info_dict': {
            'id': '1587943',
            'title': 'Prof. Andrzej Nowak: o historii nie da się myśleć beznamiętnie',
            'description': 'md5:12f954edbf3120c5e7075e17bf9fc5c5',
        },
        'playlist': [{
            'md5': '2984ee6ce9046d91fc233bc1a864a09a',
            'info_dict': {
                'id': '1540576',
                'ext': 'mp3',
                'title': 'md5:d4623290d4ac983bf924061c75c23a0d',
                'timestamp': 1456594200,
                'upload_date': '20160227',
                'thumbnail': r're:^https?://static\.prsa\.pl/images/.*\.jpg$'
            },
        }],
    }, {  # New-style single broadcast.
        'url': 'https://www.polskieradio.pl/8/2382/Artykul/2534482,Zagarysci-Poezja-jak-spoiwo',
        'info_dict': {
            'id': '2534482',
            'title': 'Żagaryści. Poezja jak spoiwo',
            'description': 'md5:f18d95d5dcba747a09b635e21a4c0695',
        },
        'playlist': [{
            'md5': 'd07559829f61d5a93a75755987ded760',
            'info_dict': {
                'id': '2516679',
                'ext': 'mp3',
                'title': 'md5:c6e1234e0b747ad883cb91b7ad06b98c',
                'timestamp': 1592654400,
                'upload_date': '20200620',
                'thumbnail': r're:^https?://static\.prsa\.pl/images/.*\.jpg$'
            },
        }],
    }, {
        # PR4 audition - other frontend
        'url': 'https://www.polskieradio.pl/10/6071/Artykul/2610977,Poglos-29-pazdziernika-godz-2301',
        'info_dict': {
            'id': '2610977',
            'ext': 'mp3',
            'title': 'Pogłos 29 października godz. 23:01',
        },
    }, {
        'url': 'http://polskieradio.pl/9/305/Artykul/1632955,Bardzo-popularne-slowo-remis',
        'only_matching': True,
    }, {
        'url': 'http://www.polskieradio.pl/7/5102/Artykul/1587943',
        'only_matching': True,
    }, {
        # with mp4 video
        'url': 'http://www.polskieradio.pl/9/299/Artykul/1634903,Brexit-Leszek-Miller-swiat-sie-nie-zawali-Europa-bedzie-trwac-dalej',
        'only_matching': True,
    }, {
        'url': 'https://polskieradio24.pl/130/4503/Artykul/2621876,Narusza-nasza-suwerennosc-Publicysci-o-uzaleznieniu-funduszy-UE-od-praworzadnosci',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        # Article body; absent on the "other frontend" (e.g. PR4) pages.
        content = self._search_regex(
            r'(?s)<div[^>]+class="\s*this-article\s*"[^>]*>(.+?)<div[^>]+class="tags"[^>]*>',
            webpage, 'content', default=None)

        timestamp = unified_timestamp(self._html_search_regex(
            r'(?s)<span[^>]+id="datetime2"[^>]*>(.+?)</span>',
            webpage, 'timestamp', default=None))

        thumbnail_url = self._og_search_thumbnail(webpage, default=None)

        title = self._og_search_title(webpage).strip()

        description = strip_or_none(self._og_search_description(webpage, default=None))
        # Normalize non-breaking spaces in the description.
        description = description.replace('\xa0', ' ') if description is not None else None

        if not content:
            # No article body: the page carries a single direct recording.
            return {
                'id': playlist_id,
                'url': self._proto_relative_url(
                    self._search_regex(
                        r"source:\s*'(//static\.prsa\.pl/[^']+)'",
                        webpage, 'audition record url')),
                'title': title,
                'description': description,
                'timestamp': timestamp,
                'thumbnail': thumbnail_url,
            }

        entries = self._extract_webpage_player_entries(content, playlist_id, {
            'title': title,
            'timestamp': timestamp,
            'thumbnail': thumbnail_url,
        })

        return self.playlist_result(entries, playlist_id, title, description)
class PolskieRadioCategoryIE(InfoExtractor):
    """Extract a category/show listing from polskieradio.pl as a playlist of articles."""
    _VALID_URL = r'https?://(?:www\.)?polskieradio\.pl/\d+(?:,[^/]+)?/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.polskieradio.pl/7/5102,HISTORIA-ZYWA',
        'info_dict': {
            'id': '5102',
            'title': 'HISTORIA ŻYWA',
        },
        'playlist_mincount': 38,
    }, {
        'url': 'http://www.polskieradio.pl/7/4807',
        'info_dict': {
            'id': '4807',
            'title': 'Vademecum 1050. rocznicy Chrztu Polski'
        },
        'playlist_mincount': 5
    }, {
        'url': 'http://www.polskieradio.pl/7/129,Sygnaly-dnia?ref=source',
        'only_matching': True
    }, {
        'url': 'http://www.polskieradio.pl/37,RedakcjaKatolicka/4143,Kierunek-Krakow',
        'info_dict': {
            'id': '4143',
            'title': 'Kierunek Kraków',
        },
        'playlist_mincount': 61
    }, {
        'url': 'http://www.polskieradio.pl/10,czworka/214,muzyka',
        'info_dict': {
            'id': '214',
            'title': 'Muzyka',
        },
        'playlist_mincount': 61
    }, {
        'url': 'http://www.polskieradio.pl/7,Jedynka/5102,HISTORIA-ZYWA',
        'only_matching': True,
    }, {
        'url': 'http://www.polskieradio.pl/8,Dwojka/196,Publicystyka',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer article URLs to PolskieRadioIE, whose pattern also matches here.
        return False if PolskieRadioIE.suitable(url) else super(PolskieRadioCategoryIE, cls).suitable(url)

    def _entries(self, url, page, category_id):
        """Yield url_result entries for every article link, following pagination."""
        content = page
        for page_num in itertools.count(2):
            for a_entry, entry_id in re.findall(
                    r'(?s)<article[^>]+>.*?(<a[^>]+href=["\']/\d+/\d+/Artykul/(\d+)[^>]+>).*?</article>',
                    content):
                entry = extract_attributes(a_entry)
                href = entry.get('href')
                if not href:
                    continue
                yield self.url_result(
                    compat_urlparse.urljoin(url, href), PolskieRadioIE.ie_key(),
                    entry_id, entry.get('title'))
            # Follow the "next page" link until there is none.
            mobj = re.search(
                r'<div[^>]+class=["\']next["\'][^>]*>\s*<a[^>]+href=(["\'])(?P<url>(?:(?!\1).)+)\1',
                content)
            if not mobj:
                break
            next_url = compat_urlparse.urljoin(url, mobj.group('url'))
            content = self._download_webpage(
                next_url, category_id, 'Downloading page %s' % page_num)

    def _real_extract(self, url):
        category_id = self._match_id(url)
        webpage = self._download_webpage(url, category_id)
        title = self._html_search_regex(
            r'<title>([^<]+) - [^<]+ - [^<]+</title>',
            webpage, 'title', fatal=False)
        return self.playlist_result(
            self._entries(url, webpage, category_id),
            category_id, title)
class PolskieRadioPlayerIE(InfoExtractor):
    """Extract live radio streams from player.polskieradio.pl."""
    IE_NAME = 'polskieradio:player'
    _VALID_URL = r'https?://player\.polskieradio\.pl/anteny/(?P<id>[^/]+)'

    _BASE_URL = 'https://player.polskieradio.pl'
    _PLAYER_URL = 'https://player.polskieradio.pl/main.bundle.js'
    _STATIONS_API_URL = 'https://apipr.polskieradio.pl/api/stacje'

    _TESTS = [{
        'url': 'https://player.polskieradio.pl/anteny/trojka',
        'info_dict': {
            'id': '3',
            'ext': 'm4a',
            'title': 'Trójka',
        },
        'params': {
            'format': 'bestaudio',
            'skip_download': 'endless stream',
        },
    }]

    def _get_channel_list(self, channel_url='no_channel'):
        """Scrape the channel list embedded as JS data in the player bundle."""
        player_code = self._download_webpage(
            self._PLAYER_URL, channel_url,
            note='Downloading js player')
        channel_list = js_to_json(self._search_regex(
            r';var r="anteny",a=(\[.+?\])},', player_code, 'channel list'))
        return self._parse_json(channel_list, channel_url)

    def _real_extract(self, url):
        channel_url = self._match_id(url)
        channel_list = self._get_channel_list(channel_url)

        channel = next((c for c in channel_list if c.get('url') == channel_url), None)

        if not channel:
            raise ExtractorError('Channel not found')

        station_list = self._download_json(self._STATIONS_API_URL, channel_url,
                                           note='Downloading stream url list',
                                           headers={
                                               'Accept': 'application/json',
                                               'Referer': url,
                                               'Origin': self._BASE_URL,
                                           })
        # Match the API station by the channel's stream name (or display name).
        station = next((s for s in station_list
                        if s.get('Name') == (channel.get('streamName') or channel.get('name'))), None)
        if not station:
            raise ExtractorError('Station not found even though we extracted channel')

        formats = []
        for stream_url in station['Streams']:
            stream_url = self._proto_relative_url(stream_url)
            if stream_url.endswith('/playlist.m3u8'):
                formats.extend(self._extract_m3u8_formats(stream_url, channel_url, live=True))
            elif stream_url.endswith('/manifest.f4m'):
                formats.extend(self._extract_mpd_formats(stream_url, channel_url))
            elif stream_url.endswith('/Manifest'):
                formats.extend(self._extract_ism_formats(stream_url, channel_url))
            else:
                formats.append({
                    'url': stream_url,
                })

        self._sort_formats(formats)

        return {
            'id': compat_str(channel['id']),
            'formats': formats,
            'title': channel.get('name') or channel.get('streamName'),
            'display_id': channel_url,
            'thumbnail': f'{self._BASE_URL}/images/{channel_url}-color-logo.png',
            'is_live': True,
        }
class PolskieRadioPodcastBaseExtractor(InfoExtractor):
    """Shared API base and episode parsing for podcasty.polskieradio.pl extractors."""
    _API_BASE = 'https://apipodcasts.polskieradio.pl/api'

    def _parse_episode(self, data):
        """Build an info dict from one episode object returned by the podcast API."""
        return {
            'id': data['guid'],
            'formats': [{
                'url': data['url'],
                'filesize': int_or_none(data.get('fileSize')),
            }],
            'title': data['title'],
            'description': data.get('description'),
            'duration': int_or_none(data.get('length')),
            'timestamp': parse_iso8601(data.get('publishDate')),
            'thumbnail': url_or_none(data.get('image')),
            'series': data.get('podcastTitle'),
            'episode': data['title'],
        }
class PolskieRadioPodcastListIE(PolskieRadioPodcastBaseExtractor):
    """Extract a whole podcast series as a lazily paged playlist."""
    IE_NAME = 'polskieradio:podcast:list'
    _VALID_URL = r'https?://podcasty\.polskieradio\.pl/podcast/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://podcasty.polskieradio.pl/podcast/8/',
        'info_dict': {
            'id': '8',
            'title': 'Śniadanie w Trójce',
            'description': 'md5:57abcc27bc4c6a6b25baa3061975b9ef',
            'uploader': 'Beata Michniewicz',
        },
        'playlist_mincount': 714,
    }]
    _PAGE_SIZE = 10

    def _call_api(self, podcast_id, page):
        """Fetch one page of episodes for *podcast_id* (1-based *page*)."""
        return self._download_json(
            f'{self._API_BASE}/Podcasts/{podcast_id}/?pageSize={self._PAGE_SIZE}&page={page}',
            podcast_id, f'Downloading page {page}')

    def _real_extract(self, url):
        podcast_id = self._match_id(url)
        data = self._call_api(podcast_id, 1)

        def get_page(page_num):
            # Page 0 was already fetched above; reuse it instead of re-requesting.
            page_data = self._call_api(podcast_id, page_num + 1) if page_num else data
            yield from (self._parse_episode(ep) for ep in page_data['items'])

        return {
            '_type': 'playlist',
            'entries': InAdvancePagedList(
                get_page, math.ceil(data['itemCount'] / self._PAGE_SIZE), self._PAGE_SIZE),
            'id': str(data['id']),
            'title': data['title'],
            'description': data.get('description'),
            'uploader': data.get('announcer'),
        }
class PolskieRadioPodcastIE(PolskieRadioPodcastBaseExtractor):
    """Extract a single podcast episode identified by its GUID."""
    IE_NAME = 'polskieradio:podcast'
    _VALID_URL = r'https?://podcasty\.polskieradio\.pl/track/(?P<id>[a-f\d]{8}(?:-[a-f\d]{4}){4}[a-f\d]{8})'
    _TESTS = [{
        'url': 'https://podcasty.polskieradio.pl/track/6eafe403-cb8f-4756-b896-4455c3713c32',
        'info_dict': {
            'id': '6eafe403-cb8f-4756-b896-4455c3713c32',
            'ext': 'mp3',
            'title': 'Theresa May rezygnuje. Co dalej z brexitem?',
            'description': 'md5:e41c409a29d022b70ef0faa61dbded60',
        },
    }]

    def _real_extract(self, url):
        podcast_id = self._match_id(url)
        # The API takes a JSON POST body listing the requested episode GUIDs.
        data = self._download_json(
            f'{self._API_BASE}/audio',
            podcast_id, 'Downloading podcast metadata',
            data=json.dumps({
                'guids': [podcast_id],
            }).encode('utf-8'),
            headers={
                'Content-Type': 'application/json',
            })
        return self._parse_episode(data[0])
399 class PolskieRadioRadioKierowcowIE(PolskieRadioBaseExtractor):
400 _VALID_URL = r'https?://(?:www\.)?radiokierowcow\.pl/artykul/(?P<id>[0-9]+)'
401 IE_NAME = 'polskieradio:kierowcow'
404 'url': 'https://radiokierowcow.pl/artykul/2694529',
407 'title': 'Zielona fala reliktem przeszłości?',
408 'description': 'md5:343950a8717c9818fdfd4bd2b8ca9ff2',
413 def _real_extract(self, url):
414 media_id = self._match_id(url)
415 webpage = self._download_webpage(url, media_id)
416 nextjs_build = self._search_nextjs_data(webpage, media_id)['buildId']
417 article = self._download_json(
418 f'https://radiokierowcow.pl/_next/data/{nextjs_build}/artykul/{media_id}.json?articleId={media_id}',
420 data = article['pageProps']['data']
421 title = data['title']
422 entries = self._extract_webpage_player_entries(data['content'], media_id, {
431 'description': data.get('lead'),