7 from .adobepass
import AdobePassIE
8 from .once
import OnceIE
9 from ..networking
import HEADRequest
, Request
26 default_ns
= 'http://www.w3.org/2005/SMIL21/Language'
27 _x
= lambda p
: xpath_with_ns(p
, {'smil': default_ns}
)
class ThePlatformBaseIE(OnceIE):
    """Shared SMIL/metadata extraction logic for thePlatform-backed sites."""

    # Top-level domain used when building link.theplatform.<tld> URLs;
    # subclasses for regional portals may override this.
    _TP_TLD = 'com'

    def _extract_theplatform_smil(self, smil_url, video_id, note='Downloading SMIL data'):
        """Download a SMIL manifest and return a (formats, subtitles) pair.

        Raises a geo-restriction error or a generic ExtractorError when the
        SMIL document carries an error stub instead of real media references.
        """
        meta = self._download_xml(
            smil_url, video_id, note=note, query={'format': 'SMIL'},
            headers=self.geo_verification_headers())
        error_element = find_xpath_attr(meta, _x('.//smil:ref'), 'src')
        if error_element is not None:
            exception = find_xpath_attr(
                error_element, _x('.//smil:param'), 'name', 'exception')
            if exception is not None:
                if exception.get('value') == 'GeoLocationBlocked':
                    self.raise_geo_restricted(error_element.attrib['abstract'])
                elif error_element.attrib['src'].startswith(
                        'http://link.theplatform.%s/s/errorFiles/Unavailable.'
                        % self._TP_TLD):
                    raise ExtractorError(
                        error_element.attrib['abstract'], expected=True)

        smil_formats, subtitles = self._parse_smil_formats_and_subtitles(
            meta, smil_url, video_id, namespace=default_ns,
            # the parameters are from syfy.com, other sites may use others,
            # they also work for nbc.com
            f4m_params={'g': 'UXWGVKRWHFSP', 'hdcore': '3.0.3'},
            transform_rtmp_url=lambda streamer, src: (streamer, 'mp4:' + src))

        formats = []
        for _format in smil_formats:
            if OnceIE.suitable(_format['url']):
                formats.extend(self._extract_once_formats(_format['url']))
            else:
                media_url = _format['url']
                if determine_ext(media_url) == 'm3u8':
                    hdnea2 = self._get_cookies(media_url).get('hdnea2')
                    if hdnea2:
                        # BUGFIX: the Akamai token cookie must be echoed back
                        # as a query parameter of the same name ('hdnea2');
                        # it was previously sent as 'hdnea3'.
                        _format['url'] = update_url_query(media_url, {'hdnea2': hdnea2.value})

                formats.append(_format)

        return formats, subtitles

    def _download_theplatform_metadata(self, path, video_id):
        """Fetch the JSON 'preview' metadata document for a media *path*."""
        info_url = 'http://link.theplatform.%s/s/%s?format=preview' % (self._TP_TLD, path)
        return self._download_json(info_url, video_id)

    def _parse_theplatform_metadata(self, info):
        """Map a thePlatform preview-JSON dict *info* to an info dict."""
        subtitles = {}
        captions = info.get('captions')
        if isinstance(captions, list):
            for caption in captions:
                lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type')
                subtitles.setdefault(lang, []).append({
                    'ext': mimetype2ext(mime),
                    'url': src,
                })

        duration = info.get('duration')
        tp_chapters = info.get('chapters', [])
        chapters = []
        if tp_chapters:
            def _add_chapter(start_time, end_time):
                # Upstream timestamps are in milliseconds; skip chapters
                # that lack either bound.
                start_time = float_or_none(start_time, 1000)
                end_time = float_or_none(end_time, 1000)
                if start_time is None or end_time is None:
                    return
                chapters.append({
                    'start_time': start_time,
                    'end_time': end_time,
                })

            for chapter in tp_chapters[:-1]:
                _add_chapter(chapter.get('startTime'), chapter.get('endTime'))
            # The last chapter may have no endTime; fall back to total duration
            _add_chapter(tp_chapters[-1].get('startTime'), tp_chapters[-1].get('endTime') or duration)

        def extract_site_specific_field(field):
            # A number of sites have custom-prefixed keys, e.g. 'cbc$seasonNumber'
            return traverse_obj(info, lambda k, v: v and k.endswith(f'${field}'), get_all=False)

        return {
            'title': info['title'],
            'subtitles': subtitles,
            'description': info['description'],
            'thumbnail': info['defaultThumbnailUrl'],
            'duration': float_or_none(duration, 1000),
            'timestamp': int_or_none(info.get('pubDate'), 1000) or None,
            'uploader': info.get('billingCode'),
            'chapters': chapters,
            'creator': traverse_obj(info, ('author', {str})) or None,
            'categories': traverse_obj(info, (
                'categories', lambda _, v: v.get('label') in ('category', None), 'name', {str})) or None,
            'tags': traverse_obj(info, ('keywords', {lambda x: re.split(r'[;,]\s?', x) if x else None})),
            'location': extract_site_specific_field('region'),
            'series': extract_site_specific_field('show'),
            'season_number': int_or_none(extract_site_specific_field('seasonNumber')),
            'media_type': extract_site_specific_field('programmingType') or extract_site_specific_field('type'),
        }

    def _extract_theplatform_metadata(self, path, video_id):
        """Download and parse the preview metadata in one step."""
        info = self._download_theplatform_metadata(path, video_id)
        return self._parse_theplatform_metadata(info)
class ThePlatformIE(ThePlatformBaseIE, AdobePassIE):
    """Extractor for link/player.theplatform.com media and embed URLs."""

    _VALID_URL = r'''(?x)
        (?:https?://(?:link|player)\.theplatform\.com/[sp]/(?P<provider_id>[^/]+)/
           (?:(?:(?:[^/]+/)+select/)?(?P<media>media/(?:guid/\d+/)?)?|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))?
         |theplatform:)(?P<id>[^/\?&]+)'''
    _EMBED_REGEX = [
        r'''(?x)
            <meta\s+
                property=(["'])(?:og:video(?::(?:secure_)?url)?|twitter:player)\1\s+
                content=(["'])(?P<url>https?://player\.theplatform\.com/p/.+?)\2''',
        r'(?s)<(?:iframe|script)[^>]+src=(["\'])(?P<url>(?:https?:)?//player\.theplatform\.com/p/.+?)\1',
    ]

    _TESTS = [{
        # from http://www.metacafe.com/watch/cb-e9I_cZgTgIPd/blackberrys_big_bold_z30/
        'url': 'http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd/meta.smil?format=smil&Tracking=true&mbr=true',
        'info_dict': {
            'id': 'e9I_cZgTgIPd',
            'ext': 'flv',
            'title': 'Blackberry\'s big, bold Z30',
            'description': 'The Z30 is Blackberry\'s biggest, baddest mobile messaging device yet.',
            'duration': 247,
            'timestamp': 1383239700,
            'upload_date': '20131031',
            'uploader': 'CBSI-NEW',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
        'skip': '404 Not Found',
    }, {
        # from http://www.cnet.com/videos/tesla-model-s-a-second-step-towards-a-cleaner-motoring-future/
        'url': 'http://link.theplatform.com/s/kYEXFC/22d_qsQ6MIRT',
        'info_dict': {
            'id': '22d_qsQ6MIRT',
            'ext': 'flv',
            'description': 'md5:ac330c9258c04f9d7512cf26b9595409',
            'title': 'Tesla Model S: A second step towards a cleaner motoring future',
            'timestamp': 1426176191,
            'upload_date': '20150312',
            'uploader': 'CBSI-NEW',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
        'skip': 'CNet no longer uses ThePlatform',
    }, {
        'url': 'https://player.theplatform.com/p/D6x-PC/pulse_preview/embed/select/media/yMBg9E8KFxZD',
        'info_dict': {
            'id': 'yMBg9E8KFxZD',
            'ext': 'mp4',
            'description': 'md5:644ad9188d655b742f942bf2e06b002d',
            'title': 'HIGHLIGHTS: USA bag first ever series Cup win',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://player.theplatform.com/p/NnzsPC/widget/select/media/4Y0TlYUr_ZT7',
        'only_matching': True,
    }, {
        'url': 'http://player.theplatform.com/p/2E2eJC/nbcNewsOffsite?guid=tdy_or_siri_150701',
        'md5': 'fb96bb3d85118930a5b055783a3bd992',
        'info_dict': {
            'id': 'tdy_or_siri_150701',
            'ext': 'mp4',
            'title': 'iPhone Siri’s sassy response to a math question has people talking',
            'description': 'md5:a565d1deadd5086f3331d57298ec6333',
            'duration': 83.0,
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1435752600,
            'upload_date': '20150701',
            'uploader': 'NBCU-NEWS',
        },
        'skip': 'Error: Player PID "nbcNewsOffsite" is disabled',
    }, {
        # From http://www.nbc.com/the-blacklist/video/sir-crispin-crandall/2928790?onid=137781#vc137781=1
        # geo-restricted (US), HLS encrypted with AES-128
        'url': 'http://player.theplatform.com/p/NnzsPC/onsite_universal/select/media/guid/2410887629/2928790?fwsitesection=nbc_the_blacklist_video_library&autoPlay=true&carouselID=137781',
        'only_matching': True,
    }]

    @classmethod
    def _extract_embed_urls(cls, url, webpage):
        # Are whitespaces ignored in URLs?
        # https://github.com/ytdl-org/youtube-dl/issues/12044
        for embed_url in super()._extract_embed_urls(url, webpage):
            yield re.sub(r'\s', '', embed_url)

    @staticmethod
    def _sign_url(url, sig_key, sig_secret, life=600, include_qs=False):
        """Append thePlatform's HMAC-SHA1 URL signature ('sig' parameter).

        *life* is the signature validity in seconds; *include_qs* controls
        the flag nibble that tells the server whether the query string is
        covered by the signature.
        """
        flags = '10' if include_qs else '00'
        expiration_date = '%x' % (int(time.time()) + life)

        # Renamed helper params (were 'str'/'hex') to avoid shadowing builtins
        def str_to_hex(s):
            return binascii.b2a_hex(s.encode('ascii')).decode('ascii')

        def hex_to_bytes(hex_str):
            return binascii.a2b_hex(hex_str.encode('ascii'))

        relative_path = re.match(r'https?://link\.theplatform\.com/s/([^?]+)', url).group(1)
        clear_text = hex_to_bytes(flags + expiration_date + str_to_hex(relative_path))
        checksum = hmac.new(sig_key.encode('ascii'), clear_text, hashlib.sha1).hexdigest()
        sig = flags + expiration_date + checksum + str_to_hex(sig_secret)
        return '%s&sig=%s' % (url, sig)

    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})
        self._initialize_geo_bypass({
            'countries': smuggled_data.get('geo_countries'),
        })

        mobj = self._match_valid_url(url)
        provider_id = mobj.group('provider_id')
        video_id = mobj.group('id')

        if not provider_id:
            provider_id = 'dJ5BDC'

        path = provider_id + '/'
        if mobj.group('media'):
            path += mobj.group('media')
        path += video_id

        qs_dict = parse_qs(url)
        if 'guid' in qs_dict:
            webpage = self._download_webpage(url, video_id)
            scripts = re.findall(r'<script[^>]+src="([^"]+)"', webpage)
            feed_id = None
            # feed id usually locates in the last script.
            # Seems there's no pattern for the interested script filename, so
            # I've scanned all scripts in reverse order.
            for script in reversed(scripts):
                feed_script = self._download_webpage(
                    self._proto_relative_url(script, 'http:'),
                    video_id, 'Downloading feed script')
                feed_id = self._search_regex(
                    r'defaultFeedId\s*:\s*"([^"]+)"', feed_script,
                    'default feed id', default=None)
                if feed_id is not None:
                    break
            if feed_id is None:
                raise ExtractorError('Unable to find feed id')
            return self.url_result('http://feed.theplatform.com/f/%s/%s?byGuid=%s' % (
                provider_id, feed_id, qs_dict['guid'][0]))

        if smuggled_data.get('force_smil_url', False):
            smil_url = url
        # Explicitly specified SMIL (see https://github.com/ytdl-org/youtube-dl/issues/7385)
        elif '/guid/' in url:
            headers = {}
            source_url = smuggled_data.get('source_url')
            if source_url:
                headers['Referer'] = source_url
            request = Request(url, headers=headers)
            webpage = self._download_webpage(request, video_id)
            smil_url = self._search_regex(
                r'<link[^>]+href=(["\'])(?P<url>.+?)\1[^>]+type=["\']application/smil\+xml',
                webpage, 'smil url', group='url')
            path = self._search_regex(
                r'link\.theplatform\.com/s/((?:[^/?#&]+/)+[^/?#&]+)', smil_url, 'path')
            # NOTE(review): due to operator precedence this appends only '?'
            # when the URL has no query string (the formats suffix is only
            # added in the '&' branch) — preserved as-is; confirm intent.
            smil_url += '?' if '?' not in smil_url else '&' + 'formats=m3u,mpeg4'
        elif mobj.group('config'):
            config_url = url + '&form=json'
            config_url = config_url.replace('swf/', 'config/')
            config_url = config_url.replace('onsite/', 'onsite/config/')
            config = self._download_json(config_url, video_id, 'Downloading config')
            if 'releaseUrl' in config:
                release_url = config['releaseUrl']
            else:
                release_url = 'http://link.theplatform.com/s/%s?mbr=true' % path
            smil_url = release_url + '&formats=MPEG4&manifest=f4m'
        else:
            smil_url = 'http://link.theplatform.com/s/%s?mbr=true' % path

        sig = smuggled_data.get('sig')
        if sig:
            smil_url = self._sign_url(smil_url, sig['key'], sig['secret'])

        formats, subtitles = self._extract_theplatform_smil(smil_url, video_id)

        # With some sites, manifest URL must be forced to extract HLS formats
        if not traverse_obj(formats, lambda _, v: v['format_id'].startswith('hls')):
            m3u8_url = update_url(url, query='mbr=true&manifest=m3u', fragment=None)
            urlh = self._request_webpage(
                HEADRequest(m3u8_url), video_id, 'Checking for HLS formats', 'No HLS formats found', fatal=False)
            if urlh and urlhandle_detect_ext(urlh) == 'm3u8':
                m3u8_fmts, m3u8_subs = self._extract_m3u8_formats_and_subtitles(
                    m3u8_url, video_id, m3u8_id='hls', fatal=False)
                formats.extend(m3u8_fmts)
                self._merge_subtitles(m3u8_subs, target=subtitles)

        ret = self._extract_theplatform_metadata(path, video_id)
        combined_subtitles = self._merge_subtitles(ret.get('subtitles', {}), subtitles)
        ret.update({
            'id': video_id,
            'formats': formats,
            'subtitles': combined_subtitles,
        })

        return ret
class ThePlatformFeedIE(ThePlatformBaseIE):
    """Extractor for feed.theplatform.com JSON feed URLs (byGuid/byId)."""

    _URL_TEMPLATE = '%s//feed.theplatform.com/f/%s/%s?form=json&%s'
    _VALID_URL = r'https?://feed\.theplatform\.com/f/(?P<provider_id>[^/]+)/(?P<feed_id>[^?/]+)\?(?:[^&]+&)*(?P<filter>by(?:Gui|I)d=(?P<id>[^&]+))'
    _TESTS = [{
        # From http://player.theplatform.com/p/7wvmTC/MSNBCEmbeddedOffSite?guid=n_hardball_5biden_140207
        'url': 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207',
        'md5': '6e32495b5073ab414471b615c5ded394',
        'info_dict': {
            'id': 'n_hardball_5biden_140207',
            'ext': 'mp4',
            'title': 'The Biden factor: will Joe run in 2016?',
            'description': 'Could Vice President Joe Biden be preparing a 2016 campaign? Mark Halperin and Sam Stein weigh in.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20140208',
            'timestamp': 1391824260,
            'categories': ['MSNBC/Issues/Democrats', 'MSNBC/Issues/Elections/Election 2016'],
            'uploader': 'NBCU-NEWS',
        },
    }, {
        'url': 'http://feed.theplatform.com/f/2E2eJC/nnd_NBCNews?byGuid=nn_netcast_180306.Copy.01',
        'only_matching': True,
    }]

    def _extract_feed_info(self, provider_id, feed_id, filter_query, video_id,
                           custom_fields=None, asset_types_query=None, account_id=None):
        """Resolve a feed entry into an info dict.

        *custom_fields*, when given, is a callable mapping the raw feed entry
        to extra info-dict fields. *asset_types_query* maps an asset type to
        extra query parameters for its SMIL request.
        """
        # BUGFIX: asset_types_query previously defaulted to a shared mutable
        # dict ({}); use a None sentinel instead (behavior-compatible).
        asset_types_query = asset_types_query or {}
        real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, filter_query)
        entry = self._download_json(real_url, video_id)['entries'][0]
        main_smil_url = 'http://link.theplatform.com/s/%s/media/guid/%d/%s' % (
            provider_id, account_id, entry['guid']) if account_id else entry.get('plmedia$publicUrl')

        formats = []
        subtitles = {}
        first_video_id = None
        duration = None
        asset_types = []
        for item in entry['media$content']:
            smil_url = item['plfile$url']
            cur_video_id = ThePlatformIE._match_id(smil_url)
            if first_video_id is None:
                first_video_id = cur_video_id
                duration = float_or_none(item.get('plfile$duration'))
            file_asset_types = item.get('plfile$assetTypes') or parse_qs(smil_url)['assetTypes']
            for asset_type in file_asset_types:
                # Each asset type is fetched only once across all items
                if asset_type in asset_types:
                    continue
                asset_types.append(asset_type)
                query = {
                    'mbr': 'true',
                    'formats': item['plfile$format'],
                    'assetTypes': asset_type,
                }
                if asset_type in asset_types_query:
                    query.update(asset_types_query[asset_type])
                cur_formats, cur_subtitles = self._extract_theplatform_smil(update_url_query(
                    main_smil_url or smil_url, query), video_id, 'Downloading SMIL data for %s' % asset_type)
                formats.extend(cur_formats)
                subtitles = self._merge_subtitles(subtitles, cur_subtitles)

        thumbnails = [{
            'url': thumbnail['plfile$url'],
            'width': int_or_none(thumbnail.get('plfile$width')),
            'height': int_or_none(thumbnail.get('plfile$height')),
        } for thumbnail in entry.get('media$thumbnails', [])]

        timestamp = int_or_none(entry.get('media$availableDate'), scale=1000)
        categories = [item['media$name'] for item in entry.get('media$categories', [])]

        ret = self._extract_theplatform_metadata('%s/%s' % (provider_id, first_video_id), video_id)
        subtitles = self._merge_subtitles(subtitles, ret['subtitles'])
        ret.update({
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
            'duration': duration,
            'timestamp': timestamp,
            'categories': categories,
        })
        if custom_fields:
            ret.update(custom_fields(entry))

        return ret

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)

        video_id = mobj.group('id')
        provider_id = mobj.group('provider_id')
        feed_id = mobj.group('feed_id')
        filter_query = mobj.group('filter')

        return self._extract_feed_info(provider_id, feed_id, filter_query, video_id)