13 import urllib
.response
15 from ..utils
.networking
import clean_proxies
16 from .common
import InfoExtractor
17 from ..aes
import aes_ecb_decrypt
def add_opener(ydl, handler):  # FIXME: Create proper API in .networking
    """Add a handler for opening URLs, like _download_webpage"""
    # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L426
    # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L605
    rh = ydl._request_director.handlers['Urllib']
    if 'abematv-license' in rh._SUPPORTED_URL_SCHEMES:
        # Already registered: bail out so the handler is not added twice and
        # the scheme is not appended to _SUPPORTED_URL_SCHEMES repeatedly.
        return
    headers = ydl.params['http_headers'].copy()
    proxies = ydl.proxies.copy()
    clean_proxies(proxies, headers)
    opener = rh._get_instance(cookiejar=ydl.cookiejar, proxies=proxies)
    assert isinstance(opener, urllib.request.OpenerDirector)
    opener.add_handler(handler)
    rh._SUPPORTED_URL_SCHEMES = (*rh._SUPPORTED_URL_SCHEMES, 'abematv-license')
class AbemaLicenseHandler(urllib.request.BaseHandler):
    """urllib handler that resolves 'abematv-license://<ticket>' URLs into the
    decrypted HLS AES key fetched from the Abema license server."""

    # Custom alphabet used by decode_base_n() to decode the obfuscated key.
    STRTABLE = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    # Hex-encoded key material, unhexlified below and fed to hmac (sha256).
    HKEY = b'3AF0298C219469522A313570E8583005A642E73EDD58E3EA2FB7339D3DF1597E'

    def __init__(self, ie: 'AbemaTVIE'):
        # the protocol that this should really handle is 'abematv-license://'
        # abematv_license_open is just a placeholder for development purposes
        # ref. https://github.com/python/cpython/blob/f4c03484da59049eb62a9bf7777b963e2267d187/Lib/urllib/request.py#L510
        setattr(self, 'abematv-license_open', getattr(self, 'abematv_license_open'))
        # NOTE(review): the statement storing `ie` on self appears elided in
        # this view (later code reads self.ie) — confirm against upstream.

    def _get_videokey_from_ticket(self, ticket):
        """Request the playback license for *ticket* and derive the AES key.

        NOTE(review): parts of the license request (body/headers) and of the
        hmac construction are elided in this view — confirm against upstream.
        """
        to_show = self.ie.get_param('verbose', False)
        media_token = self.ie._get_media_token(to_show=to_show)
        license_response = self.ie._download_json(
            'https://license.abema.io/abematv-hls', None, note='Requesting playback license' if to_show else False,
            query={'t': media_token},
                'Content-Type': 'application/json',
        # 'k' holds the obfuscated key, encoded with the STRTABLE alphabet.
        res = decode_base_n(license_response['k'], table=self.STRTABLE)
        # Split the decoded integer into two big-endian 64-bit words (128 bits).
        encvideokey = bytes_to_intlist(struct.pack('>QQ', res >> 64, res & 0xffffffffffffffff))
            binascii.unhexlify(self.HKEY),
            (license_response['cid'] + self.ie._DEVICE_ID).encode('utf-8'),
            digestmod=hashlib.sha256)
        enckey = bytes_to_intlist(h.digest())
        # Decrypt the obfuscated key via AES-ECB using the digest as key.
        return intlist_to_bytes(aes_ecb_decrypt(encvideokey, enckey))

    def abematv_license_open(self, url):
        """Serve an abematv-license:// request; the ticket is the netloc part."""
        url = url.get_full_url() if isinstance(url, urllib.request.Request) else url
        ticket = urllib.parse.urlparse(url).netloc
        response_data = self._get_videokey_from_ticket(ticket)
        # Wrap the raw key bytes in a file-like response object for urllib.
        return urllib.response.addinfourl(io.BytesIO(response_data), headers={
            'Content-Length': str(len(response_data)),
class AbemaTVBaseIE(InfoExtractor):
    """Shared authentication plumbing (device registration, user token and
    media token handling) for the Abema extractors."""

    # Secret fed to hmac.new() below to derive the applicationKeySecret.
    _SECRETKEY = b'v+Gjs=25Aw5erR!J8ZuvRrCx*rGswhB&qdHd_SYerEWdU&a?3DzN9BRbp5KwY4hEmcj5#fykMjJ=AuWz5GSMY-d@H7DMEh3M@9n2G552Us$$k9cD=3TxwWe86!x#Zyhe'

    def _generate_aks(cls, deviceid):
        """Derive the applicationKeySecret for *deviceid*, bound to the
        current hour (timestamp rounded up to the next full hour).

        NOTE(review): a @classmethod decorator and the mix_once/mix_tmp helper
        definitions appear elided in this view — confirm against upstream.
        """
        deviceid = deviceid.encode('utf-8')
        # add 1 hour and then drop minute and secs
        ts_1hour = int((time_seconds() // 3600 + 1) * 3600)
        time_struct = time.gmtime(ts_1hour)
        ts_1hour_str = str(ts_1hour).encode('utf-8')
        h = hmac.new(cls._SECRETKEY, digestmod=hashlib.sha256)
            for i in range(count):
        def mix_twist(nonce):
            mix_once(base64.urlsafe_b64encode(tmp).rstrip(b'=') + nonce)
        # Fold the secret, calendar fields and the hour timestamp into the digest.
        mix_once(cls._SECRETKEY)
        mix_tmp(time_struct.tm_mon)
        mix_tmp(time_struct.tm_mday % 5)
        mix_twist(ts_1hour_str)
        mix_tmp(time_struct.tm_hour % 5)
        return base64.urlsafe_b64encode(tmp).rstrip(b'=').decode('utf-8')

    def _get_device_token(self):
        """Return a user token, reusing the class-level cache, a locally
        stored token, or registering a fresh device with the API.

        NOTE(review): the initial cache guard and the try/except scaffolding
        around the cached-token validation are elided in this view.
        """
            return self._USERTOKEN
        username, _ = self._get_login_info()
        AbemaTVBaseIE._USERTOKEN = username and self.cache.load(self._NETRC_MACHINE, username)
        if AbemaTVBaseIE._USERTOKEN:
            # try authentication with locally stored token
                self._get_media_token(True)
            except ExtractorError as e:
                self.report_warning(f'Failed to login with cached user token; obtaining a fresh one ({e})')
        # Register a brand-new device to obtain a fresh anonymous user token.
        AbemaTVBaseIE._DEVICE_ID = str(uuid.uuid4())
        aks = self._generate_aks(self._DEVICE_ID)
        user_data = self._download_json(
            'https://api.abema.io/v1/users', None, note='Authorizing',
                'deviceId': self._DEVICE_ID,
                'applicationKeySecret': aks,
                'Content-Type': 'application/json',
        AbemaTVBaseIE._USERTOKEN = user_data['token']
        # Install the abematv-license:// handler so HLS keys can be fetched.
        add_opener(self._downloader, AbemaLicenseHandler(self))
        return self._USERTOKEN

    def _get_media_token(self, invalidate=False, to_show=True):
        """Return a media token; reuse the cached one unless *invalidate*.

        NOTE(review): parts of the request query/headers dicts are elided in
        this view — confirm against upstream.
        """
        if not invalidate and self._MEDIATOKEN:
            return self._MEDIATOKEN
        AbemaTVBaseIE._MEDIATOKEN = self._download_json(
            'https://api.abema.io/v1/media/token', None, note='Fetching media token' if to_show else False,
                'osVersion': '6.0.1',
                'osTimezone': 'Asia/Tokyo',
                'appVersion': '3.27.1'
                'Authorization': f'bearer {self._get_device_token()}',
        return self._MEDIATOKEN

    def _call_api(self, endpoint, video_id, query=None, note='Downloading JSON metadata'):
        """Authenticated JSON request against https://api.abema.io/{endpoint}."""
        return self._download_json(
            f'https://api.abema.io/{endpoint}', video_id, query=query or {},
            'Authorization': f'bearer {self._get_device_token()}',

    def _extract_breadcrumb_list(self, webpage, video_id):
        """Collect names from the BreadcrumbList JSON-LD embedded in the page.

        NOTE(review): the loop body after the items lookup is elided in this
        view — confirm the return/yield shape against upstream.
        """
        for jld in re.finditer(
                r'(?is)</span></li></ul><script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
            jsonld = self._parse_json(jld.group('json_ld'), video_id, fatal=False)
            if traverse_obj(jsonld, '@type') != 'BreadcrumbList':
            items = traverse_obj(jsonld, ('itemListElement', ..., 'name'))
class AbemaTVIE(AbemaTVBaseIE):
    """Extractor for Abema on-air channels, VOD episodes and timeshift slots."""

    _VALID_URL = r'https?://abema\.tv/(?P<type>now-on-air|video/episode|channels/.+?/slots)/(?P<id>[^?/]+)'
    _NETRC_MACHINE = 'abematv'
    # NOTE(review): the _TESTS list/dict structure (brackets and braces) is
    # elided in this view; only the fixture values below are visible.
        'url': 'https://abema.tv/video/episode/194-25_s2_p1',
            'id': '194-25_s2_p1',
            'title': '第1話 「チーズケーキ」 「モーニング再び」',
            'episode': '第1話 「チーズケーキ」 「モーニング再び」',
        'url': 'https://abema.tv/channels/anime-live2/slots/E8tvAnMJ7a9a5d',
            'id': 'E8tvAnMJ7a9a5d',
            'title': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】',
            'series': 'ゆるキャン△ SEASON2',
            'episode': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】',
            'description': 'md5:9c5a3172ae763278f9303922f0ea5b17',
        'url': 'https://abema.tv/video/episode/87-877_s1282_p31047',
            'id': 'E8tvAnMJ7a9a5d',
            'description': 'md5:56d4fc1b4f7769ded5f923c55bb4695d',
            'thumbnail': r're:https://hayabusa\.io/.+',
            'episode': '第5話『光射す』',
        'url': 'https://abema.tv/now-on-air/abema-anime',
            # 'title': '女子高生の無駄づかい 全話一挙【無料ビデオ72時間】',
            'description': 'md5:55f2e61f46a17e9230802d7bcc913d5f',
        'skip': 'Not supported until yt-dlp implements native live downloader OR AbemaTV can start a local HTTP server',
    def _perform_login(self, username, password):
        """Log in via email or user-id one-time password; stores the resulting
        user token in the extractor cache.

        NOTE(review): the else-branch keyword and the JSON request body of the
        auth call are elided in this view — confirm against upstream.
        """
        self._get_device_token()
        # A cached token that still yields a media token means this account is
        # already authenticated.
        if self.cache.load(self._NETRC_MACHINE, username) and self._get_media_token():
            self.write_debug('Skipping logging in')
        if '@' in username:  # don't strictly check if it's email address or not
            ep, method = 'user/email', 'email'
            ep, method = 'oneTimePassword', 'userId'
        login_response = self._download_json(
            f'https://api.abema.io/v1/auth/{ep}', None, note='Logging in',
            }).encode('utf-8'), headers={
                'Authorization': f'bearer {self._get_device_token()}',
                'Origin': 'https://abema.tv',
                'Referer': 'https://abema.tv/',
                'Content-Type': 'application/json',
        # Persist the fresh user token and validate it by forcing a new media token.
        AbemaTVBaseIE._USERTOKEN = login_response['token']
        self._get_media_token(True)
        self.cache.store(self._NETRC_MACHINE, username, AbemaTVBaseIE._USERTOKEN)
    def _real_extract(self, url):
        """Extract a single on-air channel, VOD episode or timeshift slot.

        NOTE(review): several statements of this method (request bodies,
        some conditionals, the final info-dict literal) are elided in this
        view — confirm against upstream before relying on exact control flow.
        """
        # starting download using infojson from this extractor is undefined behavior,
        # and never be fixed in the future; you must trigger downloads by directly specifying URL.
        # (unless there's a way to hook before downloading by extractor)
        video_id, video_type = self._match_valid_url(url).group('id', 'type')
            'Authorization': 'Bearer ' + self._get_device_token(),
        # 'channels/.../slots' collapses to just 'slots'.
        video_type = video_type.split('/')[-1]
        webpage = self._download_webpage(url, video_id)
        canonical_url = self._search_regex(
            r'<link\s+rel="canonical"\s*href="(.+?)"', webpage, 'canonical URL',
        info = self._search_json_ld(webpage, video_id, default={})
        title = self._search_regex(
            r'<span\s*class=".+?EpisodeTitleBlock__title">(.+?)</span>', webpage, 'title', default=None)
        # Fallback: take the title from the thumbnail JSON-LD caption.
        for jld in re.finditer(
                r'(?is)<span\s*class="com-m-Thumbnail__image">(?:</span>)?<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
            jsonld = self._parse_json(jld.group('json_ld'), video_id, fatal=False)
            title = jsonld.get('caption')
        if not title and video_type == 'now-on-air':
            if not self._TIMETABLE:
                # cache the timetable because it goes to 5MiB in size (!!)
                self._TIMETABLE = self._download_json(
                    'https://api.abema.io/v1/timetable/dataSet?debug=false', video_id,
            now = time_seconds(hours=9)
            for slot in self._TIMETABLE.get('slots', []):
                if slot.get('channelId') != video_id:
                if slot['startAt'] <= now and now < slot['endAt']:
                    title = slot['title']

        # read breadcrumb on top of page
        breadcrumb = self._extract_breadcrumb_list(webpage, video_id)
        # breadcrumb list translates to: (e.g. 1st test for this IE)
        # Home > Anime (genre) > Isekai Shokudo 2 (series name) > Episode 1 "Cheese cakes" "Morning again" (episode title)
            info['series'] = breadcrumb[-2]
            info['episode'] = breadcrumb[-1]
            title = info['episode']

        description = self._html_search_regex(
            (r'<p\s+class="com-video-EpisodeDetailsBlock__content"><span\s+class=".+?">(.+?)</span></p><div',
             r'<span\s+class=".+?SlotSummary.+?">(.+?)</span></div><div',),
            webpage, 'description', default=None, group=1)
            og_desc = self._html_search_meta(
                ('description', 'og:description', 'twitter:description'), webpage)
            # Trim site boilerplate from the description (replacement part of
            # this re.sub is elided in this view).
            description = re.sub(r'''(?sx)
                アニメの動画を無料で見るならABEMA!| # anime
                等、.+ # applies for most of categories

        # canonical URL may contain series and episode number
        mobj = re.search(r's(\d+)_p(\d+)$', canonical_url)
            seri = int_or_none(mobj.group(1), default=float('inf'))
            epis = int_or_none(mobj.group(2), default=float('inf'))
            info['series_number'] = seri if seri < 100 else None
            # some anime like Detective Conan (though not available in AbemaTV)
            # has more than 1000 episodes (1026 as of 2021/11/15)
            info['episode_number'] = epis if epis < 2000 else None

        is_live, m3u8_url = False, None
        if video_type == 'now-on-air':
            channel_url = 'https://api.abema.io/v1/channels'
            if video_id == 'news-global':
                channel_url = update_url_query(channel_url, {'division': '1'})
            onair_channels = self._download_json(channel_url, video_id)
            for ch in onair_channels['channels']:
                if video_id == ch['id']:
                    m3u8_url = ch['playback']['hls']
                raise ExtractorError(f'Cannot find on-air {video_id} channel.', expected=True)
        elif video_type == 'episode':
            api_response = self._download_json(
                f'https://api.abema.io/v1/video/programs/{video_id}', video_id,
                note='Checking playability',
            ondemand_types = traverse_obj(api_response, ('terms', ..., 'onDemandType'))
            if 3 not in ondemand_types:
                # cannot acquire decryption key for these streams
                self.report_warning('This is a premium-only stream')
            info.update(traverse_obj(api_response, {
                'series': ('series', 'title'),
                'season': ('season', 'title'),
                'season_number': ('season', 'sequence'),
                'episode_number': ('episode', 'number'),
                title = traverse_obj(api_response, ('episode', 'title'))
                description = traverse_obj(api_response, ('episode', 'content'))
            m3u8_url = f'https://vod-abematv.akamaized.net/program/{video_id}/playlist.m3u8'
        elif video_type == 'slots':
            api_response = self._download_json(
                f'https://api.abema.io/v1/media/slots/{video_id}', video_id,
                note='Checking playability',
            if not traverse_obj(api_response, ('slot', 'flags', 'timeshiftFree'), default=False):
                self.report_warning('This is a premium-only stream')
            m3u8_url = f'https://vod-abematv.akamaized.net/slot/{video_id}/playlist.m3u8'
            raise ExtractorError('Unreachable')

            self.report_warning("This is a livestream; yt-dlp doesn't support downloading natively, but FFmpeg cannot handle m3u8 manifests from AbemaTV")
            self.report_warning('Please consider using Streamlink to download these streams (https://github.com/streamlink/streamlink)')
        formats = self._extract_m3u8_formats(
            m3u8_url, video_id, ext='mp4', live=is_live)
            'description': description,
class AbemaTVTitleIE(AbemaTVBaseIE):
    """Playlist extractor for an Abema series page (all episodes of a title)."""

    _VALID_URL = r'https?://abema\.tv/video/title/(?P<id>[^?/]+)'
    # NOTE(review): the _TESTS list/dict structure is elided in this view;
    # only the fixture values below are visible.
        'url': 'https://abema.tv/video/title/90-1597',
            'title': 'シャッフルアイランド',
        'playlist_mincount': 2,
        'url': 'https://abema.tv/video/title/193-132',
            'title': '真心が届く~僕とスターのオフィス・ラブ!?~',
        'playlist_mincount': 16,
        'url': 'https://abema.tv/video/title/25-102',
            'title': 'ソードアート・オンライン アリシゼーション',
        'playlist_mincount': 24,
    def _fetch_page(self, playlist_id, series_version, page):
        """Fetch one page of the series' program list and produce episode
        URL results (page size comes from self._PAGE_SIZE).

        NOTE(review): part of the query dict and the yield statement are
        elided in this view — confirm against upstream.
        """
        programs = self._call_api(
            f'v1/video/series/{playlist_id}/programs', playlist_id,
            note=f'Downloading page {page + 1}',
                'seriesVersion': series_version,
                'offset': str(page * self._PAGE_SIZE),
                'limit': str(self._PAGE_SIZE),
            self.url_result(f'https://abema.tv/video/episode/{x}')
            for x in traverse_obj(programs, ('programs', ..., 'id')))
    def _entries(self, playlist_id, series_version):
        # Lazily paged list of episode entries; the trailing argument of the
        # OnDemandPagedList call is elided in this view — confirm upstream.
        return OnDemandPagedList(
            functools.partial(self._fetch_page, playlist_id, series_version),
468 def _real_extract(self, url):
469 playlist_id = self._match_id(url)
470 series_info = self._call_api(f'v1/video/series/{playlist_id}', playlist_id)
472 return self.playlist_result(
473 self._entries(playlist_id, series_info['version']), playlist_id=playlist_id,
474 playlist_title=series_info.get('title'),
475 playlist_description=series_info.get('content'))