2 from __future__
import unicode_literals
9 from .common
import InfoExtractor
10 from ..compat
import (
12 compat_urllib_parse_urlencode
,
13 compat_urllib_parse_unquote
15 from .openload
import PhantomJSwrapper
23 get_element_by_attribute
,
def md5_text(text):
    """Return the hex MD5 digest of *text* (a str, UTF-8 encoded first).

    NOTE(review): the source arrived garbled (embedded line numbers and the
    `def` line missing); reconstructed from the visible body, which shows
    `hashlib.md5(text.encode('utf-8')).hexdigest()`.
    """
    return hashlib.md5(text.encode('utf-8')).hexdigest()
class IqiyiSDK(object):
    """Re-implementation of the obfuscated signing "SDK" used by iqiyi's
    login flow.

    The SDK repeatedly transforms ``self.target`` (the login request string)
    by MD5-hashing it and mixing in the client IP and a timestamp; after the
    interpreter has replayed every operation, ``self.target`` is the request
    signature.

    NOTE(review): this block arrived garbled (embedded line numbers,
    mid-token line wrapping, some lines missing).  All visible logic was kept
    verbatim; lines that were missing from the corrupted source are marked
    with NOTE(review) and should be diffed against version control.
    """

    def __init__(self, target, ip, timestamp):
        # NOTE(review): target/ip assignments were missing from the garbled
        # source; restored — every method below reads them.
        self.target = target
        self.ip = ip
        self.timestamp = timestamp

    @staticmethod
    def split_sum(data):
        # Sum of the hexadecimal digits of `data`, as a decimal string.
        return compat_str(sum(map(lambda p: int(p, 16), list(data))))

    @staticmethod
    def digit_sum(num):
        # Sum of the decimal digits of `num` (int or str), as a string.
        if isinstance(num, int):
            # NOTE(review): this conversion line was missing; restored — the
            # following sum(map(int, num)) requires a string.
            num = compat_str(num)
        return compat_str(sum(map(int, num)))

    def even_odd(self):
        # Digit sums of the even- and odd-indexed digits of the timestamp.
        even = self.digit_sum(compat_str(self.timestamp)[::2])
        odd = self.digit_sum(compat_str(self.timestamp)[1::2])
        return even, odd

    def preprocess(self, chunksize):
        # MD5 the current target (so it is always 32 hex chars), cut it into
        # `chunksize`-character chunks, and return them together with the
        # client IP split into its 4 integer octets.
        self.target = md5_text(self.target)
        chunks = []
        for i in range(32 // chunksize):
            chunks.append(self.target[chunksize * i:chunksize * (i + 1)])
        # NOTE(review): the `if 32 % chunksize:` guard was missing from the
        # garbled source; restored — without it the trailing-slice append
        # visible below would add an empty chunk when chunksize divides 32.
        if 32 % chunksize:
            chunks.append(self.target[32 - 32 % chunksize:])
        return chunks, list(map(int, self.ip.split('.')))

    def mod(self, modulus):
        chunks, ip = self.preprocess(32)
        self.target = chunks[0] + ''.join(map(lambda p: compat_str(p % modulus), ip))

    def split(self, chunksize):
        # NOTE(review): this dict was missing from the garbled source; the
        # visible code indexes modulus_map[chunksize] — values restored from
        # upstream history, confirm before merging.
        modulus_map = {
            4: 256,
            8: 128,
            16: 64,
        }

        chunks, ip = self.preprocess(chunksize)
        ret = ''
        for i in range(len(chunks)):
            # Only the first four chunks get an IP octet mixed in.
            ip_part = compat_str(ip[i] % modulus_map[chunksize]) if i < 4 else ''
            # NOTE(review): the branch condition was missing; both orderings
            # (ip_part+chunk and chunk+ip_part) are visible in the source —
            # the chunksize == 8 discriminator comes from upstream history.
            if chunksize == 8:
                ret += ip_part + chunks[i]
            else:
                ret += chunks[i] + ip_part
        self.target = ret

    def handle_input16(self):
        self.target = md5_text(self.target)
        self.target = self.split_sum(self.target[:16]) + self.target + self.split_sum(self.target[16:])

    def handle_input8(self):
        self.target = md5_text(self.target)
        ret = ''
        # NOTE(review): loop header was missing; the visible body slices
        # self.target[8*i:8*(i+1)] — four 8-char chunks of a 32-char digest.
        for i in range(4):
            part = self.target[8 * i:8 * (i + 1)]
            ret += self.split_sum(part) + part
        self.target = ret

    def handleSum(self):
        # Name kept camelCase: the interpreter dispatch table maps the JS
        # name 'handleSum' directly to this method.
        self.target = md5_text(self.target)
        self.target = self.split_sum(self.target) + self.target

    def date(self, scheme):
        # Append year/month/day components selected by `scheme` (e.g. 'ymd').
        self.target = md5_text(self.target)
        d = time.localtime(self.timestamp)
        strings = {
            'y': compat_str(d.tm_year),
            'm': '%02d' % d.tm_mon,
            'd': '%02d' % d.tm_mday,
        }
        self.target += ''.join(map(lambda c: strings[c], list(scheme)))

    def split_time_even_odd(self):
        even, odd = self.even_odd()
        self.target = odd + md5_text(self.target) + even

    def split_time_odd_even(self):
        even, odd = self.even_odd()
        self.target = even + md5_text(self.target) + odd

    def split_ip_time_sum(self):
        chunks, ip = self.preprocess(32)
        self.target = compat_str(sum(ip)) + chunks[0] + self.digit_sum(self.timestamp)

    def split_time_ip_sum(self):
        chunks, ip = self.preprocess(32)
        self.target = self.digit_sum(self.timestamp) + chunks[0] + compat_str(sum(ip))
class IqiyiSDKInterpreter(object):
    """Decodes iqiyi's packed signature SDK JS and replays its operation
    list against an :class:`IqiyiSDK` instance.

    NOTE(review): this block arrived garbled; missing lines (the dispatch
    dict braces, the final ``else:`` and the ``return``) were restored from
    the visible call sites (``sign = interp.run(...)`` uses the return
    value) — confirm against version control.
    """

    def __init__(self, sdk_code):
        self.sdk_code = sdk_code

    def run(self, target, ip, timestamp):
        # The SDK ships p,a,c,k,e,d-obfuscated; unpack to plain JS first.
        self.sdk_code = decode_packed_codes(self.sdk_code)

        # Each transformation appears in the JS as `input=<name>(input...`.
        functions = re.findall(r'input=([a-zA-Z0-9]+)\(input', self.sdk_code)

        sdk = IqiyiSDK(target, ip, timestamp)

        other_functions = {
            'handleSum': sdk.handleSum,
            'handleInput8': sdk.handle_input8,
            'handleInput16': sdk.handle_input16,
            'splitTimeEvenOdd': sdk.split_time_even_odd,
            'splitTimeOddEven': sdk.split_time_odd_even,
            'splitIpTimeSum': sdk.split_ip_time_sum,
            'splitTimeIpSum': sdk.split_time_ip_sum,
        }
        for function in functions:
            # Parameterised ops encode their argument in the name,
            # e.g. mod3, dateymd, split8.
            if re.match(r'mod\d+', function):
                sdk.mod(int(function[3:]))
            elif re.match(r'date[ymd]{3}', function):
                sdk.date(function[4:])
            elif re.match(r'split\d+', function):
                sdk.split(int(function[5:]))
            elif function in other_functions:
                other_functions[function]()
            else:
                raise ExtractorError('Unknown function %s' % function)

        return sdk.target
class IqiyiIE(InfoExtractor):
    """Extractor for mainland-China iqiyi.com (and www.pps.tv) video pages.

    NOTE(review): this block arrived garbled (embedded line numbers,
    mid-token wrapping, missing lines).  All visible logic, URLs, regexes
    and string literals were kept byte-for-byte; spans that had to be
    restored without direct evidence are marked NOTE(review) and should be
    diffed against version control before merging.
    """
    # NOTE(review): IE_NAME/IE_DESC lines were missing from the corrupted
    # source; restored per extractor convention — confirm.
    IE_NAME = 'iqiyi'
    IE_DESC = '爱奇艺'

    _VALID_URL = r'https?://(?:(?:[^.]+\.)?iqiyi\.com|www\.pps\.tv)/.+\.html'

    _NETRC_MACHINE = 'iqiyi'

    # NOTE(review): several info_dict fields of these tests were lost in the
    # corruption; only the surviving fields are reproduced here.
    _TESTS = [{
        'url': 'http://www.iqiyi.com/v_19rrojlavg.html',
        # MD5 checksum differs on my machine and Travis CI
        'info_dict': {
            'id': '9c1fb1b99d192b21c559e5a1a2cb3c73',
            'ext': 'mp4',
            'title': '美国德州空中惊现奇异云团 酷似UFO',
        }
    }, {
        'url': 'http://www.iqiyi.com/v_19rrhnnclk.html',
        'md5': 'b7dc800a4004b1b57749d9abae0472da',
        'info_dict': {
            'id': 'e3f585b550a280af23c98b6cb2be19fb',
            'ext': 'mp4',
            # This can be either Simplified Chinese or Traditional Chinese
            'title': r're:^(?:名侦探柯南 国语版:第752集 迫近灰原秘密的黑影 下篇|名偵探柯南 國語版:第752集 迫近灰原秘密的黑影 下篇)$',
        },
        'skip': 'Geo-restricted to China',
    }, {
        'url': 'http://www.iqiyi.com/w_19rt6o8t9p.html',
        'only_matching': True,
    }, {
        'url': 'http://www.iqiyi.com/a_19rrhbc6kt.html',
        'only_matching': True,
    }, {
        'url': 'http://yule.iqiyi.com/pcb.html',
        'info_dict': {
            'id': '4a0af228fddb55ec96398a364248ed7f',
            'ext': 'mp4',
            'title': '第2017-04-21期 女艺人频遭极端粉丝骚扰',
        },
    }, {
        # VIP-only video. The first 2 parts (6 minutes) are available without login
        # MD5 sums omitted as values are different on Travis CI and my machine
        'url': 'http://www.iqiyi.com/v_19rrny4w8w.html',
        'info_dict': {
            'id': 'f3cf468b39dddb30d676f89a91200dc1',
            'ext': 'mp4',
        },
        'skip': 'Geo-restricted to China',
    }, {
        'url': 'http://www.iqiyi.com/a_19rrhb8ce1.html',
        'playlist_count': 101,
    }, {
        'url': 'http://www.pps.tv/w_19rrbav0ph.html',
        'only_matching': True,
    }]

    # Maps iqiyi 'vd' stream ids to an ascending quality rank.
    # NOTE(review): only the '96' and '5' entries survived the corruption;
    # the remaining entries were restored from upstream history — confirm.
    _FORMATS_MAP = {
        '96': 1,    # 216p, 240p
        '1': 2,     # 336p, 360p
        '2': 3,     # 480p, 504p
        '21': 4,    # 504p
        '4': 5,     # 720p
        '17': 5,    # 720p
        '5': 6,     # 1072p, 1080p
        '18': 7,    # 1080p
    }

    @staticmethod
    def _rsa_fun(data):
        # public key extracted from http://static.iqiyi.com/js/qiyiV2/20160129180840/jobs/i18n/i18nIndex.js
        N = 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
        # NOTE(review): exponent line was missing; 65537 is the standard RSA
        # public exponent used upstream — confirm.
        e = 65537

        return ohdave_rsa_encrypt(data, e, N)

    def _perform_login(self, username, password):
        """Sign in to iqiyi by replaying its JS signing SDK server-side."""
        data = self._download_json(
            'http://kylin.iqiyi.com/get_token', None,
            note='Get token for logging', errnote='Unable to get token for logging')
        # NOTE(review): this assignment was missing; `sdk` is fed to the
        # interpreter below — restored from the visible usage.
        sdk = data['sdk']
        timestamp = int(time.time())
        target = '/apis/reglogin/login.action?lang=zh_TW&area_code=null&email=%s&passwd=%s&agenttype=1&from=undefined&keeplogin=0&piccode=&fromurl=&_pos=1' % (
            username, self._rsa_fun(password.encode('utf-8')))

        interp = IqiyiSDKInterpreter(sdk)
        sign = interp.run(target, data['ip'], timestamp)

        validation_params = {
            'target': target,
            'server': 'BEA3AA1908656AABCCFF76582C4C6660',
            'token': data['token'],
            'bird_src': 'f8d91d57af224da7893dd397d52d811a',
            'sign': sign,
            'bird_t': timestamp,
        }
        validation_result = self._download_json(
            'http://kylin.iqiyi.com/validate?' + compat_urllib_parse_urlencode(validation_params), None,
            note='Validate credentials', errnote='Unable to validate credentials')

        MSG_MAP = {
            'P00107': 'please login via the web interface and enter the CAPTCHA code',
            'P00117': 'bad username or password',
        }

        code = validation_result['code']
        if code != 'A00000':
            msg = MSG_MAP.get(code)
            if not msg:
                msg = 'error %s' % code
                if validation_result.get('msg'):
                    msg += ': ' + validation_result['msg']
            self.report_warning('unable to log in: ' + msg)
            return False

        return True

    def get_raw_data(self, tvid, video_id):
        """Fetch the raw tmts stream-info JSON for (tvid, video_id)."""
        tm = int(time.time() * 1000)

        key = 'd5fb4bd9d50c4be6948c97edd7254b0e'
        sc = md5_text(compat_str(tm) + key + tvid)
        # NOTE(review): only the 'src' entry of this dict survived; the
        # remaining keys were restored from upstream history — confirm.
        params = {
            'tvid': tvid,
            'vid': video_id,
            'src': '76f90cbd92f94a2e925d83e8ccd22cb7',
            'sc': sc,
            't': tm,
        }

        return self._download_json(
            'http://cache.m.iqiyi.com/jp/tmts/%s/%s/' % (tvid, video_id),
            video_id, transform_source=lambda s: remove_start(s, 'var tvInfoJs='),
            query=params, headers=self.geo_verification_headers())

    def _extract_playlist(self, webpage):
        """Extract an album playlist from a webpage; returns None if the
        page carries no playlist links."""
        PAGE_SIZE = 50

        links = re.findall(
            r'<a[^>]+class="site-piclist_pic_link"[^>]+href="(http://www\.iqiyi\.com/.+\.html)"',
            webpage)
        if not links:
            return

        album_id = self._search_regex(
            r'albumId\s*:\s*(\d+),', webpage, 'album ID')
        album_title = self._search_regex(
            r'data-share-title="([^"]+)"', webpage, 'album title', fatal=False)

        entries = list(map(self.url_result, links))

        # Start from 2 because links in the first page are already on webpage
        for page_num in itertools.count(2):
            pagelist_page = self._download_webpage(
                'http://cache.video.qiyi.com/jp/avlist/%s/%d/%d/' % (album_id, page_num, PAGE_SIZE),
                album_id,
                note='Download playlist page %d' % page_num,
                errnote='Failed to download playlist page %d' % page_num)
            pagelist = self._parse_json(
                remove_start(pagelist_page, 'var tvInfoJs='), album_id)
            vlist = pagelist['data']['vlist']
            for item in vlist:
                entries.append(self.url_result(item['vurl']))
            # A short page means we have reached the last one.
            if len(vlist) < PAGE_SIZE:
                break

        return self.playlist_result(entries, album_id, album_title)

    def _real_extract(self, url):
        webpage = self._download_webpage(
            url, 'temp_id', note='download video page')

        # There's no simple way to determine whether an URL is a playlist or not
        # Sometimes there are playlist links in individual videos, so treat it
        # as a single video first
        tvid = self._search_regex(
            r'data-(?:player|shareplattrigger)-tvid\s*=\s*[\'"](\d+)', webpage, 'tvid', default=None)
        if tvid is None:
            playlist_result = self._extract_playlist(webpage)
            if playlist_result:
                return playlist_result
            raise ExtractorError('Can\'t find any video')

        video_id = self._search_regex(
            r'data-(?:player|shareplattrigger)-videoid\s*=\s*[\'"]([a-f\d]+)', webpage, 'video_id')

        formats = []
        # NOTE(review): the retry-loop header was missing; the visible
        # `self._sleep(5, video_id)` between attempts implies a bounded
        # retry loop — bound of 5 restored from upstream history.
        for _ in range(5):
            raw_data = self.get_raw_data(tvid, video_id)

            if raw_data['code'] != 'A00000':
                if raw_data['code'] == 'A00111':
                    self.raise_geo_restricted()
                raise ExtractorError('Unable to load data. Error code: ' + raw_data['code'])

            data = raw_data['data']

            for stream in data['vidl']:
                if 'm3utx' not in stream:
                    continue
                vd = compat_str(stream['vd'])
                formats.append({
                    'url': stream['m3utx'],
                    'format_id': vd,
                    'ext': 'mp4',
                    'quality': self._FORMATS_MAP.get(vd, -1),
                    'protocol': 'm3u8_native',
                })

            if formats:
                break

            self._sleep(5, video_id)

        self._sort_formats(formats)
        title = (get_element_by_id('widget-videotitle', webpage)
                 or clean_html(get_element_by_attribute('class', 'mod-play-tit', webpage))
                 or self._html_search_regex(r'<span[^>]+data-videochanged-title="word"[^>]*>([^<]+)</span>', webpage, 'title'))

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
        }
# NOTE(review): this whole class arrived garbled — every line carries a stray
# leading number (an original line number fused into the content) and most
# statements are wrapped mid-token; many lines are missing entirely (the
# _BID_TAGS/_LID_TAGS dicts, most of the _DASH_JS query object, several
# control-flow lines).  The text is preserved byte-for-byte below; it is NOT
# currently runnable Python.
# TODO(review): restore this class from version control instead of editing it
# in place — a safe reconstruction is not possible from these fragments alone.
403 class IqIE(InfoExtractor):
405 IE_DESC = 'International version of iQiyi
'
# _VALID_URL fragment (a single raw-string regex, split across lines by the
# corruption):
406 _VALID_URL = r'https?
://(?
:www\
.)?iq\
.com
/play
/(?
:[\w
%-]*-)?
(?P
<id>\w
+)'
# _TESTS fragments follow (dict keys and values split mid-string):
408 'url
': 'https
://www
.iq
.com
/play
/one
-piece
-episode
-1000-1ma1i6ferf4
',
409 'md5
': '2d7caf6eeca8a32b407094b33b757d39
',
413 'title
': '航海王 第
1000集
',
414 'description
': 'Subtitle available on Sunday
4PM(GMT
+8)
.',
416 'timestamp
': 1637488203,
417 'upload_date
': '20211121',
418 'episode_number
': 1000,
419 'episode
': 'Episode
1000',
420 'series
': 'One Piece
',
422 'average_rating
': float,
427 'expected_warnings
': ['format
is restricted
']
429 # VIP-restricted video
430 'url
': 'https
://www
.iq
.com
/play
/mermaid
-in-the
-fog
-2021-gbdpx13bs4
',
431 'only_matching
': True
# _DASH_JS fragment: a JavaScript template (interpolated with %(...)s below in
# _real_extract) that PhantomJS executes to build the signed /dash?... request
# path per bid; the 'vf' checksum is computed by the page's cmd5x function.
456 console.log(page.evaluate(function() {
457 var tvid = "%(tvid)s"; var vid = "%(vid)s"; var src = "%(src)s";
458 var uid = "%(uid)s"; var dfp = "%(dfp)s"; var mode = "%(mode)s"; var lang = "%(lang)s";
459 var bid_list = %(bid_list)s; var ut_list = %(ut_list)s; var tm = new Date().getTime();
460 var cmd5x_func = %(cmd5x_func)s; var cmd5x_exporter = {}; cmd5x_func({}, cmd5x_exporter, {}); var cmd5x = cmd5x_exporter.cmd5x;
461 var authKey = cmd5x(cmd5x('') + tm + '' + tvid);
462 var k_uid = Array.apply(null, Array(32)).map(function() {return Math.floor(Math.random() * 15).toString(16)}).join('');
464 bid_list.forEach(function(bid) {
# (most of the JS `query` object is missing here; only fragments survive)
489 'prio
': JSON.stringify({
503 'k_ft1
': 141287244169348,
504 'k_ft4
': 34359746564,
506 'bop
': JSON.stringify({
512 for (var prop in query) {
513 enc_params.push(encodeURIComponent(prop) + '=' + encodeURIComponent(query[prop]));
515 ut_list.forEach(function(ut) {
516 enc_params.push('ut
=' + ut);
518 var dash_path = '/dash?
' + enc_params.join('&'); dash_path += '&vf
=' + cmd5x(dash_path);
519 dash_paths[bid] = dash_path;
521 return JSON.stringify(dash_paths);
# _extract_vms_player_js: returns the VMS player JS, served from the
# 'iq'/'player_js' cache when present, otherwise located by walking the
# Next.js webpack chunk maps and scanning module JS for 'vms request'.
526 def _extract_vms_player_js(self, webpage, video_id):
527 player_js_cache = self._downloader.cache.load('iq
', 'player_js
')
529 return player_js_cache
530 webpack_js_url = self._proto_relative_url(self._search_regex(
531 r'<script src
="((?:https?)?//stc.iqiyipic.com/_next/static/chunks/webpack-\w+\.js)"', webpage, 'webpack URL
'))
532 webpack_js = self._download_webpage(webpack_js_url, video_id, note='Downloading webpack JS
', errnote='Unable to download webpack JS
')
533 webpack_map1, webpack_map2 = [self._parse_json(js_map, video_id, transform_source=js_to_json) for js_map in self._search_regex(
534 r'\
(({[^}
]*})\
[\w
+\
][^\
)]*\
)\s
*\
+\s
*["\']\.["\']\s
*\
+\s
*({[^}
]*})\
[\w
+\
]\
+["\']\.js', webpack_js, 'JS locations', group=(1, 2))]
535 for module_index in reversed(list(webpack_map2.keys())):
536 module_js = self._download_webpage(
537 f'https://stc.iqiyipic.com/_next/static/chunks/{webpack_map1.get(module_index, module_index)}.{webpack_map2[module_index]}.js',
538 video_id, note=f'Downloading #{module_index} module JS', errnote='Unable to download module JS', fatal=False) or ''
539 if 'vms request' in module_js:
540 self._downloader.cache.store('iq', 'player_js', module_js)
542 raise ExtractorError('Unable to extract player JS')
# _extract_cmd5x_function: pulls the cmd5x signing function body out of the
# player JS via regex.
544 def _extract_cmd5x_function(self, webpage, video_id):
545 return self._search_regex(r',\s*(function\s*\([^\)]*\)\s*{\s*var _qda.+_qdc\(\)\s*})\s*,',
546 self._extract_vms_player_js(webpage, video_id), 'signature function')
# _update_bid_tags: refreshes the self._BID_TAGS quality labels from the
# player JS (falls back to the existing entries when parsing fails).
548 def _update_bid_tags(self, webpage, video_id):
549 extracted_bid_tags = self._parse_json(
551 r'arguments\[1\][^,]*,\s*function\s*\([^\)]*\)\s*{\s*"use strict";?\s*var \w=({.+}})\s*,\s*\w\s*=\s*{\s*getNewVd',
552 self._extract_vms_player_js(webpage, video_id), 'video tags', default=''),
553 video_id, transform_source=js_to_json, fatal=False)
554 if not extracted_bid_tags:
557 bid: traverse_obj(extracted_bid_tags, (bid, 'value'), expected_type=str, default=self._BID_TAGS.get(bid))
558 for bid in extracted_bid_tags.keys()
# _get_cookie: convenience accessor for an iq.com cookie value.
561 def _get_cookie(self, name, default=None):
562 cookie = self._get_cookies('https://iq.com/').get(name)
563 return cookie.value if cookie else default
# _real_extract fragment: signs per-bid DASH paths via PhantomJS, downloads
# format data for each bid, collects m3u8/mpd formats and subtitle tracks,
# then assembles the info dict from the Next.js page data.
565 def _real_extract(self, url):
566 video_id = self._match_id(url)
567 webpage = self._download_webpage(url, video_id)
568 self._update_bid_tags(webpage, video_id)
570 next_props = self._search_nextjs_data(webpage, video_id)['props']
571 page_data = next_props['initialState']['play']
572 video_info = page_data['curVideoInfo']
576 self._get_cookie('I00002', '{}'), video_id, transform_source=compat_urllib_parse_unquote, fatal=False),
577 ('data', 'uid'), default=0)
580 vip_data = self._download_json(
581 'https://pcw-api.iq.com/api/vtype', video_id, note='Downloading VIP data', errnote='Unable to download VIP data', query={
584 'modeCode': self._get_cookie('mod', 'intl'),
585 'langCode': self._get_cookie('lang', 'en_us'),
586 'deviceId': self._get_cookie('QC005', '')
588 ut_list = traverse_obj(vip_data, ('data', 'all_vip', ..., 'vipType'), expected_type=str_or_none, default=[])
592 # bid 0 as an initial format checker
593 dash_paths = self._parse_json(PhantomJSwrapper(self).get(
594 url, html='<!DOCTYPE html>', video_id=video_id, note2='Executing signature code', jscode=self._DASH_JS % {
595 'tvid': video_info['tvId'],
596 'vid': video_info['vid'],
597 'src': traverse_obj(next_props, ('initialProps', 'pageProps', 'ptid'),
598 expected_type=str, default='04022001010011000000'),
600 'dfp': self._get_cookie('dfp', ''),
601 'mode': self._get_cookie('mod', 'intl'),
602 'lang': self._get_cookie('lang', 'en_us'),
603 'bid_list': '[' + ','.join(['0', *self._BID_TAGS.keys()]) + ']',
604 'ut_list': '[' + ','.join(ut_list) + ']',
605 'cmd5x_func': self._extract_cmd5x_function(webpage, video_id),
606 })[1].strip(), video_id)
608 formats, subtitles = [], {}
609 initial_format_data = self._download_json(
610 urljoin('https://cache-video.iq.com', dash_paths['0']), video_id,
611 note='Downloading initial video format info', errnote='Unable to download initial video format info')['data']
613 preview_time = traverse_obj(
614 initial_format_data, ('boss_ts', (None, 'data'), ('previewTime', 'rtime')), expected_type=float_or_none, get_all=False)
615 if traverse_obj(initial_format_data, ('boss_ts', 'data', 'prv'), expected_type=int_or_none):
616 self.report_warning('This preview video is limited%s' % format_field(preview_time, template=' to %s seconds'))
618 # TODO: Extract audio-only formats
619 for bid in set(traverse_obj(initial_format_data, ('program', 'video', ..., 'bid'), expected_type=str_or_none, default=[])):
620 dash_path = dash_paths.get(bid)
622 self.report_warning(f'Unknown format id: {bid}. It is currently not being extracted')
624 format_data = traverse_obj(self._download_json(
625 urljoin('https://cache-video.iq.com', dash_path), video_id,
626 note=f'Downloading format data for {self._BID_TAGS[bid]}', errnote='Unable to download format data',
627 fatal=False), 'data', expected_type=dict)
629 video_format = next((video_format for video_format in traverse_obj(
630 format_data, ('program', 'video', ...), expected_type=dict, default=[]) if str(video_format['bid']) == bid), {})
631 extracted_formats = []
632 if video_format.get('m3u8Url'):
633 extracted_formats.extend(self._extract_m3u8_formats(
634 urljoin(format_data.get('dm3u8', 'https://cache-m.iq.com/dc/dt/'), video_format['m3u8Url']),
635 'mp4', m3u8_id=bid, fatal=False))
636 if video_format.get('mpdUrl'):
637 # TODO: Properly extract mpd hostname
638 extracted_formats.extend(self._extract_mpd_formats(
639 urljoin(format_data.get('dm3u8', 'https://cache-m.iq.com/dc/dt/'), video_format['mpdUrl']),
640 mpd_id=bid, fatal=False))
641 if video_format.get('m3u8'):
642 ff = video_format.get('ff', 'ts')
644 m3u8_formats, _ = self._parse_m3u8_formats_and_subtitles(
645 video_format['m3u8'], ext='mp4', m3u8_id=bid, fatal=False)
646 extracted_formats.extend(m3u8_formats)
648 mpd_data = traverse_obj(
649 self._parse_json(video_format['m3u8'], video_id, fatal=False), ('payload', ..., 'data'), expected_type=str)
652 mpd_formats, _ = self._parse_mpd_formats_and_subtitles(
653 mpd_data, bid, format_data.get('dm3u8', 'https://cache-m.iq.com/dc/dt/'))
654 extracted_formats.extend(mpd_formats)
656 self.report_warning(f'{ff} formats are currently not supported')
658 if not extracted_formats:
659 if video_format.get('s'):
660 self.report_warning(f'{self._BID_TAGS[bid]} format is restricted')
662 self.report_warning(f'Unable to extract {self._BID_TAGS[bid]} format')
663 for f in extracted_formats:
665 'quality': qualities(list(self._BID_TAGS.keys()))(bid),
666 'format_note': self._BID_TAGS[bid],
667 **parse_resolution(video_format.get('scrsz'))
669 formats.extend(extracted_formats)
671 self._sort_formats(formats)
673 for sub_format in traverse_obj(initial_format_data, ('program', 'stl', ...), expected_type=dict, default=[]):
674 lang = self._LID_TAGS.get(str_or_none(sub_format.get('lid')), sub_format.get('_name'))
675 subtitles.setdefault(lang, []).extend([{
677 'url': urljoin(initial_format_data.get('dstl', 'http://meta.video.iqiyi.com'), sub_format[format_key])
678 } for format_key, format_ext in [('srt', 'srt'), ('webvtt', 'vtt')] if sub_format.get(format_key)])
680 extra_metadata = page_data.get('albumInfo') if video_info.get('albumId') and page_data.get('albumInfo') else video_info
683 'title': video_info['name'],
685 'subtitles': subtitles,
686 'description': video_info.get('mergeDesc'),
687 'duration': parse_duration(video_info.get('len')),
688 'age_limit': parse_age_limit(video_info.get('rating')),
689 'average_rating': traverse_obj(page_data, ('playScoreInfo', 'score'), expected_type=float_or_none),
690 'timestamp': parse_iso8601(video_info.get('isoUploadDate')),
691 'categories': traverse_obj(extra_metadata, ('videoTagMap', ..., ..., 'name'), expected_type=str),
692 'cast': traverse_obj(extra_metadata, ('actorArr', ..., 'name'), expected_type=str),
693 'episode_number': int_or_none(video_info.get('order')) or None,
694 'series': video_info.get('albumName'),
class IqAlbumIE(InfoExtractor):
    """Playlist extractor for iq.com album pages.

    NOTE(review): the source of this block carried stray line numbers and a
    few missing lines; the logic below keeps every visible statement
    verbatim, with restored spans marked NOTE(review).
    """
    IE_NAME = 'iq.com:album'
    _VALID_URL = r'https?://(?:www\.)?iq\.com/album/(?:[\w%-]*-)?(?P<id>\w+)'

    # NOTE(review): several info_dict fields of these tests were lost in the
    # corruption; only the surviving fields are reproduced here.
    _TESTS = [{
        'url': 'https://www.iq.com/album/one-piece-1999-1bk9icvr331',
        'info_dict': {
            'id': '1bk9icvr331',
            'title': 'One Piece',
            'description': 'Subtitle available on Sunday 4PM(GMT+8).'
        },
        'playlist_mincount': 238
    }, {
        # An album page that resolves to a single video.
        'url': 'https://www.iq.com/album/九龙城寨-2021-22yjnij099k',
        'info_dict': {
            'id': '22yjnij099k',
            'ext': 'mp4',
            'description': 'md5:8a09f50b8ba0db4dc69bc7c844228044',
            'timestamp': 1641911371,
            'upload_date': '20220111',
            'cast': ['Shi Yan Neng', 'Yu Lang', 'Peter lv', 'Sun Zi Jun', 'Yang Xiao Bo'],
            'average_rating': float,
        },
        'expected_warnings': ['format is restricted']
    }]

    def _entries(self, album_id_num, page_ranges, album_id=None, mode_code='intl', lang_code='en_us'):
        """Yield url_results for every episode across the album's pages."""
        for page_range in page_ranges:
            page = self._download_json(
                f'https://pcw-api.iq.com/api/episodeListSource/{album_id_num}', album_id,
                note=f'Downloading video list episodes {page_range.get("msg", "")}',
                errnote='Unable to download video list', query={
                    # NOTE(review): this first query key was missing from the
                    # corrupted source; restored from upstream — confirm.
                    'platformId': 3,
                    'modeCode': mode_code,
                    'langCode': lang_code,
                    'endOrder': page_range['to'],
                    'startOrder': page_range['from']
                })
            for video in page['data']['epg']:
                yield self.url_result('https://www.iq.com/play/%s' % (video.get('playLocSuffix') or video['qipuIdStr']),
                                      IqIE.ie_key(), video.get('qipuIdStr'), video.get('name'))

    def _real_extract(self, url):
        album_id = self._match_id(url)
        webpage = self._download_webpage(url, album_id)
        next_data = self._search_nextjs_data(webpage, album_id)
        album_data = next_data['props']['initialState']['album']['videoAlbumInfo']

        # Single-video "albums" are delegated straight to IqIE.
        if album_data.get('videoType') == 'singleVideo':
            return self.url_result('https://www.iq.com/play/%s' % album_id, IqIE.ie_key())
        return self.playlist_result(
            self._entries(album_data['albumId'], album_data['totalPageRange'], album_id,
                          traverse_obj(next_data, ('props', 'initialProps', 'pageProps', 'modeCode')),
                          traverse_obj(next_data, ('props', 'initialProps', 'pageProps', 'langCode'))),
            album_id, album_data.get('name'), album_data.get('desc'))