2 from __future__
import unicode_literals
9 from .common
import InfoExtractor
10 from ..compat
import (
12 compat_urllib_parse_urlencode
,
13 compat_urllib_parse_unquote
15 from .openload
import PhantomJSwrapper
23 get_element_by_attribute
,
40 return hashlib
.md5(text
.encode('utf-8')).hexdigest()
43 class IqiyiSDK(object):
44 def __init__(self
, target
, ip
, timestamp
):
47 self
.timestamp
= timestamp
51 return compat_str(sum(map(lambda p
: int(p
, 16), list(data
))))
55 if isinstance(num
, int):
57 return compat_str(sum(map(int, num
)))
60 even
= self
.digit_sum(compat_str(self
.timestamp
)[::2])
61 odd
= self
.digit_sum(compat_str(self
.timestamp
)[1::2])
64 def preprocess(self
, chunksize
):
65 self
.target
= md5_text(self
.target
)
67 for i
in range(32 // chunksize
):
68 chunks
.append(self
.target
[chunksize
* i
:chunksize
* (i
+ 1)])
70 chunks
.append(self
.target
[32 - 32 % chunksize
:])
71 return chunks
, list(map(int, self
.ip
.split('.')))
73 def mod(self
, modulus
):
74 chunks
, ip
= self
.preprocess(32)
75 self
.target
= chunks
[0] + ''.join(map(lambda p
: compat_str(p
% modulus
), ip
))
77 def split(self
, chunksize
):
84 chunks
, ip
= self
.preprocess(chunksize
)
86 for i
in range(len(chunks
)):
87 ip_part
= compat_str(ip
[i
] % modulus_map
[chunksize
]) if i
< 4 else ''
89 ret
+= ip_part
+ chunks
[i
]
91 ret
+= chunks
[i
] + ip_part
94 def handle_input16(self
):
95 self
.target
= md5_text(self
.target
)
96 self
.target
= self
.split_sum(self
.target
[:16]) + self
.target
+ self
.split_sum(self
.target
[16:])
98 def handle_input8(self
):
99 self
.target
= md5_text(self
.target
)
102 part
= self
.target
[8 * i
:8 * (i
+ 1)]
103 ret
+= self
.split_sum(part
) + part
107 self
.target
= md5_text(self
.target
)
108 self
.target
= self
.split_sum(self
.target
) + self
.target
110 def date(self
, scheme
):
111 self
.target
= md5_text(self
.target
)
112 d
= time
.localtime(self
.timestamp
)
114 'y': compat_str(d
.tm_year
),
115 'm': '%02d' % d
.tm_mon
,
116 'd': '%02d' % d
.tm_mday
,
118 self
.target
+= ''.join(map(lambda c
: strings
[c
], list(scheme
)))
120 def split_time_even_odd(self
):
121 even
, odd
= self
.even_odd()
122 self
.target
= odd
+ md5_text(self
.target
) + even
124 def split_time_odd_even(self
):
125 even
, odd
= self
.even_odd()
126 self
.target
= even
+ md5_text(self
.target
) + odd
128 def split_ip_time_sum(self
):
129 chunks
, ip
= self
.preprocess(32)
130 self
.target
= compat_str(sum(ip
)) + chunks
[0] + self
.digit_sum(self
.timestamp
)
132 def split_time_ip_sum(self
):
133 chunks
, ip
= self
.preprocess(32)
134 self
.target
= self
.digit_sum(self
.timestamp
) + chunks
[0] + compat_str(sum(ip
))
137 class IqiyiSDKInterpreter(object):
    def __init__(self, sdk_code):
        """Hold the packed (obfuscated) SDK JavaScript; run() decodes and
        interprets it."""
        self.sdk_code = sdk_code
141 def run(self
, target
, ip
, timestamp
):
142 self
.sdk_code
= decode_packed_codes(self
.sdk_code
)
144 functions
= re
.findall(r
'input=([a-zA-Z0-9]+)\(input', self
.sdk_code
)
146 sdk
= IqiyiSDK(target
, ip
, timestamp
)
149 'handleSum': sdk
.handleSum
,
150 'handleInput8': sdk
.handle_input8
,
151 'handleInput16': sdk
.handle_input16
,
152 'splitTimeEvenOdd': sdk
.split_time_even_odd
,
153 'splitTimeOddEven': sdk
.split_time_odd_even
,
154 'splitIpTimeSum': sdk
.split_ip_time_sum
,
155 'splitTimeIpSum': sdk
.split_time_ip_sum
,
157 for function
in functions
:
158 if re
.match(r
'mod\d+', function
):
159 sdk
.mod(int(function
[3:]))
160 elif re
.match(r
'date[ymd]{3}', function
):
161 sdk
.date(function
[4:])
162 elif re
.match(r
'split\d+', function
):
163 sdk
.split(int(function
[5:]))
164 elif function
in other_functions
:
165 other_functions
[function
]()
167 raise ExtractorError('Unknown function %s' % function
)
172 class IqiyiIE(InfoExtractor
):
176 _VALID_URL
= r
'https?://(?:(?:[^.]+\.)?iqiyi\.com|www\.pps\.tv)/.+\.html'
178 _NETRC_MACHINE
= 'iqiyi'
181 'url': 'http://www.iqiyi.com/v_19rrojlavg.html',
182 # MD5 checksum differs on my machine and Travis CI
184 'id': '9c1fb1b99d192b21c559e5a1a2cb3c73',
186 'title': '美国德州空中惊现奇异云团 酷似UFO',
189 'url': 'http://www.iqiyi.com/v_19rrhnnclk.html',
190 'md5': 'b7dc800a4004b1b57749d9abae0472da',
192 'id': 'e3f585b550a280af23c98b6cb2be19fb',
194 # This can be either Simplified Chinese or Traditional Chinese
195 'title': r
're:^(?:名侦探柯南 国语版:第752集 迫近灰原秘密的黑影 下篇|名偵探柯南 國語版:第752集 迫近灰原秘密的黑影 下篇)$',
197 'skip': 'Geo-restricted to China',
199 'url': 'http://www.iqiyi.com/w_19rt6o8t9p.html',
200 'only_matching': True,
202 'url': 'http://www.iqiyi.com/a_19rrhbc6kt.html',
203 'only_matching': True,
205 'url': 'http://yule.iqiyi.com/pcb.html',
207 'id': '4a0af228fddb55ec96398a364248ed7f',
209 'title': '第2017-04-21期 女艺人频遭极端粉丝骚扰',
212 # VIP-only video. The first 2 parts (6 minutes) are available without login
213 # MD5 sums omitted as values are different on Travis CI and my machine
214 'url': 'http://www.iqiyi.com/v_19rrny4w8w.html',
216 'id': 'f3cf468b39dddb30d676f89a91200dc1',
220 'skip': 'Geo-restricted to China',
222 'url': 'http://www.iqiyi.com/a_19rrhb8ce1.html',
227 'playlist_count': 101,
229 'url': 'http://www.pps.tv/w_19rrbav0ph.html',
230 'only_matching': True,
234 '96': 1, # 216p, 240p
240 '5': 6, # 1072p, 1080p
244 def _real_initialize(self
):
249 # public key extracted from http://static.iqiyi.com/js/qiyiV2/20160129180840/jobs/i18n/i18nIndex.js
250 N
= 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
253 return ohdave_rsa_encrypt(data
, e
, N
)
256 username
, password
= self
._get
_login
_info
()
258 # No authentication to be performed
262 data
= self
._download
_json
(
263 'http://kylin.iqiyi.com/get_token', None,
264 note
='Get token for logging', errnote
='Unable to get token for logging')
266 timestamp
= int(time
.time())
267 target
= '/apis/reglogin/login.action?lang=zh_TW&area_code=null&email=%s&passwd=%s&agenttype=1&from=undefined&keeplogin=0&piccode=&fromurl=&_pos=1' % (
268 username
, self
._rsa
_fun
(password
.encode('utf-8')))
270 interp
= IqiyiSDKInterpreter(sdk
)
271 sign
= interp
.run(target
, data
['ip'], timestamp
)
273 validation_params
= {
275 'server': 'BEA3AA1908656AABCCFF76582C4C6660',
276 'token': data
['token'],
277 'bird_src': 'f8d91d57af224da7893dd397d52d811a',
281 validation_result
= self
._download
_json
(
282 'http://kylin.iqiyi.com/validate?' + compat_urllib_parse_urlencode(validation_params
), None,
283 note
='Validate credentials', errnote
='Unable to validate credentials')
286 'P00107': 'please login via the web interface and enter the CAPTCHA code',
287 'P00117': 'bad username or password',
290 code
= validation_result
['code']
292 msg
= MSG_MAP
.get(code
)
294 msg
= 'error %s' % code
295 if validation_result
.get('msg'):
296 msg
+= ': ' + validation_result
['msg']
297 self
.report_warning('unable to log in: ' + msg
)
302 def get_raw_data(self
, tvid
, video_id
):
303 tm
= int(time
.time() * 1000)
305 key
= 'd5fb4bd9d50c4be6948c97edd7254b0e'
306 sc
= md5_text(compat_str(tm
) + key
+ tvid
)
310 'src': '76f90cbd92f94a2e925d83e8ccd22cb7',
315 return self
._download
_json
(
316 'http://cache.m.iqiyi.com/jp/tmts/%s/%s/' % (tvid
, video_id
),
317 video_id
, transform_source
=lambda s
: remove_start(s
, 'var tvInfoJs='),
318 query
=params
, headers
=self
.geo_verification_headers())
320 def _extract_playlist(self
, webpage
):
324 r
'<a[^>]+class="site-piclist_pic_link"[^>]+href="(http://www\.iqiyi\.com/.+\.html)"',
329 album_id
= self
._search
_regex
(
330 r
'albumId\s*:\s*(\d+),', webpage
, 'album ID')
331 album_title
= self
._search
_regex
(
332 r
'data-share-title="([^"]+)"', webpage
, 'album title', fatal
=False)
334 entries
= list(map(self
.url_result
, links
))
336 # Start from 2 because links in the first page are already on webpage
337 for page_num
in itertools
.count(2):
338 pagelist_page
= self
._download
_webpage
(
339 'http://cache.video.qiyi.com/jp/avlist/%s/%d/%d/' % (album_id
, page_num
, PAGE_SIZE
),
341 note
='Download playlist page %d' % page_num
,
342 errnote
='Failed to download playlist page %d' % page_num
)
343 pagelist
= self
._parse
_json
(
344 remove_start(pagelist_page
, 'var tvInfoJs='), album_id
)
345 vlist
= pagelist
['data']['vlist']
347 entries
.append(self
.url_result(item
['vurl']))
348 if len(vlist
) < PAGE_SIZE
:
351 return self
.playlist_result(entries
, album_id
, album_title
)
353 def _real_extract(self
, url
):
354 webpage
= self
._download
_webpage
(
355 url
, 'temp_id', note
='download video page')
357 # There's no simple way to determine whether an URL is a playlist or not
358 # Sometimes there are playlist links in individual videos, so treat it
359 # as a single video first
360 tvid
= self
._search
_regex
(
361 r
'data-(?:player|shareplattrigger)-tvid\s*=\s*[\'"](\d+)', webpage, 'tvid', default=None)
363 playlist_result = self._extract_playlist(webpage)
365 return playlist_result
366 raise ExtractorError('Can\'t find any video')
368 video_id = self._search_regex(
369 r'data-(?:player|shareplattrigger)-videoid\s*=\s*[\'"]([a
-f\d
]+)', webpage, 'video_id
')
373 raw_data = self.get_raw_data(tvid, video_id)
375 if raw_data['code
'] != 'A00000
':
376 if raw_data['code
'] == 'A00111
':
377 self.raise_geo_restricted()
378 raise ExtractorError('Unable to load data
. Error code
: ' + raw_data['code
'])
380 data = raw_data['data
']
382 for stream in data['vidl
']:
383 if 'm3utx
' not in stream:
385 vd = compat_str(stream['vd
'])
387 'url
': stream['m3utx
'],
390 'quality
': self._FORMATS_MAP.get(vd, -1),
391 'protocol
': 'm3u8_native
',
397 self._sleep(5, video_id)
399 self._sort_formats(formats)
400 title = (get_element_by_id('widget
-videotitle
', webpage)
401 or clean_html(get_element_by_attribute('class', 'mod
-play
-tit
', webpage))
402 or self._html_search_regex(r'<span
[^
>]+data
-videochanged
-title
="word"[^
>]*>([^
<]+)</span
>', webpage, 'title
'))
411 class IqIE(InfoExtractor):
413 IE_DESC = 'International version of iQiyi
'
414 _VALID_URL = r'https?
://(?
:www\
.)?iq\
.com
/play
/(?
:[\w
%-]*-)?
(?P
<id>\w
+)'
416 'url
': 'https
://www
.iq
.com
/play
/one
-piece
-episode
-1000-1ma1i6ferf4
',
417 'md5
': '2d7caf6eeca8a32b407094b33b757d39
',
421 'title
': '航海王 第
1000集
',
422 'description
': 'Subtitle available on Sunday
4PM(GMT
+8)
.',
424 'timestamp
': 1637488203,
425 'upload_date
': '20211121',
426 'episode_number
': 1000,
427 'episode
': 'Episode
1000',
428 'series
': 'One Piece
',
430 'average_rating
': float,
435 'expected_warnings
': ['format
is restricted
']
437 # VIP-restricted video
438 'url
': 'https
://www
.iq
.com
/play
/mermaid
-in-the
-fog
-2021-gbdpx13bs4
',
439 'only_matching
': True
464 console.log(page.evaluate(function() {
465 var tvid = "%(tvid)s"; var vid = "%(vid)s"; var src = "%(src)s";
466 var uid = "%(uid)s"; var dfp = "%(dfp)s"; var mode = "%(mode)s"; var lang = "%(lang)s";
467 var bid_list = %(bid_list)s; var ut_list = %(ut_list)s; var tm = new Date().getTime();
468 var cmd5x_func = %(cmd5x_func)s; var cmd5x_exporter = {}; cmd5x_func({}, cmd5x_exporter, {}); var cmd5x = cmd5x_exporter.cmd5x;
469 var authKey = cmd5x(cmd5x('') + tm + '' + tvid);
470 var k_uid = Array.apply(null, Array(32)).map(function() {return Math.floor(Math.random() * 15).toString(16)}).join('');
472 bid_list.forEach(function(bid) {
497 'prio
': JSON.stringify({
511 'k_ft1
': 141287244169348,
512 'k_ft4
': 34359746564,
514 'bop
': JSON.stringify({
520 for (var prop in query) {
521 enc_params.push(encodeURIComponent(prop) + '=' + encodeURIComponent(query[prop]));
523 ut_list.forEach(function(ut) {
524 enc_params.push('ut
=' + ut);
526 var dash_path = '/dash?
' + enc_params.join('&'); dash_path += '&vf
=' + cmd5x(dash_path);
527 dash_paths[bid] = dash_path;
529 return JSON.stringify(dash_paths);
534 def _extract_vms_player_js(self, webpage, video_id):
535 player_js_cache = self._downloader.cache.load('iq
', 'player_js
')
537 return player_js_cache
538 webpack_js_url = self._proto_relative_url(self._search_regex(
539 r'<script src
="((?:https?)?//stc.iqiyipic.com/_next/static/chunks/webpack-\w+\.js)"', webpage, 'webpack URL
'))
540 webpack_js = self._download_webpage(webpack_js_url, video_id, note='Downloading webpack JS
', errnote='Unable to download webpack JS
')
541 webpack_map1, webpack_map2 = [self._parse_json(js_map, video_id, transform_source=js_to_json) for js_map in self._search_regex(
542 r'\
(({[^}
]*})\
[\w
+\
][^\
)]*\
)\s
*\
+\s
*["\']\.["\']\s
*\
+\s
*({[^}
]*})\
[\w
+\
]\
+["\']\.js', webpack_js, 'JS locations', group=(1, 2))]
543 for module_index in reversed(list(webpack_map2.keys())):
544 module_js = self._download_webpage(
545 f'https://stc.iqiyipic.com/_next/static/chunks/{webpack_map1.get(module_index, module_index)}.{webpack_map2[module_index]}.js',
546 video_id, note=f'Downloading #{module_index} module JS', errnote='Unable to download module JS', fatal=False) or ''
547 if 'vms request' in module_js:
548 self._downloader.cache.store('iq', 'player_js', module_js)
550 raise ExtractorError('Unable to extract player JS')
552 def _extract_cmd5x_function(self, webpage, video_id):
553 return self._search_regex(r',\s*(function\s*\([^\)]*\)\s*{\s*var _qda.+_qdc\(\)\s*})\s*,',
554 self._extract_vms_player_js(webpage, video_id), 'signature function')
556 def _update_bid_tags(self, webpage, video_id):
557 extracted_bid_tags = self._parse_json(
559 r'arguments\[1\][^,]*,\s*function\s*\([^\)]*\)\s*{\s*"use strict";?\s*var \w=({.+}})\s*,\s*\w\s*=\s*{\s*getNewVd',
560 self._extract_vms_player_js(webpage, video_id), 'video tags', default=''),
561 video_id, transform_source=js_to_json, fatal=False)
562 if not extracted_bid_tags:
565 bid: traverse_obj(extracted_bid_tags, (bid, 'value'), expected_type=str, default=self._BID_TAGS.get(bid))
566 for bid in extracted_bid_tags.keys()
569 def _get_cookie(self, name, default=None):
570 cookie = self._get_cookies('https://iq.com/').get(name)
571 return cookie.value if cookie else default
573 def _real_extract(self, url):
574 video_id = self._match_id(url)
575 webpage = self._download_webpage(url, video_id)
576 self._update_bid_tags(webpage, video_id)
578 next_props = self._search_nextjs_data(webpage, video_id)['props']
579 page_data = next_props['initialState']['play']
580 video_info = page_data['curVideoInfo']
584 self._get_cookie('I00002', '{}'), video_id, transform_source=compat_urllib_parse_unquote, fatal=False),
585 ('data', 'uid'), default=0)
588 vip_data = self._download_json(
589 'https://pcw-api.iq.com/api/vtype', video_id, note='Downloading VIP data', errnote='Unable to download VIP data', query={
592 'modeCode': self._get_cookie('mod', 'intl'),
593 'langCode': self._get_cookie('lang', 'en_us'),
594 'deviceId': self._get_cookie('QC005', '')
596 ut_list = traverse_obj(vip_data, ('data', 'all_vip', ..., 'vipType'), expected_type=str_or_none, default=[])
600 # bid 0 as an initial format checker
601 dash_paths = self._parse_json(PhantomJSwrapper(self).get(
602 url, html='<!DOCTYPE html>', video_id=video_id, note2='Executing signature code', jscode=self._DASH_JS % {
603 'tvid': video_info['tvId'],
604 'vid': video_info['vid'],
605 'src': traverse_obj(next_props, ('initialProps', 'pageProps', 'ptid'),
606 expected_type=str, default='04022001010011000000'),
608 'dfp': self._get_cookie('dfp', ''),
609 'mode': self._get_cookie('mod', 'intl'),
610 'lang': self._get_cookie('lang', 'en_us'),
611 'bid_list': '[' + ','.join(['0', *self._BID_TAGS.keys()]) + ']',
612 'ut_list': '[' + ','.join(ut_list) + ']',
613 'cmd5x_func': self._extract_cmd5x_function(webpage, video_id),
614 })[1].strip(), video_id)
616 formats, subtitles = [], {}
617 initial_format_data = self._download_json(
618 urljoin('https://cache-video.iq.com', dash_paths['0']), video_id,
619 note='Downloading initial video format info', errnote='Unable to download initial video format info')['data']
621 preview_time = traverse_obj(
622 initial_format_data, ('boss_ts', (None, 'data'), ('previewTime', 'rtime')), expected_type=float_or_none, get_all=False)
623 if traverse_obj(initial_format_data, ('boss_ts', 'data', 'prv'), expected_type=int_or_none):
624 self.report_warning('This preview video is limited%s' % format_field(preview_time, template='to %s seconds'))
626 # TODO: Extract audio-only formats
627 for bid in set(traverse_obj(initial_format_data, ('program', 'video', ..., 'bid'), expected_type=str_or_none, default=[])):
628 dash_path = dash_paths.get(bid)
630 self.report_warning(f'Unknown format id: {bid}. It is currently not being extracted')
632 format_data = traverse_obj(self._download_json(
633 urljoin('https://cache-video.iq.com', dash_path), video_id,
634 note=f'Downloading format data for {self._BID_TAGS[bid]}', errnote='Unable to download format data',
635 fatal=False), 'data', expected_type=dict)
637 video_format = next((video_format for video_format in traverse_obj(
638 format_data, ('program', 'video', ...), expected_type=dict, default=[]) if str(video_format['bid']) == bid), {})
639 extracted_formats = []
640 if video_format.get('m3u8Url'):
641 extracted_formats.extend(self._extract_m3u8_formats(
642 urljoin(format_data.get('dm3u8', 'https://cache-m.iq.com/dc/dt/'), video_format['m3u8Url']),
643 'mp4', m3u8_id=bid, fatal=False))
644 if video_format.get('mpdUrl'):
645 # TODO: Properly extract mpd hostname
646 extracted_formats.extend(self._extract_mpd_formats(
647 urljoin(format_data.get('dm3u8', 'https://cache-m.iq.com/dc/dt/'), video_format['mpdUrl']),
648 mpd_id=bid, fatal=False))
649 if video_format.get('m3u8'):
650 ff = video_format.get('ff', 'ts')
652 m3u8_formats, _ = self._parse_m3u8_formats_and_subtitles(
653 video_format['m3u8'], ext='mp4', m3u8_id=bid, fatal=False)
654 extracted_formats.extend(m3u8_formats)
656 mpd_data = traverse_obj(
657 self._parse_json(video_format['m3u8'], video_id, fatal=False), ('payload', ..., 'data'), expected_type=str)
660 mpd_formats, _ = self._parse_mpd_formats_and_subtitles(
661 mpd_data, bid, format_data.get('dm3u8', 'https://cache-m.iq.com/dc/dt/'))
662 extracted_formats.extend(mpd_formats)
664 self.report_warning(f'{ff} formats are currently not supported')
666 if not extracted_formats:
667 if video_format.get('s'):
668 self.report_warning(f'{self._BID_TAGS[bid]} format is restricted')
670 self.report_warning(f'Unable to extract {self._BID_TAGS[bid]} format')
671 for f in extracted_formats:
673 'quality': qualities(list(self._BID_TAGS.keys()))(bid),
674 'format_note': self._BID_TAGS[bid],
675 **parse_resolution(video_format.get('scrsz'))
677 formats.extend(extracted_formats)
679 self._sort_formats(formats)
681 for sub_format in traverse_obj(initial_format_data, ('program', 'stl', ...), expected_type=dict, default=[]):
682 lang = self._LID_TAGS.get(str_or_none(sub_format.get('lid')), sub_format.get('_name'))
683 subtitles.setdefault(lang, []).extend([{
685 'url': urljoin(initial_format_data.get('dstl', 'http://meta.video.iqiyi.com'), sub_format[format_key])
686 } for format_key, format_ext in [('srt', 'srt'), ('webvtt', 'vtt')] if sub_format.get(format_key)])
688 extra_metadata = page_data.get('albumInfo') if video_info.get('albumId') and page_data.get('albumInfo') else video_info
691 'title': video_info['name'],
693 'subtitles': subtitles,
694 'description': video_info.get('mergeDesc'),
695 'duration': parse_duration(video_info.get('len')),
696 'age_limit': parse_age_limit(video_info.get('rating')),
697 'average_rating': traverse_obj(page_data, ('playScoreInfo', 'score'), expected_type=float_or_none),
698 'timestamp': parse_iso8601(video_info.get('isoUploadDate')),
699 'categories': traverse_obj(extra_metadata, ('videoTagMap', ..., ..., 'name'), expected_type=str),
700 'cast': traverse_obj(extra_metadata, ('actorArr', ..., 'name'), expected_type=str),
701 'episode_number': int_or_none(video_info.get('order')) or None,
702 'series': video_info.get('albumName'),
706 class IqAlbumIE(InfoExtractor):
707 IE_NAME = 'iq.com:album'
708 _VALID_URL = r'https?://(?:www\.)?iq\.com/album/(?:[\w%-]*-)?(?P<id>\w+)'
710 'url': 'https://www.iq.com/album/one-piece-1999-1bk9icvr331',
713 'title': 'One Piece',
714 'description': 'Subtitle available on Sunday 4PM(GMT+8).'
716 'playlist_mincount': 238
719 'url': 'https://www.iq.com/album/九龙城寨-2021-22yjnij099k',
724 'description': 'md5:8a09f50b8ba0db4dc69bc7c844228044',
726 'timestamp': 1641911371,
727 'upload_date': '20220111',
729 'cast': ['Shi Yan Neng', 'Yu Lang', 'Peter lv', 'Sun Zi Jun', 'Yang Xiao Bo'],
731 'average_rating': float,
733 'expected_warnings': ['format is restricted']
736 def _entries(self, album_id_num, page_ranges, album_id=None, mode_code='intl', lang_code='en_us'):
737 for page_range in page_ranges:
738 page = self._download_json(
739 f'https://pcw-api.iq.com/api/episodeListSource/{album_id_num}', album_id,
740 note=f'Downloading video list episodes {page_range.get("msg", "")}',
741 errnote='Unable to download video list', query={
743 'modeCode': mode_code,
744 'langCode': lang_code,
745 'endOrder': page_range['to'],
746 'startOrder': page_range['from']
748 for video in page['data']['epg']:
749 yield self.url_result('https://www.iq.com/play/%s' % (video.get('playLocSuffix') or video['qipuIdStr']),
750 IqIE.ie_key(), video.get('qipuIdStr'), video.get('name'))
752 def _real_extract(self, url):
753 album_id = self._match_id(url)
754 webpage = self._download_webpage(url, album_id)
755 next_data = self._search_nextjs_data(webpage, album_id)
756 album_data = next_data['props']['initialState']['album']['videoAlbumInfo']
758 if album_data.get('videoType') == 'singleVideo':
759 return self.url_result('https://www.iq.com/play/%s' % album_id, IqIE.ie_key())
760 return self.playlist_result(
761 self._entries(album_data['albumId'], album_data['totalPageRange'], album_id,
762 traverse_obj(next_data, ('props', 'initialProps', 'pageProps', 'modeCode')),
763 traverse_obj(next_data, ('props', 'initialProps', 'pageProps', 'langCode'))),
764 album_id, album_data.get('name'), album_data.get('desc'))