6 from .common
import InfoExtractor
9 compat_urllib_parse_urlencode
,
10 compat_urllib_parse_unquote
12 from .openload
import PhantomJSwrapper
20 get_element_by_attribute
,
37 return hashlib
.md5(text
.encode('utf-8')).hexdigest()
41 def __init__(self
, target
, ip
, timestamp
):
44 self
.timestamp
= timestamp
48 return compat_str(sum(map(lambda p
: int(p
, 16), list(data
))))
52 if isinstance(num
, int):
54 return compat_str(sum(map(int, num
)))
57 even
= self
.digit_sum(compat_str(self
.timestamp
)[::2])
58 odd
= self
.digit_sum(compat_str(self
.timestamp
)[1::2])
61 def preprocess(self
, chunksize
):
62 self
.target
= md5_text(self
.target
)
64 for i
in range(32 // chunksize
):
65 chunks
.append(self
.target
[chunksize
* i
:chunksize
* (i
+ 1)])
67 chunks
.append(self
.target
[32 - 32 % chunksize
:])
68 return chunks
, list(map(int, self
.ip
.split('.')))
def mod(self, modulus):
    """Hash-mixing step: md5 the target, then append each IP octet reduced mod *modulus*.

    `preprocess(32)` md5-hashes ``self.target`` and returns it as one 32-char
    chunk plus the IP split into its integer octets.
    """
    chunks, octets = self.preprocess(32)
    reduced = ''.join(compat_str(octet % modulus) for octet in octets)
    self.target = chunks[0] + reduced
74 def split(self
, chunksize
):
81 chunks
, ip
= self
.preprocess(chunksize
)
83 for i
in range(len(chunks
)):
84 ip_part
= compat_str(ip
[i
] % modulus_map
[chunksize
]) if i
< 4 else ''
86 ret
+= ip_part
+ chunks
[i
]
88 ret
+= chunks
[i
] + ip_part
def handle_input16(self):
    """md5 the target, then wrap the digest in the hex-digit sums of its two halves."""
    digest = md5_text(self.target)
    head, tail = digest[:16], digest[16:]
    # split_sum folds a hex string into the sum of its base-16 digits.
    self.target = self.split_sum(head) + digest + self.split_sum(tail)
95 def handle_input8(self
):
96 self
.target
= md5_text(self
.target
)
99 part
= self
.target
[8 * i
:8 * (i
+ 1)]
100 ret
+= self
.split_sum(part
) + part
104 self
.target
= md5_text(self
.target
)
105 self
.target
= self
.split_sum(self
.target
) + self
.target
107 def date(self
, scheme
):
108 self
.target
= md5_text(self
.target
)
109 d
= time
.localtime(self
.timestamp
)
111 'y': compat_str(d
.tm_year
),
112 'm': '%02d' % d
.tm_mon
,
113 'd': '%02d' % d
.tm_mday
,
115 self
.target
+= ''.join(map(lambda c
: strings
[c
], list(scheme
)))
def split_time_even_odd(self):
    """Surround the md5 of the target with the timestamp's odd/even-position digit sums."""
    even_part, odd_part = self.even_odd()
    self.target = ''.join([odd_part, md5_text(self.target), even_part])
def split_time_odd_even(self):
    """Mirror of split_time_even_odd: even-position sum first, odd-position sum last."""
    even_part, odd_part = self.even_odd()
    self.target = ''.join([even_part, md5_text(self.target), odd_part])
def split_ip_time_sum(self):
    """Rebuild target as: sum of IP octets + md5(target) + digit sum of the timestamp."""
    chunks, octets = self.preprocess(32)
    ip_total = compat_str(sum(octets))
    ts_digits = self.digit_sum(self.timestamp)
    self.target = ip_total + chunks[0] + ts_digits
def split_time_ip_sum(self):
    """Mirror of split_ip_time_sum: timestamp digit sum first, IP octet sum last."""
    chunks, octets = self.preprocess(32)
    ts_digits = self.digit_sum(self.timestamp)
    ip_total = compat_str(sum(octets))
    self.target = ts_digits + chunks[0] + ip_total
134 class IqiyiSDKInterpreter
:
def __init__(self, sdk_code):
    """Keep the packed SDK JavaScript; it is unpacked later in run()."""
    self.sdk_code = sdk_code
138 def run(self
, target
, ip
, timestamp
):
139 self
.sdk_code
= decode_packed_codes(self
.sdk_code
)
141 functions
= re
.findall(r
'input=([a-zA-Z0-9]+)\(input', self
.sdk_code
)
143 sdk
= IqiyiSDK(target
, ip
, timestamp
)
146 'handleSum': sdk
.handleSum
,
147 'handleInput8': sdk
.handle_input8
,
148 'handleInput16': sdk
.handle_input16
,
149 'splitTimeEvenOdd': sdk
.split_time_even_odd
,
150 'splitTimeOddEven': sdk
.split_time_odd_even
,
151 'splitIpTimeSum': sdk
.split_ip_time_sum
,
152 'splitTimeIpSum': sdk
.split_time_ip_sum
,
154 for function
in functions
:
155 if re
.match(r
'mod\d+', function
):
156 sdk
.mod(int(function
[3:]))
157 elif re
.match(r
'date[ymd]{3}', function
):
158 sdk
.date(function
[4:])
159 elif re
.match(r
'split\d+', function
):
160 sdk
.split(int(function
[5:]))
161 elif function
in other_functions
:
162 other_functions
[function
]()
164 raise ExtractorError('Unknown function %s' % function
)
169 class IqiyiIE(InfoExtractor
):
173 _VALID_URL
= r
'https?://(?:(?:[^.]+\.)?iqiyi\.com|www\.pps\.tv)/.+\.html'
175 _NETRC_MACHINE
= 'iqiyi'
178 'url': 'http://www.iqiyi.com/v_19rrojlavg.html',
179 # MD5 checksum differs on my machine and Travis CI
181 'id': '9c1fb1b99d192b21c559e5a1a2cb3c73',
183 'title': '美国德州空中惊现奇异云团 酷似UFO',
186 'url': 'http://www.iqiyi.com/v_19rrhnnclk.html',
187 'md5': 'b7dc800a4004b1b57749d9abae0472da',
189 'id': 'e3f585b550a280af23c98b6cb2be19fb',
191 # This can be either Simplified Chinese or Traditional Chinese
192 'title': r
're:^(?:名侦探柯南 国语版:第752集 迫近灰原秘密的黑影 下篇|名偵探柯南 國語版:第752集 迫近灰原秘密的黑影 下篇)$',
194 'skip': 'Geo-restricted to China',
196 'url': 'http://www.iqiyi.com/w_19rt6o8t9p.html',
197 'only_matching': True,
199 'url': 'http://www.iqiyi.com/a_19rrhbc6kt.html',
200 'only_matching': True,
202 'url': 'http://yule.iqiyi.com/pcb.html',
204 'id': '4a0af228fddb55ec96398a364248ed7f',
206 'title': '第2017-04-21期 女艺人频遭极端粉丝骚扰',
209 # VIP-only video. The first 2 parts (6 minutes) are available without login
210 # MD5 sums omitted as values are different on Travis CI and my machine
211 'url': 'http://www.iqiyi.com/v_19rrny4w8w.html',
213 'id': 'f3cf468b39dddb30d676f89a91200dc1',
217 'skip': 'Geo-restricted to China',
219 'url': 'http://www.iqiyi.com/a_19rrhb8ce1.html',
224 'playlist_count': 101,
226 'url': 'http://www.pps.tv/w_19rrbav0ph.html',
227 'only_matching': True,
231 '96': 1, # 216p, 240p
237 '5': 6, # 1072p, 1080p
243 # public key extracted from http://static.iqiyi.com/js/qiyiV2/20160129180840/jobs/i18n/i18nIndex.js
244 N
= 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
247 return ohdave_rsa_encrypt(data
, e
, N
)
249 def _perform_login(self
, username
, password
):
251 data
= self
._download
_json
(
252 'http://kylin.iqiyi.com/get_token', None,
253 note
='Get token for logging', errnote
='Unable to get token for logging')
255 timestamp
= int(time
.time())
256 target
= '/apis/reglogin/login.action?lang=zh_TW&area_code=null&email=%s&passwd=%s&agenttype=1&from=undefined&keeplogin=0&piccode=&fromurl=&_pos=1' % (
257 username
, self
._rsa
_fun
(password
.encode('utf-8')))
259 interp
= IqiyiSDKInterpreter(sdk
)
260 sign
= interp
.run(target
, data
['ip'], timestamp
)
262 validation_params
= {
264 'server': 'BEA3AA1908656AABCCFF76582C4C6660',
265 'token': data
['token'],
266 'bird_src': 'f8d91d57af224da7893dd397d52d811a',
270 validation_result
= self
._download
_json
(
271 'http://kylin.iqiyi.com/validate?' + compat_urllib_parse_urlencode(validation_params
), None,
272 note
='Validate credentials', errnote
='Unable to validate credentials')
275 'P00107': 'please login via the web interface and enter the CAPTCHA code',
276 'P00117': 'bad username or password',
279 code
= validation_result
['code']
281 msg
= MSG_MAP
.get(code
)
283 msg
= 'error %s' % code
284 if validation_result
.get('msg'):
285 msg
+= ': ' + validation_result
['msg']
286 self
.report_warning('unable to log in: ' + msg
)
291 def get_raw_data(self
, tvid
, video_id
):
292 tm
= int(time
.time() * 1000)
294 key
= 'd5fb4bd9d50c4be6948c97edd7254b0e'
295 sc
= md5_text(compat_str(tm
) + key
+ tvid
)
299 'src': '76f90cbd92f94a2e925d83e8ccd22cb7',
304 return self
._download
_json
(
305 'http://cache.m.iqiyi.com/jp/tmts/%s/%s/' % (tvid
, video_id
),
306 video_id
, transform_source
=lambda s
: remove_start(s
, 'var tvInfoJs='),
307 query
=params
, headers
=self
.geo_verification_headers())
309 def _extract_playlist(self
, webpage
):
313 r
'<a[^>]+class="site-piclist_pic_link"[^>]+href="(http://www\.iqiyi\.com/.+\.html)"',
318 album_id
= self
._search
_regex
(
319 r
'albumId\s*:\s*(\d+),', webpage
, 'album ID')
320 album_title
= self
._search
_regex
(
321 r
'data-share-title="([^"]+)"', webpage
, 'album title', fatal
=False)
323 entries
= list(map(self
.url_result
, links
))
325 # Start from 2 because links in the first page are already on webpage
326 for page_num
in itertools
.count(2):
327 pagelist_page
= self
._download
_webpage
(
328 'http://cache.video.qiyi.com/jp/avlist/%s/%d/%d/' % (album_id
, page_num
, PAGE_SIZE
),
330 note
='Download playlist page %d' % page_num
,
331 errnote
='Failed to download playlist page %d' % page_num
)
332 pagelist
= self
._parse
_json
(
333 remove_start(pagelist_page
, 'var tvInfoJs='), album_id
)
334 vlist
= pagelist
['data']['vlist']
336 entries
.append(self
.url_result(item
['vurl']))
337 if len(vlist
) < PAGE_SIZE
:
340 return self
.playlist_result(entries
, album_id
, album_title
)
342 def _real_extract(self
, url
):
343 webpage
= self
._download
_webpage
(
344 url
, 'temp_id', note
='download video page')
346 # There's no simple way to determine whether an URL is a playlist or not
347 # Sometimes there are playlist links in individual videos, so treat it
348 # as a single video first
349 tvid
= self
._search
_regex
(
350 r
'data-(?:player|shareplattrigger)-tvid\s*=\s*[\'"](\d+)', webpage, 'tvid', default=None)
352 playlist_result = self._extract_playlist(webpage)
354 return playlist_result
355 raise ExtractorError('Can\'t find any video')
357 video_id = self._search_regex(
358 r'data-(?:player|shareplattrigger)-videoid\s*=\s*[\'"]([a
-f\d
]+)', webpage, 'video_id
')
362 raw_data = self.get_raw_data(tvid, video_id)
364 if raw_data['code
'] != 'A00000
':
365 if raw_data['code
'] == 'A00111
':
366 self.raise_geo_restricted()
367 raise ExtractorError('Unable to load data
. Error code
: ' + raw_data['code
'])
369 data = raw_data['data
']
371 for stream in data['vidl
']:
372 if 'm3utx
' not in stream:
374 vd = compat_str(stream['vd
'])
376 'url
': stream['m3utx
'],
379 'quality
': self._FORMATS_MAP.get(vd, -1),
380 'protocol
': 'm3u8_native
',
386 self._sleep(5, video_id)
388 title = (get_element_by_id('widget
-videotitle
', webpage)
389 or clean_html(get_element_by_attribute('class', 'mod
-play
-tit
', webpage))
390 or self._html_search_regex(r'<span
[^
>]+data
-videochanged
-title
="word"[^
>]*>([^
<]+)</span
>', webpage, 'title
'))
399 class IqIE(InfoExtractor):
401 IE_DESC = 'International version of iQiyi
'
402 _VALID_URL = r'https?
://(?
:www\
.)?iq\
.com
/play
/(?
:[\w
%-]*-)?
(?P
<id>\w
+)'
404 'url
': 'https
://www
.iq
.com
/play
/one
-piece
-episode
-1000-1ma1i6ferf4
',
405 'md5
': '2d7caf6eeca8a32b407094b33b757d39
',
409 'title
': '航海王 第
1000集
',
410 'description
': 'Subtitle available on Sunday
4PM(GMT
+8)
.',
412 'timestamp
': 1637488203,
413 'upload_date
': '20211121',
414 'episode_number
': 1000,
415 'episode
': 'Episode
1000',
416 'series
': 'One Piece
',
418 'average_rating
': float,
423 'expected_warnings
': ['format
is restricted
']
425 # VIP-restricted video
426 'url
': 'https
://www
.iq
.com
/play
/mermaid
-in-the
-fog
-2021-gbdpx13bs4
',
427 'only_matching
': True
453 console.log(page.evaluate(function() {
454 var tvid = "%(tvid)s"; var vid = "%(vid)s"; var src = "%(src)s";
455 var uid = "%(uid)s"; var dfp = "%(dfp)s"; var mode = "%(mode)s"; var lang = "%(lang)s";
456 var bid_list = %(bid_list)s; var ut_list = %(ut_list)s; var tm = new Date().getTime();
457 var cmd5x_func = %(cmd5x_func)s; var cmd5x_exporter = {}; cmd5x_func({}, cmd5x_exporter, {}); var cmd5x = cmd5x_exporter.cmd5x;
458 var authKey = cmd5x(cmd5x('') + tm + '' + tvid);
459 var k_uid = Array.apply(null, Array(32)).map(function() {return Math.floor(Math.random() * 15).toString(16)}).join('');
461 bid_list.forEach(function(bid) {
486 'prio
': JSON.stringify({
500 'k_ft1
': 141287244169348,
501 'k_ft4
': 34359746564,
503 'bop
': JSON.stringify({
509 for (var prop in query) {
510 enc_params.push(encodeURIComponent(prop) + '=' + encodeURIComponent(query[prop]));
512 ut_list.forEach(function(ut) {
513 enc_params.push('ut
=' + ut);
515 var dash_path = '/dash?
' + enc_params.join('&'); dash_path += '&vf
=' + cmd5x(dash_path);
516 dash_paths[bid] = dash_path;
518 return JSON.stringify(dash_paths);
523 def _extract_vms_player_js(self, webpage, video_id):
524 player_js_cache = self.cache.load('iq
', 'player_js
')
526 return player_js_cache
527 webpack_js_url = self._proto_relative_url(self._search_regex(
528 r'<script src
="((?:https?)?//stc.iqiyipic.com/_next/static/chunks/webpack-\w+\.js)"', webpage, 'webpack URL
'))
529 webpack_js = self._download_webpage(webpack_js_url, video_id, note='Downloading webpack JS
', errnote='Unable to download webpack JS
')
530 webpack_map1, webpack_map2 = [self._parse_json(js_map, video_id, transform_source=js_to_json) for js_map in self._search_regex(
531 r'\
(({[^}
]*})\
[\w
+\
][^\
)]*\
)\s
*\
+\s
*["\']\.["\']\s
*\
+\s
*({[^}
]*})\
[\w
+\
]\
+["\']\.js', webpack_js, 'JS locations', group=(1, 2))]
532 for module_index in reversed(list(webpack_map2.keys())):
533 module_js = self._download_webpage(
534 f'https://stc.iqiyipic.com/_next/static/chunks/{webpack_map1.get(module_index, module_index)}.{webpack_map2[module_index]}.js',
535 video_id, note=f'Downloading #{module_index} module JS', errnote='Unable to download module JS', fatal=False) or ''
536 if 'vms request' in module_js:
537 self.cache.store('iq', 'player_js', module_js)
539 raise ExtractorError('Unable to extract player JS')
def _extract_cmd5x_function(self, webpage, video_id):
    """Pull the cmd5x signature function source out of the VMS player JS."""
    player_js = self._extract_vms_player_js(webpage, video_id)
    return self._search_regex(
        r',\s*(function\s*\([^\)]*\)\s*{\s*var _qda.+_qdc\(\)\s*})\s*,',
        player_js, 'signature function')
545 def _update_bid_tags(self, webpage, video_id):
546 extracted_bid_tags = self._parse_json(
548 r'arguments\[1\][^,]*,\s*function\s*\([^\)]*\)\s*{\s*"use strict";?\s*var \w=({.+}})\s*,\s*\w\s*=\s*{\s*getNewVd',
549 self._extract_vms_player_js(webpage, video_id), 'video tags', default=''),
550 video_id, transform_source=js_to_json, fatal=False)
551 if not extracted_bid_tags:
554 bid: traverse_obj(extracted_bid_tags, (bid, 'value'), expected_type=str, default=self._BID_TAGS.get(bid))
555 for bid in extracted_bid_tags.keys()
def _get_cookie(self, name, default=None):
    """Return the value of cookie *name* for iq.com, or *default* when missing/falsy."""
    cookie = self._get_cookies('https://iq.com/').get(name)
    # Preserve truthiness check (not an `is None` test) of the original.
    if not cookie:
        return default
    return cookie.value
562 def _real_extract(self, url):
563 video_id = self._match_id(url)
564 webpage = self._download_webpage(url, video_id)
565 self._update_bid_tags(webpage, video_id)
567 next_props = self._search_nextjs_data(webpage, video_id)['props']
568 page_data = next_props['initialState']['play']
569 video_info = page_data['curVideoInfo']
573 self._get_cookie('I00002', '{}'), video_id, transform_source=compat_urllib_parse_unquote, fatal=False),
574 ('data', 'uid'), default=0)
577 vip_data = self._download_json(
578 'https://pcw-api.iq.com/api/vtype', video_id, note='Downloading VIP data', errnote='Unable to download VIP data', query={
581 'modeCode': self._get_cookie('mod', 'intl'),
582 'langCode': self._get_cookie('lang', 'en_us'),
583 'deviceId': self._get_cookie('QC005', '')
585 ut_list = traverse_obj(vip_data, ('data', 'all_vip', ..., 'vipType'), expected_type=str_or_none, default=[])
589 # bid 0 as an initial format checker
590 dash_paths = self._parse_json(PhantomJSwrapper(self, timeout=120_000).get(
591 url, note2='Executing signature code (this may take a couple minutes)',
592 html='<!DOCTYPE html>', video_id=video_id, jscode=self._DASH_JS % {
593 'tvid': video_info['tvId'],
594 'vid': video_info['vid'],
595 'src': traverse_obj(next_props, ('initialProps', 'pageProps', 'ptid'),
596 expected_type=str, default='04022001010011000000'),
598 'dfp': self._get_cookie('dfp', ''),
599 'mode': self._get_cookie('mod', 'intl'),
600 'lang': self._get_cookie('lang', 'en_us'),
601 'bid_list': '[' + ','.join(['0', *self._BID_TAGS.keys()]) + ']',
602 'ut_list': '[' + ','.join(ut_list) + ']',
603 'cmd5x_func': self._extract_cmd5x_function(webpage, video_id),
604 })[1].strip(), video_id)
606 formats, subtitles = [], {}
607 initial_format_data = self._download_json(
608 urljoin('https://cache-video.iq.com', dash_paths['0']), video_id,
609 note='Downloading initial video format info', errnote='Unable to download initial video format info')['data']
611 preview_time = traverse_obj(
612 initial_format_data, ('boss_ts', (None, 'data'), ('previewTime', 'rtime')), expected_type=float_or_none, get_all=False)
613 if traverse_obj(initial_format_data, ('boss_ts', 'data', 'prv'), expected_type=int_or_none):
614 self.report_warning('This preview video is limited%s' % format_field(preview_time, None, ' to %s seconds'))
616 # TODO: Extract audio-only formats
617 for bid in set(traverse_obj(initial_format_data, ('program', 'video', ..., 'bid'), expected_type=str_or_none, default=[])):
618 dash_path = dash_paths.get(bid)
620 self.report_warning(f'Unknown format id: {bid}. It is currently not being extracted')
622 format_data = traverse_obj(self._download_json(
623 urljoin('https://cache-video.iq.com', dash_path), video_id,
624 note=f'Downloading format data for {self._BID_TAGS[bid]}', errnote='Unable to download format data',
625 fatal=False), 'data', expected_type=dict)
627 video_format = traverse_obj(format_data, ('program', 'video', lambda _, v: str(v['bid']) == bid),
628 expected_type=dict, default=[], get_all=False) or {}
629 extracted_formats = []
630 if video_format.get('m3u8Url'):
631 extracted_formats.extend(self._extract_m3u8_formats(
632 urljoin(format_data.get('dm3u8', 'https://cache-m.iq.com/dc/dt/'), video_format['m3u8Url']),
633 'mp4', m3u8_id=bid, fatal=False))
634 if video_format.get('mpdUrl'):
635 # TODO: Properly extract mpd hostname
636 extracted_formats.extend(self._extract_mpd_formats(
637 urljoin(format_data.get('dm3u8', 'https://cache-m.iq.com/dc/dt/'), video_format['mpdUrl']),
638 mpd_id=bid, fatal=False))
639 if video_format.get('m3u8'):
640 ff = video_format.get('ff', 'ts')
642 m3u8_formats, _ = self._parse_m3u8_formats_and_subtitles(
643 video_format['m3u8'], ext='mp4', m3u8_id=bid, fatal=False)
644 extracted_formats.extend(m3u8_formats)
646 mpd_data = traverse_obj(
647 self._parse_json(video_format['m3u8'], video_id, fatal=False), ('payload', ..., 'data'), expected_type=str)
650 mpd_formats, _ = self._parse_mpd_formats_and_subtitles(
651 mpd_data, bid, format_data.get('dm3u8', 'https://cache-m.iq.com/dc/dt/'))
652 extracted_formats.extend(mpd_formats)
654 self.report_warning(f'{ff} formats are currently not supported')
656 if not extracted_formats:
657 if video_format.get('s'):
658 self.report_warning(f'{self._BID_TAGS[bid]} format is restricted')
660 self.report_warning(f'Unable to extract {self._BID_TAGS[bid]} format')
661 for f in extracted_formats:
663 'quality': qualities(list(self._BID_TAGS.keys()))(bid),
664 'format_note': self._BID_TAGS[bid],
665 **parse_resolution(video_format.get('scrsz'))
667 formats.extend(extracted_formats)
669 for sub_format in traverse_obj(initial_format_data, ('program', 'stl', ...), expected_type=dict, default=[]):
670 lang = self._LID_TAGS.get(str_or_none(sub_format.get('lid')), sub_format.get('_name'))
671 subtitles.setdefault(lang, []).extend([{
673 'url': urljoin(initial_format_data.get('dstl', 'http://meta.video.iqiyi.com'), sub_format[format_key])
674 } for format_key, format_ext in [('srt', 'srt'), ('webvtt', 'vtt')] if sub_format.get(format_key)])
676 extra_metadata = page_data.get('albumInfo') if video_info.get('albumId') and page_data.get('albumInfo') else video_info
679 'title': video_info['name'],
681 'subtitles': subtitles,
682 'description': video_info.get('mergeDesc'),
683 'duration': parse_duration(video_info.get('len')),
684 'age_limit': parse_age_limit(video_info.get('rating')),
685 'average_rating': traverse_obj(page_data, ('playScoreInfo', 'score'), expected_type=float_or_none),
686 'timestamp': parse_iso8601(video_info.get('isoUploadDate')),
687 'categories': traverse_obj(extra_metadata, ('videoTagMap', ..., ..., 'name'), expected_type=str),
688 'cast': traverse_obj(extra_metadata, ('actorArr', ..., 'name'), expected_type=str),
689 'episode_number': int_or_none(video_info.get('order')) or None,
690 'series': video_info.get('albumName'),
694 class IqAlbumIE(InfoExtractor):
695 IE_NAME = 'iq.com:album'
696 _VALID_URL = r'https?://(?:www\.)?iq\.com/album/(?:[\w%-]*-)?(?P<id>\w+)'
698 'url': 'https://www.iq.com/album/one-piece-1999-1bk9icvr331',
701 'title': 'One Piece',
702 'description': 'Subtitle available on Sunday 4PM(GMT+8).'
704 'playlist_mincount': 238
707 'url': 'https://www.iq.com/album/九龙城寨-2021-22yjnij099k',
712 'description': 'md5:8a09f50b8ba0db4dc69bc7c844228044',
714 'timestamp': 1641911371,
715 'upload_date': '20220111',
717 'cast': ['Shi Yan Neng', 'Yu Lang', 'Peter lv', 'Sun Zi Jun', 'Yang Xiao Bo'],
719 'average_rating': float,
721 'expected_warnings': ['format is restricted']
724 def _entries(self, album_id_num, page_ranges, album_id=None, mode_code='intl', lang_code='en_us'):
725 for page_range in page_ranges:
726 page = self._download_json(
727 f'https://pcw-api.iq.com/api/episodeListSource/{album_id_num}', album_id,
728 note=f'Downloading video list episodes {page_range.get("msg", "")}',
729 errnote='Unable to download video list', query={
731 'modeCode': mode_code,
732 'langCode': lang_code,
733 'endOrder': page_range['to'],
734 'startOrder': page_range['from']
736 for video in page['data']['epg']:
737 yield self.url_result('https://www.iq.com/play/%s' % (video.get('playLocSuffix') or video['qipuIdStr']),
738 IqIE.ie_key(), video.get('qipuIdStr'), video.get('name'))
def _real_extract(self, url):
    """Resolve an iq.com album URL into a single-video result or a paged playlist."""
    album_id = self._match_id(url)
    webpage = self._download_webpage(url, album_id)
    next_data = self._search_nextjs_data(webpage, album_id)
    album_data = next_data['props']['initialState']['album']['videoAlbumInfo']

    # A single-video "album" is just delegated to the regular video extractor.
    if album_data.get('videoType') == 'singleVideo':
        return self.url_result('https://www.iq.com/play/%s' % album_id, IqIE.ie_key())

    page_props = ('props', 'initialProps', 'pageProps')
    mode_code = traverse_obj(next_data, page_props + ('modeCode',))
    lang_code = traverse_obj(next_data, page_props + ('langCode',))
    entries = self._entries(
        album_data['albumId'], album_data['totalPageRange'], album_id, mode_code, lang_code)
    return self.playlist_result(entries, album_id, album_data.get('name'), album_data.get('desc'))