+import functools
import itertools
import json
import random
import re
-import string
import time
import uuid
UnsupportedError,
UserNotLive,
determine_ext,
+ filter_dict,
format_field,
int_or_none,
join_nonempty,
merge_dicts,
+ mimetype2ext,
+ parse_qs,
qualities,
remove_start,
srt_subtitles_timecode,
# "app id": aweme = 1128, trill = 1180, musical_ly = 1233, universal = 0
'aid': '0',
}
- _KNOWN_APP_INFO = [
- '7351144126450059040',
- '7351149742343391009',
- '7351153174894626592',
- ]
_APP_INFO_POOL = None
_APP_INFO = None
_APP_USER_AGENT = None
- @property
+ # Cached per-instance: list of "app_info" extractor-arg values used to
+ # authenticate against the mobile API (replaces the old hardcoded list).
+ @functools.cached_property
+ def _KNOWN_APP_INFO(self):
+ # If we have a genuine device ID, we may not need any IID
+ default = [''] if self._KNOWN_DEVICE_ID else []
+ return self._configuration_arg('app_info', default, ie_key=TikTokIE)
+
+ @functools.cached_property
+ def _KNOWN_DEVICE_ID(self):
+ # User-supplied "device_id" extractor-arg, or None when not provided
+ return self._configuration_arg('device_id', [None], ie_key=TikTokIE)[0]
+
+ @functools.cached_property
+ def _DEVICE_ID(self):
+ # Prefer the genuine user-supplied device ID; otherwise generate one
+ # random 19-digit ID and cache it so every API call in this session
+ # presents the same device identity (some endpoints dislike it varying)
+ return self._KNOWN_DEVICE_ID or str(random.randint(7250000000000000000, 7351147085025500000))
+
+ # now cached: the configured value cannot change during a run
+ @functools.cached_property
def _API_HOSTNAME(self):
return self._configuration_arg(
- 'api_hostname', ['api22-normal-c-useast2a.tiktokv.com'], ie_key=TikTokIE)[0]
+ # NOTE(review): default hostname switched useast2a -> useast1a;
+ # presumably the old endpoint stopped responding — confirm
+ 'api_hostname', ['api16-normal-c-useast1a.tiktokv.com'], ie_key=TikTokIE)[0]
def _get_next_app_info(self):
if self._APP_INFO_POOL is None:
for key, default in self._APP_INFO_DEFAULTS.items()
if key != 'iid'
}
- app_info_list = (
- self._configuration_arg('app_info', ie_key=TikTokIE)
- or random.sample(self._KNOWN_APP_INFO, len(self._KNOWN_APP_INFO)))
self._APP_INFO_POOL = [
{**defaults, **dict(
(k, v) for k, v in zip(self._APP_INFO_DEFAULTS, app_info.split('/')) if v
- )} for app_info in app_info_list
+ )} for app_info in self._KNOWN_APP_INFO
]
if not self._APP_INFO_POOL:
}, query=query)
def _build_api_query(self, query):
- return {
+ return filter_dict({
**query,
'device_platform': 'android',
'os': 'android',
'locale': 'en',
'ac2': 'wifi5g',
'uoo': '1',
+ 'carrier_region': 'US',
'op_region': 'US',
'build_number': self._APP_INFO['app_version'],
'region': 'US',
'ts': int(time.time()),
- 'iid': self._APP_INFO['iid'],
- 'device_id': random.randint(7250000000000000000, 7351147085025500000),
+ 'iid': self._APP_INFO.get('iid'),
+ 'device_id': self._DEVICE_ID,
'openudid': ''.join(random.choices('0123456789abcdef', k=16)),
- }
+ })
def _call_api(self, ep, query, video_id, fatal=True,
note='Downloading API JSON', errnote='Unable to download API page'):
raise ExtractorError('Unable to find video in feed', video_id=aweme_id)
return self._parse_aweme_video_app(aweme_detail)
- def _get_subtitles(self, aweme_detail, aweme_id):
+ def _extract_web_data_and_status(self, url, video_id, fatal=True):
+ # Download the webpage and extract (video_data, status) from whichever
+ # embedded JSON blob is present, trying in order: universal rehydration
+ # data, sigi state, next.js data. Returns ({} , None) when nothing is
+ # found and fatal is False; raises ExtractorError when fatal is True.
+ webpage = self._download_webpage(url, video_id, headers={'User-Agent': 'Mozilla/5.0'}, fatal=fatal) or ''
+ video_data, status = {}, None
+
+ if universal_data := self._get_universal_data(webpage, video_id):
+ self.write_debug('Found universal data for rehydration')
+ status = traverse_obj(universal_data, ('webapp.video-detail', 'statusCode', {int})) or 0
+ video_data = traverse_obj(universal_data, ('webapp.video-detail', 'itemInfo', 'itemStruct', {dict}))
+
+ elif sigi_data := self._get_sigi_state(webpage, video_id):
+ self.write_debug('Found sigi state data')
+ status = traverse_obj(sigi_data, ('VideoPage', 'statusCode', {int})) or 0
+ video_data = traverse_obj(sigi_data, ('ItemModule', video_id, {dict}))
+
+ elif next_data := self._search_nextjs_data(webpage, video_id, default={}):
+ self.write_debug('Found next.js data')
+ status = traverse_obj(next_data, ('props', 'pageProps', 'statusCode', {int})) or 0
+ video_data = traverse_obj(next_data, ('props', 'pageProps', 'itemInfo', 'itemStruct', {dict}))
+
+ elif fatal:
+ raise ExtractorError('Unable to extract webpage video data')
+
+ return video_data, status
+
+
+ def _get_subtitles(self, aweme_detail, aweme_id, user_url):
# TODO: Extract text positioning info
subtitles = {}
# aweme/detail endpoint subs
})
# webpage subs
if not subtitles:
- for caption in traverse_obj(aweme_detail, ('video', 'subtitleInfos', ...), expected_type=dict):
- if not caption.get('Url'):
- continue
+ if user_url: # only _parse_aweme_video_app needs to extract the webpage here
+ aweme_detail, _ = self._extract_web_data_and_status(
+ f'{user_url}/video/{aweme_id}', aweme_id, fatal=False)
+ for caption in traverse_obj(aweme_detail, ('video', 'subtitleInfos', lambda _, v: v['Url'])):
subtitles.setdefault(caption.get('LanguageCodeName') or 'en', []).append({
'ext': remove_start(caption.get('Format'), 'web'),
'url': caption['Url'],
})
return subtitles
+ def _parse_url_key(self, url_key):
+ # Parse format metadata out of a TikTok "url_key" of the shape
+ # "v<...>_<codec>_<res>p_<bitrate>". Returns a dict of format fields
+ # plus the resolution string, or ({}, None) when the key doesn't match.
+ format_id, codec, res, bitrate = self._search_regex(
+ r'v[^_]+_(?P<id>(?P<codec>[^_]+)_(?P<res>\d+p)_(?P<bitrate>\d+))', url_key,
+ 'url key', default=(None, None, None, None), group=('id', 'codec', 'res', 'bitrate'))
+ if not format_id:
+ return {}, None
+ return {
+ # 'bytevc1' is TikTok's label for h265/HEVC streams
+ 'vcodec': 'h265' if codec == 'bytevc1' else codec,
+ 'format_id': format_id,
+ # bitrate in the key is in bps; tbr is expressed in kbps
+ 'tbr': int_or_none(bitrate, scale=1000) or None,
+ 'quality': qualities(self.QUALITIES)(res),
+ }, res
+
def _parse_aweme_video_app(self, aweme_detail):
aweme_id = aweme_detail['aweme_id']
video_info = aweme_detail['video']
-
- def parse_url_key(url_key):
- format_id, codec, res, bitrate = self._search_regex(
- r'v[^_]+_(?P<id>(?P<codec>[^_]+)_(?P<res>\d+p)_(?P<bitrate>\d+))', url_key,
- 'url key', default=(None, None, None, None), group=('id', 'codec', 'res', 'bitrate'))
- if not format_id:
- return {}, None
- return {
- 'format_id': format_id,
- 'vcodec': 'h265' if codec == 'bytevc1' else codec,
- 'tbr': int_or_none(bitrate, scale=1000) or None,
- 'quality': qualities(self.QUALITIES)(res),
- }, res
-
known_resolutions = {}
def audio_meta(url):
} if ext == 'mp3' or '-music-' in url else {}
def extract_addr(addr, add_meta={}):
- parsed_meta, res = parse_url_key(addr.get('url_key', ''))
+ parsed_meta, res = self._parse_url_key(addr.get('url_key', ''))
is_bytevc2 = parsed_meta.get('vcodec') == 'bytevc2'
if res:
known_resolutions.setdefault(res, {}).setdefault('height', int_or_none(addr.get('height')))
'acodec': 'aac',
'source_preference': -2 if 'aweme/v1' in url else -1, # Downloads from API might get blocked
**add_meta, **parsed_meta,
- # bytevc2 is bytedance's proprietary (unplayable) video codec
+ # bytevc2 is bytedance's own custom h266/vvc codec, as-of-yet unplayable
'preference': -100 if is_bytevc2 else -1,
'format_note': join_nonempty(
add_meta.get('format_note'), '(API)' if 'aweme/v1' in url else None,
formats = []
width = int_or_none(video_info.get('width'))
height = int_or_none(video_info.get('height'))
+ ratio = try_call(lambda: width / height) or 0.5625
if video_info.get('play_addr'):
formats.extend(extract_addr(video_info['play_addr'], {
'format_id': 'play_addr',
'format_id': 'download_addr',
'format_note': 'Download video%s' % (', watermarked' if video_info.get('has_watermark') else ''),
'vcodec': 'h264',
- 'width': dl_width or width,
- 'height': try_call(lambda: int(dl_width / 0.5625)) or height, # download_addr['height'] is wrong
+ 'width': dl_width,
+ 'height': try_call(lambda: int(dl_width / ratio)), # download_addr['height'] is wrong
'preference': -2 if video_info.get('has_watermark') else -1,
}))
if video_info.get('play_addr_h264'):
'album': str_or_none(music_info.get('album')) or None,
'artists': re.split(r'(?:, | & )', music_author) if music_author else None,
'formats': formats,
- 'subtitles': self.extract_subtitles(aweme_detail, aweme_id),
+ 'subtitles': self.extract_subtitles(aweme_detail, aweme_id, user_url),
'thumbnails': thumbnails,
'duration': int_or_none(traverse_obj(video_info, 'duration', ('download_addr', 'duration')), scale=1000),
'availability': self._availability(
formats = []
width = int_or_none(video_info.get('width'))
height = int_or_none(video_info.get('height'))
+ ratio = try_call(lambda: width / height) or 0.5625
+ COMMON_FORMAT_INFO = {
+ 'ext': 'mp4',
+ 'vcodec': 'h264',
+ 'acodec': 'aac',
+ }
+
+ for bitrate_info in traverse_obj(video_info, ('bitrateInfo', lambda _, v: v['PlayAddr']['UrlList'])):
+ format_info, res = self._parse_url_key(
+ traverse_obj(bitrate_info, ('PlayAddr', 'UrlKey', {str})) or '')
+ # bytevc2 is bytedance's own custom h266/vvc codec, as-of-yet unplayable
+ is_bytevc2 = format_info.get('vcodec') == 'bytevc2'
+ format_info.update({
+ 'format_note': 'UNPLAYABLE' if is_bytevc2 else None,
+ 'preference': -100 if is_bytevc2 else -1,
+ 'filesize': traverse_obj(bitrate_info, ('PlayAddr', 'DataSize', {int_or_none})),
+ })
+
+ if dimension := (res and int(res[:-1])):
+ if dimension == 540: # '540p' is actually 576p
+ dimension = 576
+ if ratio < 1: # portrait: res/dimension is width
+ y = int(dimension / ratio)
+ format_info.update({
+ 'width': dimension,
+ 'height': y - (y % 2),
+ })
+ else: # landscape: res/dimension is height
+ x = int(dimension * ratio)
+ format_info.update({
+ 'width': x - (x % 2),
+ 'height': dimension,
+ })
+
+ for video_url in traverse_obj(bitrate_info, ('PlayAddr', 'UrlList', ..., {url_or_none})):
+ formats.append({
+ **COMMON_FORMAT_INFO,
+ **format_info,
+ 'url': self._proto_relative_url(video_url),
+ })
+
+ # We don't have res string for play formats, but need quality for sorting & de-duplication
+ play_quality = traverse_obj(formats, (lambda _, v: v['width'] == width, 'quality', any))
for play_url in traverse_obj(video_info, ('playAddr', ((..., 'src'), None), {url_or_none})):
formats.append({
+ **COMMON_FORMAT_INFO,
+ 'format_id': 'play',
'url': self._proto_relative_url(play_url),
- 'ext': 'mp4',
'width': width,
'height': height,
+ 'quality': play_quality,
})
for download_url in traverse_obj(video_info, (('downloadAddr', ('download', 'url')), {url_or_none})):
formats.append({
+ **COMMON_FORMAT_INFO,
'format_id': 'download',
'url': self._proto_relative_url(download_url),
- 'ext': 'mp4',
- 'width': width,
- 'height': height,
})
self._remove_duplicate_formats(formats)
+ for f in traverse_obj(formats, lambda _, v: 'unwatermarked' not in v['url']):
+ f.update({
+ 'format_note': join_nonempty(f.get('format_note'), 'watermarked', delim=', '),
+ 'preference': f.get('preference') or -2,
+ })
+
+ # Is it a slideshow with only audio for download?
+ if not formats and traverse_obj(music_info, ('playUrl', {url_or_none})):
+ audio_url = music_info['playUrl']
+ ext = traverse_obj(parse_qs(audio_url), (
+ 'mime_type', -1, {lambda x: x.replace('_', '/')}, {mimetype2ext})) or 'm4a'
+ formats.append({
+ 'format_id': 'audio',
+ 'url': self._proto_relative_url(audio_url),
+ 'ext': ext,
+ 'acodec': 'aac' if ext == 'm4a' else ext,
+ 'vcodec': 'none',
+ })
+
thumbnails = []
for thumb_url in traverse_obj(aweme_detail, (
(None, 'video'), ('thumbnail', 'cover', 'dynamicCover', 'originCover'), {url_or_none})):
return {
'id': video_id,
+ **traverse_obj(music_info, {
+ 'track': ('title', {str}),
+ 'album': ('album', {str}, {lambda x: x or None}),
+ 'artists': ('authorName', {str}, {lambda x: [x] if x else None}),
+ 'duration': ('duration', {int_or_none}),
+ }),
**traverse_obj(aweme_detail, {
'title': ('desc', {str}),
'description': ('desc', {str}),
- 'duration': ('video', 'duration', {int_or_none}),
+ # audio-only slideshows have a video duration of 0 and an actual audio duration
+ 'duration': ('video', 'duration', {int_or_none}, {lambda x: x or None}),
'timestamp': ('createTime', {int_or_none}),
}),
**traverse_obj(author_info or aweme_detail, {
'repost_count': 'shareCount',
'comment_count': 'commentCount',
}, expected_type=int_or_none),
- **traverse_obj(music_info, {
- 'track': ('title', {str}),
- 'album': ('album', {str}, {lambda x: x or None}),
- 'artists': ('authorName', {str}, {lambda x: [x] if x else None}),
- }),
'channel_id': channel_id,
'uploader_url': user_url,
'formats': formats,
+ 'subtitles': self.extract_subtitles(aweme_detail, video_id, None),
'thumbnails': thumbnails,
'http_headers': {
'Referer': webpage_url,
def _real_extract(self, url):
video_id, user_id = self._match_valid_url(url).group('id', 'user_id')
- try:
- return self._extract_aweme_app(video_id)
- except ExtractorError as e:
- e.expected = True
- self.report_warning(f'{e}; trying with webpage')
- url = self._create_url(user_id, video_id)
- webpage = self._download_webpage(url, video_id, headers={'User-Agent': 'Mozilla/5.0'})
-
- if universal_data := self._get_universal_data(webpage, video_id):
- self.write_debug('Found universal data for rehydration')
- status = traverse_obj(universal_data, ('webapp.video-detail', 'statusCode', {int})) or 0
- video_data = traverse_obj(universal_data, ('webapp.video-detail', 'itemInfo', 'itemStruct', {dict}))
-
- elif sigi_data := self._get_sigi_state(webpage, video_id):
- self.write_debug('Found sigi state data')
- status = traverse_obj(sigi_data, ('VideoPage', 'statusCode', {int})) or 0
- video_data = traverse_obj(sigi_data, ('ItemModule', video_id, {dict}))
-
- elif next_data := self._search_nextjs_data(webpage, video_id, default='{}'):
- self.write_debug('Found next.js data')
- status = traverse_obj(next_data, ('props', 'pageProps', 'statusCode', {int})) or 0
- video_data = traverse_obj(next_data, ('props', 'pageProps', 'itemInfo', 'itemStruct', {dict}))
+ if self._KNOWN_APP_INFO:
+ try:
+ return self._extract_aweme_app(video_id)
+ except ExtractorError as e:
+ e.expected = True
+ self.report_warning(f'{e}; trying with webpage')
- else:
- raise ExtractorError('Unable to extract webpage video data')
+ url = self._create_url(user_id, video_id)
+ video_data, status = self._extract_web_data_and_status(url, video_id)
if video_data and status == 0:
return self._parse_aweme_video_web(video_data, url, video_id)
'max_cursor': 0,
'min_cursor': 0,
'retry_type': 'no_retry',
- 'device_id': ''.join(random.choices(string.digits, k=19)), # Some endpoints don't like randomized device_id, so it isn't directly set in _call_api.
+ 'device_id': self._DEVICE_ID, # Some endpoints don't like randomized device_id, so it isn't directly set in _call_api.
}
for page in itertools.count(1):
'cursor': 0,
'count': 20,
'type': 5,
- 'device_id': ''.join(random.choices(string.digits, k=19))
+ 'device_id': self._DEVICE_ID,
}
for page in itertools.count(1):