# coding: utf-8
from __future__ import unicode_literals

import itertools

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    ExtractorError,
    smuggle_url,
    traverse_obj,
    unified_strdate,
    unsmuggle_url,
)
class VoicyBaseIE(InfoExtractor):
    """Shared helpers for the Voicy extractors: API access and playlist parsing."""

    def _extract_from_playlist_data(self, value):
        """Build a multi_video info dict from one Voicy playlist entry.

        `value` is an API dict carrying playlist metadata plus a 'VoiceData'
        list of individual audio articles.
        """
        voice_id = compat_str(value.get('PlaylistId'))
        # day_first=False: the API's 'Published' field uses year-first ordering
        upload_date = unified_strdate(value.get('Published'), False)
        items = [self._extract_single_article(voice_data) for voice_data in value['VoiceData']]
        return {
            '_type': 'multi_video',
            'entries': items,
            'id': voice_id,
            'title': compat_str(value.get('PlaylistName')),
            'uploader': value.get('SpeakerName'),
            'uploader_id': compat_str(value.get('SpeakerId')),
            'channel': value.get('ChannelName'),
            'channel_id': compat_str(value.get('ChannelId')),
            'upload_date': upload_date,
        }

    def _extract_single_article(self, entry):
        """Map one Voicy article dict to an info dict with HLS and direct-file formats."""
        formats = [{
            # HLS rendition of the audio article
            'url': entry['VoiceHlsFile'],
            'format_id': 'hls',
            'ext': 'm4a',
            'acodec': 'aac',
            'vcodec': 'none',
            'protocol': 'm3u8_native',
        }, {
            # plain progressive download; assumed MP3 — TODO confirm against live API
            'url': entry['VoiceFile'],
            'format_id': 'mp3',
            'ext': 'mp3',
            'acodec': 'mp3',
            'vcodec': 'none',
        }]
        self._sort_formats(formats)
        return {
            'id': compat_str(entry.get('ArticleId')),
            'title': entry.get('ArticleTitle'),
            'description': entry.get('MediaName'),
            'formats': formats,
        }

    def _call_api(self, url, video_id, **kwargs):
        """Fetch a Voicy API JSON document and return its 'Value' payload.

        Raises ExtractorError when the response 'Status' is non-zero,
        preferring the API-supplied error message when one is present.
        """
        response = self._download_json(url, video_id, **kwargs)
        if response.get('Status') != 0:
            message = traverse_obj(response, ('Value', 'Error', 'Message'), expected_type=compat_str)
            if not message:
                # fall back to the numeric status when the API gave no message
                message = 'There was an error in the response: %d' % response.get('Status')
            raise ExtractorError(message, expected=False)
        return response.get('Value')
class VoicyIE(VoicyBaseIE):
    """Extractor for a single Voicy playlist page (one 'voice' on a channel)."""
    IE_NAME = 'voicy'
    _VALID_URL = r'https?://voicy\.jp/channel/(?P<channel_id>\d+)/(?P<id>\d+)'
    ARTICLE_LIST_API_URL = 'https://vmw.api.voicy.jp/articles_list?channel_id=%s&pid=%s'
    _TESTS = [{
        'url': 'https://voicy.jp/channel/1253/122754',
        'info_dict': {
            'id': '122754',
            'title': '1/21(木)声日記:ついに原稿終わった!!',
            'uploader': 'ちょまど@ ITエンジニアなオタク',
            'uploader_id': '7339',
        },
        'playlist_mincount': 9,
    }]

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        voice_id = mobj.group('id')
        channel_id = mobj.group('channel_id')
        # VoicyChannelIE smuggles the already-fetched playlist data into the
        # URL so this extractor can skip the extra API round-trip.
        url, article_list = unsmuggle_url(url)
        if not article_list:
            article_list = self._call_api(self.ARTICLE_LIST_API_URL % (channel_id, voice_id), voice_id)
        return self._extract_from_playlist_data(article_list)
class VoicyChannelIE(VoicyBaseIE):
    """Extractor for a whole Voicy channel: pages through every playlist on it."""
    IE_NAME = 'voicy:channel'
    _VALID_URL = r'https?://voicy\.jp/channel/(?P<id>\d+)'
    PROGRAM_LIST_API_URL = 'https://vmw.api.voicy.jp/program_list/all?channel_id=%s&limit=20&public_type=3%s'
    _TESTS = [{
        'url': 'https://voicy.jp/channel/1253/',
        'info_dict': {
            'id': '1253',
            'title': 'ゆるふわ日常ラジオ #ちょまラジ',
            'uploader': 'ちょまど@ ITエンジニアなオタク',
            'uploader_id': '7339',
        },
        'playlist_mincount': 54,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to VoicyIE for URLs that also carry a playlist id segment.
        return not VoicyIE.suitable(url) and super(VoicyChannelIE, cls).suitable(url)

    def _entries(self, channel_id):
        """Yield raw playlist dicts for the channel, following API pagination."""
        pager = ''
        for count in itertools.count(1):
            article_list = self._call_api(
                self.PROGRAM_LIST_API_URL % (channel_id, pager), channel_id,
                note='Paging #%d' % count)
            playlist_data = article_list.get('PlaylistData')
            if not playlist_data:
                break
            yield from playlist_data
            # Next page is keyed off the last item of the current one.
            last = playlist_data[-1]
            pager = '&pid=%d&p_date=%s&play_count=%s' % (
                last['PlaylistId'], last['Published'], last['PlayCount'])

    def _real_extract(self, url):
        channel_id = self._match_id(url)
        articles = self._entries(channel_id)

        # Peek at the first item to derive a playlist title without
        # exhausting the generator.
        first_article = next(articles, None)
        title = traverse_obj(first_article, ('ChannelName', ), expected_type=compat_str)
        speaker_name = traverse_obj(first_article, ('SpeakerName', ), expected_type=compat_str)
        if not title and speaker_name:
            title = 'Uploads from %s' % speaker_name
        if not title:
            title = 'Uploads from channel ID %s' % channel_id

        # Re-attach the peeked item so no entry is lost.
        articles = itertools.chain([first_article], articles) if first_article else articles

        playlist = (
            # Smuggle the playlist data so VoicyIE can reuse it without refetching.
            self.url_result(
                smuggle_url('https://voicy.jp/channel/%s/%d' % (channel_id, value['PlaylistId']), value),
                VoicyIE.ie_key())
            for value in articles)
        return {
            '_type': 'playlist',
            'entries': playlist,
            'id': channel_id,
            'title': title,
            'channel': speaker_name,
            'channel_id': channel_id,
        }