from __future__ import unicode_literals

import functools
import json

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    determine_ext,
    ExtractorError,
    int_or_none,
    mimetype2ext,
    try_get,
    urljoin,
    OnDemandPagedList,
)
class LBRYBaseIE(InfoExtractor):
    """Shared plumbing for the lbry.tv / odysee.com extractors.

    Provides the JSON-RPC proxy call, URI resolution, permanent-URL
    construction and common metadata parsing used by both the single-stream
    and channel extractors.
    """

    _BASE_URL_REGEX = r'https?://(?:www\.)?(?:lbry\.tv|odysee\.com)/'
    # Claim ids are hex strings; URLs may carry any 1-40 char prefix of one.
    _CLAIM_ID_REGEX = r'[0-9a-f]{1,40}'
    # A claim name, optionally followed by ':<claim id prefix>'.
    _OPT_CLAIM_ID = '[^:/?#&]+(?::%s)?' % _CLAIM_ID_REGEX
    _SUPPORTED_STREAM_TYPES = ['video', 'audio']

    def _call_api_proxy(self, method, display_id, params, resource):
        """POST a JSON-RPC request to the LBRY API proxy.

        Returns the 'result' member of the JSON response; *resource* is only
        used in the progress message shown while downloading.
        """
        return self._download_json(
            'https://api.lbry.tv/api/v1/proxy',
            display_id, 'Downloading %s JSON metadata' % resource,
            headers={'Content-Type': 'application/json-rpc'},
            data=json.dumps({
                'method': method,
                'params': params,
            }).encode())['result']

    def _resolve_url(self, url, display_id, resource):
        """Resolve an lbry:// URI to its claim metadata dict."""
        # The 'resolve' result is keyed by the requested URI.
        return self._call_api_proxy(
            'resolve', display_id, {'urls': url}, resource)[url]

    def _permanent_url(self, url, claim_name, claim_id):
        """Build the canonical /<name>:<claim id> URL on the same host as *url*."""
        return urljoin(url, '/%s:%s' % (claim_name, claim_id))

    def _parse_stream(self, stream, url):
        """Extract the metadata fields common to all stream claims.

        *stream* is a resolved claim dict; *url* supplies the host for the
        derived channel URL. Returns an info dict (no 'id'/'title'/'url' —
        callers fill those in).
        """
        stream_value = stream.get('value') or {}
        stream_type = stream_value.get('stream_type')
        source = stream_value.get('source') or {}
        # Type-specific metadata lives under a key named after the type.
        media = stream_value.get(stream_type) or {}
        signing_channel = stream.get('signing_channel') or {}
        channel_name = signing_channel.get('name')
        channel_claim_id = signing_channel.get('claim_id')
        channel_url = None
        if channel_name and channel_claim_id:
            channel_url = self._permanent_url(url, channel_name, channel_claim_id)

        info = {
            'thumbnail': try_get(stream_value, lambda x: x['thumbnail']['url'], compat_str),
            'description': stream_value.get('description'),
            'license': stream_value.get('license'),
            'timestamp': int_or_none(stream.get('timestamp')),
            'tags': stream_value.get('tags'),
            'duration': int_or_none(media.get('duration')),
            'channel': try_get(signing_channel, lambda x: x['value']['title']),
            'channel_id': channel_claim_id,
            'channel_url': channel_url,
            'ext': determine_ext(source.get('name')) or mimetype2ext(source.get('media_type')),
            'filesize': int_or_none(source.get('size')),
        }
        if stream_type == 'audio':
            info['vcodec'] = 'none'
        else:
            info.update({
                'width': int_or_none(media.get('width')),
                'height': int_or_none(media.get('height')),
            })
        return info
class LBRYIE(LBRYBaseIE):
    """Extractor for a single lbry.tv / odysee.com stream."""

    IE_NAME = 'lbry'
    # Matches '$/<page>/<name>/<id>', '@channel/name' and bare 'name' URLs.
    _VALID_URL = LBRYBaseIE._BASE_URL_REGEX + r'(?P<id>\$/[^/]+/[^/]+/{1}|@{0}/{0}|(?!@){0})'.format(LBRYBaseIE._OPT_CLAIM_ID, LBRYBaseIE._CLAIM_ID_REGEX)
    _TESTS = [{
        # Video
        'url': 'https://lbry.tv/@Mantega:1/First-day-LBRY:1',
        'md5': '65bd7ec1f6744ada55da8e4c48a2edf9',
        'info_dict': {
            'id': '17f983b61f53091fb8ea58a9c56804e4ff8cff4d',
            'ext': 'mp4',
            'title': 'First day in LBRY? Start HERE!',
            'description': 'md5:f6cb5c704b332d37f5119313c2c98f51',
            'timestamp': 1595694354,
            'upload_date': '20200725',
        }
    }, {
        # Audio
        'url': 'https://lbry.tv/@LBRYFoundation:0/Episode-1:e',
        'md5': 'c94017d3eba9b49ce085a8fad6b98d00',
        'info_dict': {
            'id': 'e7d93d772bd87e2b62d5ab993c1c3ced86ebb396',
            'ext': 'mp3',
            'title': 'The LBRY Foundation Community Podcast Episode 1 - Introduction, Streaming on LBRY, Transcoding',
            'description': 'md5:661ac4f1db09f31728931d7b88807a61',
            'timestamp': 1591312601,
            'upload_date': '20200604',
            'channel': 'The LBRY Foundation',
            'channel_id': '0ed629d2b9c601300cacf7eabe9da0be79010212',
            'channel_url': 'https://lbry.tv/@LBRYFoundation:0ed629d2b9c601300cacf7eabe9da0be79010212',
        }
    }, {
        'url': 'https://odysee.com/@BrodieRobertson:5/apple-is-tracking-everything-you-do-on:e',
        'only_matching': True,
    }, {
        'url': "https://odysee.com/@ScammerRevolts:b0/I-SYSKEY'D-THE-SAME-SCAMMERS-3-TIMES!:b",
        'only_matching': True,
    }, {
        'url': 'https://lbry.tv/Episode-1:e7d93d772bd87e2b62d5ab993c1c3ced86ebb396',
        'only_matching': True,
    }, {
        'url': 'https://lbry.tv/$/embed/Episode-1/e7d93d772bd87e2b62d5ab993c1c3ced86ebb396',
        'only_matching': True,
    }, {
        'url': 'https://lbry.tv/Episode-1:e7',
        'only_matching': True,
    }, {
        'url': 'https://lbry.tv/@LBRYFoundation/Episode-1',
        'only_matching': True,
    }, {
        'url': 'https://lbry.tv/$/download/Episode-1/e7d93d772bd87e2b62d5ab993c1c3ced86ebb396',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        if display_id.startswith('$/'):
            # '$/embed/<name>/<id>' and '$/download/<name>/<id>' pages:
            # drop the '$/<page>/' prefix and join name and id with ':'.
            display_id = display_id.split('/', 2)[-1].replace('/', ':')
        else:
            display_id = display_id.replace(':', '#')
        uri = 'lbry://' + display_id
        result = self._resolve_url(uri, display_id, 'stream')
        result_value = result['value']
        if result_value.get('stream_type') not in self._SUPPORTED_STREAM_TYPES:
            raise ExtractorError('Unsupported URL', expected=True)
        claim_id = result['claim_id']
        title = result_value['title']
        streaming_url = self._call_api_proxy(
            'get', claim_id, {'uri': uri}, 'streaming url')['streaming_url']
        info = self._parse_stream(result, url)
        info.update({
            'id': claim_id,
            'title': title,
            'url': streaming_url,
        })
        return info
class LBRYChannelIE(LBRYBaseIE):
    """Extractor for an lbry.tv / odysee.com channel (paginated playlist)."""

    IE_NAME = 'lbry:channel'
    _VALID_URL = LBRYBaseIE._BASE_URL_REGEX + r'(?P<id>@%s)/?(?:[?#&]|$)' % LBRYBaseIE._OPT_CLAIM_ID
    _TESTS = [{
        'url': 'https://lbry.tv/@LBRYFoundation:0',
        'info_dict': {
            'id': '0ed629d2b9c601300cacf7eabe9da0be79010212',
            'title': 'The LBRY Foundation',
            'description': 'Channel for the LBRY Foundation. Follow for updates and news.',
        },
        'playlist_count': 29,
    }, {
        'url': 'https://lbry.tv/@LBRYFoundation',
        'only_matching': True,
    }]
    _PAGE_SIZE = 50

    def _fetch_page(self, claim_id, url, page):
        """Yield url-type entries for one claim_search result page.

        *page* is the 0-based index supplied by OnDemandPagedList; the API
        expects 1-based page numbers.
        """
        page += 1
        result = self._call_api_proxy(
            'claim_search', claim_id, {
                'channel_ids': [claim_id],
                'claim_type': 'stream',
                'no_totals': True,
                'page': page,
                'page_size': self._PAGE_SIZE,
                'stream_types': self._SUPPORTED_STREAM_TYPES,
            }, 'page %d' % page)
        for item in (result.get('items') or []):
            stream_claim_name = item.get('name')
            stream_claim_id = item.get('claim_id')
            if not (stream_claim_name and stream_claim_id):
                continue
            info = self._parse_stream(item, url)
            info.update({
                '_type': 'url',
                'id': stream_claim_id,
                'title': try_get(item, lambda x: x['value']['title']),
                'url': self._permanent_url(url, stream_claim_name, stream_claim_id),
            })
            yield info

    def _real_extract(self, url):
        # '#' is the claim-id separator expected by the lbry:// scheme.
        display_id = self._match_id(url).replace(':', '#')
        result = self._resolve_url(
            'lbry://' + display_id, display_id, 'channel')
        claim_id = result['claim_id']
        entries = OnDemandPagedList(
            functools.partial(self._fetch_page, claim_id, url),
            self._PAGE_SIZE)
        result_value = result.get('value') or {}
        return self.playlist_result(
            entries, claim_id, result_value.get('title'),
            result_value.get('description'))