]> jfr.im git - yt-dlp.git/blob - yt_dlp/extractor/lrt.py
[cleanup] Upgrade syntax
[yt-dlp.git] / yt_dlp / extractor / lrt.py
1 from .common import InfoExtractor
2 from ..utils import (
3 clean_html,
4 merge_dicts,
5 )
6
7
class LRTIE(InfoExtractor):
    """Extractor for VOD recordings on lrt.lt (Lithuanian National Radio and TV).

    Matches /mediateka/irasas/<id> pages; metadata comes from the site's
    stream_url/vod/media_info JSON service, merged with JWPlayer config and
    the page's JSON-LD data.
    """
    IE_NAME = 'lrt.lt'
    _VALID_URL = r'https?://(?:www\.)?lrt\.lt(?P<path>/mediateka/irasas/(?P<id>[0-9]+))'
    _TESTS = [{
        # m3u8 download
        'url': 'https://www.lrt.lt/mediateka/irasas/2000127261/greita-ir-gardu-sicilijos-ikvepta-klasikiniu-makaronu-su-baklazanais-vakariene',
        'md5': '85cb2bb530f31d91a9c65b479516ade4',
        'info_dict': {
            'id': '2000127261',
            'ext': 'mp4',
            'title': 'Greita ir gardu: Sicilijos įkvėpta klasikinių makaronų su baklažanais vakarienė',
            'description': 'md5:ad7d985f51b0dc1489ba2d76d7ed47fa',
            'duration': 3035,
            'timestamp': 1604079000,
            'upload_date': '20201030',
        },
    }, {
        # direct mp3 download
        'url': 'http://www.lrt.lt/mediateka/irasas/1013074524/',
        'md5': '389da8ca3cad0f51d12bed0c844f6a0a',
        'info_dict': {
            'id': '1013074524',
            'ext': 'mp3',
            'title': 'Kita tema 2016-09-05 15:05',
            'description': 'md5:1b295a8fc7219ed0d543fc228c931fb5',
            'duration': 3008,
            'view_count': int,
            'like_count': int,
        },
    }]

    def _extract_js_var(self, webpage, var_name, default):
        """Return the quoted value of a JS `var_name = "..."` assignment.

        Handles either quote style; falls back to *default* when the
        variable is not found in *webpage*.
        """
        # NOTE: var_name is always a plain identifier here ('main_url',
        # 'media_info_url'), so it is safe to interpolate into the pattern.
        return self._search_regex(
            rf'{var_name}\s*=\s*(["\'])((?:(?!\1).)+)\1',
            webpage, var_name.replace('_', ' '), default, group=2)

    def _real_extract(self, url):
        path, video_id = self._match_valid_url(url).groups()
        webpage = self._download_webpage(url, video_id)

        # The page embeds the media path and the metadata endpoint as JS
        # variables; fall back to the URL path / the known VOD service.
        media_url = self._extract_js_var(webpage, 'main_url', path)
        media = self._download_json(self._extract_js_var(
            webpage, 'media_info_url',
            'https://www.lrt.lt/servisai/stream_url/vod/media_info/'),
            video_id, query={'url': media_url})
        jw_data = self._parse_jwplayer_data(
            media['playlist_item'], video_id, base_url=url)

        json_ld_data = self._search_json_ld(webpage, video_id)

        # Keep only tags that actually carry a non-empty name.
        tags = [tag['name'] for tag in (media.get('tags') or []) if tag.get('name')]

        clean_info = {
            'description': clean_html(media.get('content')),
            'tags': tags,
        }

        # merge_dicts keeps the first non-empty value, so our cleaned fields
        # take precedence over JWPlayer and JSON-LD data.
        return merge_dicts(clean_info, jw_data, json_ld_data)