]> jfr.im git - yt-dlp.git/blob - youtube_dl/extractor/wat.py
Merge remote-tracking branch 'JGjorgji/fix-leading-zeroes'
[yt-dlp.git] / youtube_dl / extractor / wat.py
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5 import time
6 import hashlib
7
8 from .common import InfoExtractor
9 from ..utils import (
10 unified_strdate,
11 )
12
13
class WatIE(InfoExtractor):
    """Extractor for wat.tv video pages.

    Resolves the numeric "real id" embedded in the page, fetches the media
    metadata JSON, and builds a list of direct MP4 format URLs (Mobile, SD
    and, when available, HD) using wat.tv's token-protected URL endpoint.
    Multipart videos are returned as a playlist of per-chapter entries.
    """
    _VALID_URL = r'http://www\.wat\.tv/video/(?P<display_id>.*)-(?P<short_id>.*?)_.*?\.html'
    IE_NAME = 'wat.tv'
    _TEST = {
        'url': 'http://www.wat.tv/video/soupe-figues-l-orange-aux-epices-6z1uz_2hvf7_.html',
        'md5': 'ce70e9223945ed26a8056d413ca55dc9',
        'info_dict': {
            'id': '11713067',
            'display_id': 'soupe-figues-l-orange-aux-epices',
            'ext': 'mp4',
            'title': 'Soupe de figues à l\'orange et aux épices',
            'description': 'Retrouvez l\'émission "Petits plats en équilibre", diffusée le 18 août 2014.',
            'upload_date': '20140819',
            'duration': 120,
        },
    }

    def download_video_info(self, real_id):
        """Download and return the 'media' metadata dict for a video id.

        'contentv4' is used on the website, but it also returns the related
        videos, which we don't need, so we stick with contentv3.
        """
        info = self._download_json('http://www.wat.tv/interface/contentv3/' + real_id, real_id)
        return info['media']

    def _real_extract(self, url):
        def real_id_for_chapter(chapter):
            # A chapter's 'tc_start' looks like '<real_id>-<timecode>';
            # the part before the dash is the chapter's own video id.
            return chapter['tc_start'].split('-')[0]
        mobj = re.match(self._VALID_URL, url)
        short_id = mobj.group('short_id')
        display_id = mobj.group('display_id')
        webpage = self._download_webpage(url, display_id or short_id)
        # The numeric id used by the JSON API is embedded in an analytics tag.
        real_id = self._search_regex(r'xtpage = ".*-(.*?)";', webpage, 'real id')

        video_info = self.download_video_info(real_id)

        if video_info.get('geolock'):
            self.report_warning(
                'This content is marked as not available in your area. Trying anyway ..')

        chapters = video_info['chapters']
        first_chapter = chapters[0]
        files = video_info['files']
        first_file = files[0]

        # If the first chapter's id differs from the page's id, the page is a
        # container for a multipart video: return one playlist entry per part.
        if real_id_for_chapter(first_chapter) != real_id:
            self.to_screen('Multipart video detected')
            chapter_urls = []
            for chapter in chapters:
                chapter_id = real_id_for_chapter(chapter)
                # Yes, when this chapter is processed by WatIE,
                # it will download the info again
                chapter_info = self.download_video_info(chapter_id)
                chapter_urls.append(chapter_info['url'])
            entries = [self.url_result(chapter_url) for chapter_url in chapter_urls]
            return self.playlist_result(entries, real_id, video_info['title'])

        upload_date = None
        if 'date_diffusion' in first_chapter:
            upload_date = unified_strdate(first_chapter['date_diffusion'])
        # Otherwise we can continue and extract just one part, we have to use
        # the short id for getting the video url

        # The Mobile format needs no token and is always available.
        formats = [{
            'url': 'http://wat.tv/get/android5/%s.mp4' % real_id,
            'format_id': 'Mobile',
        }]

        # (format_id, URL path prefix) pairs for the token-protected endpoint.
        fmts = [('SD', 'web')]
        if first_file.get('hasHD'):
            fmts.append(('HD', 'webhd'))

        def compute_token(param):
            # Token is md5(magic + path + hex timestamp) followed by the
            # timestamp itself, as expected by wat.tv's /get endpoint.
            timestamp = '%08x' % int(time.time())
            magic = '9b673b13fa4682ed14c3cfa5af5310274b514c4133e9b3a81e6e3aba009l2564'
            return '%s/%s' % (hashlib.md5((magic + param + timestamp).encode('ascii')).hexdigest(), timestamp)

        for format_id, webid_prefix in fmts:
            webid = '/%s/%s' % (webid_prefix, real_id)
            video_url = self._download_webpage(
                'http://www.wat.tv/get%s?token=%s&getURL=1' % (webid, compute_token(webid)),
                real_id,
                'Downloading %s video URL' % format_id,
                'Failed to download %s video URL' % format_id,
                False)
            # The endpoint may refuse a quality (e.g. geo-locked); skip it
            # rather than failing the whole extraction.
            if not video_url:
                continue
            formats.append({
                'url': video_url,
                'ext': 'mp4',
                'format_id': format_id,
            })

        return {
            'id': real_id,
            'display_id': display_id,
            'title': first_chapter['title'],
            'thumbnail': first_chapter['preview'],
            'description': first_chapter['description'],
            'view_count': video_info['views'],
            'upload_date': upload_date,
            'duration': first_file['duration'],
            'formats': formats,
        }