]>
Commit | Line | Data |
---|---|---|
f5884801 PH |
1 | import datetime |
2 | import json | |
f5884801 PH |
3 | import re |
4 | import socket | |
5 | ||
6 | from .common import InfoExtractor | |
7 | from ..utils import ( | |
8 | compat_http_client, | |
9 | compat_parse_qs, | |
10 | compat_str, | |
11 | compat_urllib_error, | |
12 | compat_urllib_parse_urlparse, | |
13 | compat_urllib_request, | |
14 | ||
15 | ExtractorError, | |
16 | unescapeHTML, | |
17 | ) | |
18 | ||
19 | ||
class BlipTVIE(InfoExtractor):
    """Information extractor for blip.tv"""

    _VALID_URL = r'^(?:https?://)?(?:www\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(.+)$'
    _URL_EXT = r'^.*\.([a-z0-9]+)$'
    IE_NAME = u'blip.tv'
    _TEST = {
        u'url': u'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352',
        u'file': u'5779306.m4v',
        u'md5': u'80baf1ec5c3d2019037c1c707d676b9f',
        u'info_dict': {
            u"upload_date": u"20111205",
            u"description": u"md5:9bc31f227219cde65e47eeec8d2dc596",
            u"uploader": u"Comic Book Resources - CBR TV",
            u"title": u"CBR EXCLUSIVE: \"Gotham City Imposters\" Bats VS Jokerz Short 3"
        }
    }

    def report_direct_download(self, title):
        """Report information extraction."""
        self.to_screen(u'%s: Direct download detected' % title)

    def _real_extract(self, url):
        """Extract video metadata for a blip.tv URL via the site's JSON API.

        /play/ and api.swf URLs are first resolved (through a redirect) to a
        canonical http://blip.tv/a/a-<id> URL, then re-extracted recursively.
        """
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # api.swf fragment URLs are rewritten to /play/ URLs first.
        # See https://github.com/rg3/youtube-dl/issues/857
        api_mobj = re.match(r'http://a\.blip\.tv/api\.swf#(?P<video_id>[\d\w]+)', url)
        if api_mobj is not None:
            url = 'http://blip.tv/play/g_%s' % api_mobj.group('video_id')

        parsed = compat_urllib_parse_urlparse(url)
        if parsed.path.startswith('/play/'):
            # /play/ URLs redirect to a page whose fragment carries the real
            # file path; pull the id out of it and start over.
            response = self._request_webpage(url, None, False)
            redirect_parsed = compat_urllib_parse_urlparse(response.geturl())
            file_id = compat_parse_qs(redirect_parsed.fragment)['file'][0].rpartition('/')[2]
            return self._real_extract('http://blip.tv/a/a-' + file_id)

        # Ask the site for its JSON representation of the page.
        cchar = '&' if '?' in url else '?'
        json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
        request = compat_urllib_request.Request(json_url)
        # blip.tv serves direct media info to the iTunes user agent.
        request.add_header('User-Agent', 'iTunes/10.6.1')
        self.report_extraction(mobj.group(1))
        urlh = self._request_webpage(request, None, False,
            u'unable to download video info webpage')

        try:
            json_code = urlh.read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to read video info webpage: %s' % compat_str(err))

        try:
            json_data = json.loads(json_code)
            # Some responses wrap the payload in a 'Post' key.
            data = json_data['Post'] if 'Post' in json_data else json_data

            upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
            if 'additionalMedia' in data:
                # Pick the rendition with the greatest height.
                best_format = sorted(data['additionalMedia'], key=lambda f: int(f['media_height']))[-1]
                video_url = best_format['url']
            else:
                video_url = data['media']['url']

            ext_mobj = re.match(self._URL_EXT, video_url)
            if ext_mobj is None:
                raise ValueError('Can not determine filename extension')

            return {
                'id': compat_str(data['item_id']),
                'url': video_url,
                'uploader': data['display_name'],
                'upload_date': upload_date,
                'title': data['title'],
                'ext': ext_mobj.group(1),
                'format': data['media']['mimeType'],
                'thumbnail': data['thumbnailUrl'],
                'description': data['description'],
                'player_url': data['embedUrl'],
                'user_agent': 'iTunes/10.6.1',
            }
        except (ValueError, KeyError) as err:
            raise ExtractorError(u'Unable to parse video information: %s' % repr(err))
f5884801 PH |
111 | |
112 | ||
class BlipTVUserIE(InfoExtractor):
    """Information Extractor for blip.tv users.

    Resolves a user page URL (or bliptvuser:NAME) to the numeric user id,
    then pages through the mobile episode-list endpoint collecting every
    video URL, and returns them as a playlist of BlipTV entries.
    """

    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
    # Number of results the Ajax endpoint returns per page.
    _PAGE_SIZE = 12
    IE_NAME = u'blip.tv:user'

    def _real_extract(self, url):
        # Extract username
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        username = mobj.group(1)

        page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'

        page = self._download_webpage(url, username, u'Downloading user page')
        mobj = re.search(r'data-users-id="([^"]+)"', page)
        if mobj is None:
            # Fix: re.search returns None when the attribute is missing from
            # the page; fail with a descriptive ExtractorError instead of an
            # opaque AttributeError on .group().
            raise ExtractorError(u'Unable to extract user id for %s' % username)
        page_base = page_base % mobj.group(1)

        # Download video ids using BlipTV Ajax calls. Result size per
        # query is limited (currently to 12 videos) so we need to query
        # page by page until there are no video ids - it means we got
        # all of them.

        video_ids = []
        pagenum = 1

        while True:
            url = page_base + "&page=" + str(pagenum)
            page = self._download_webpage(url, username,
                u'Downloading video ids from page %d' % pagenum)

            # Extract video identifiers
            ids_in_page = []

            for mobj in re.finditer(r'href="/([^"]+)"', page):
                if mobj.group(1) not in ids_in_page:
                    ids_in_page.append(unescapeHTML(mobj.group(1)))

            video_ids.extend(ids_in_page)

            # A little optimization - if current page is not
            # "full", ie. does not contain PAGE_SIZE video ids then
            # we can assume that this page is the last one - there
            # are no more ids on further pages - no need to query
            # again.

            if len(ids_in_page) < self._PAGE_SIZE:
                break

            pagenum += 1

        urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids]
        url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls]
        return [self.playlist_result(url_entries, playlist_title=username)]