]>
Commit | Line | Data |
---|---|---|
f5884801 PH |
1 | import datetime |
2 | import json | |
3 | import os | |
4 | import re | |
5 | import socket | |
6 | ||
7 | from .common import InfoExtractor | |
8 | from ..utils import ( | |
9 | compat_http_client, | |
10 | compat_parse_qs, | |
11 | compat_str, | |
12 | compat_urllib_error, | |
13 | compat_urllib_parse_urlparse, | |
14 | compat_urllib_request, | |
15 | ||
16 | ExtractorError, | |
17 | unescapeHTML, | |
18 | ) | |
19 | ||
20 | ||
class BlipTVIE(InfoExtractor):
    """Information extractor for blip.tv single-video pages.

    Handles regular video URLs, /play/ embed URLs (followed through their
    redirect to the canonical page) and api.swf fragment URLs.
    """

    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(.+)$'
    _URL_EXT = r'^.*\.([a-z0-9]+)$'
    IE_NAME = u'blip.tv'
    _TEST = {
        u'url': u'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352',
        u'file': u'5779306.m4v',
        u'md5': u'80baf1ec5c3d2019037c1c707d676b9f',
        u'info_dict': {
            u"upload_date": u"20111205",
            u"description": u"md5:9bc31f227219cde65e47eeec8d2dc596",
            u"uploader": u"Comic Book Resources - CBR TV",
            u"title": u"CBR EXCLUSIVE: \"Gotham City Imposters\" Bats VS Jokerz Short 3"
        }
    }

    def report_direct_download(self, title):
        """Report information extraction."""
        self.to_screen(u'%s: Direct download detected' % title)

    def _real_extract(self, url):
        """Extract video information for *url*.

        Returns a single-element list containing the info dict.
        Raises ExtractorError on invalid URLs, network failures, or
        unparseable video metadata.
        """
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # api.swf URLs carry the video id in the fragment; rewrite them to
        # a /play/ URL so the redirect-following branch below handles them.
        # See https://github.com/rg3/youtube-dl/issues/857
        api_mobj = re.match(r'http://a\.blip\.tv/api\.swf#(?P<video_id>[\d\w]+)', url)
        if api_mobj is not None:
            url = 'http://blip.tv/play/g_%s' % api_mobj.group('video_id')
        urlp = compat_urllib_parse_urlparse(url)
        if urlp.path.startswith('/play/'):
            # /play/ URLs redirect to a page whose URL fragment contains a
            # 'file' query parameter pointing at the real item; follow the
            # redirect, derive the canonical URL and recurse once on it.
            request = compat_urllib_request.Request(url)
            response = compat_urllib_request.urlopen(request)
            redirecturl = response.geturl()
            rurlp = compat_urllib_parse_urlparse(redirecturl)
            file_id = compat_parse_qs(rurlp.fragment)['file'][0].rpartition('/')[2]
            url = 'http://blip.tv/a/a-' + file_id
            return self._real_extract(url)

        # Ask blip.tv for the JSON representation of the page.
        if '?' in url:
            cchar = '&'
        else:
            cchar = '?'
        json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
        request = compat_urllib_request.Request(json_url)
        # blip.tv only serves the JSON variant to this user agent.
        request.add_header('User-Agent', 'iTunes/10.6.1')
        self.report_extraction(mobj.group(1))
        info = None
        try:
            urlh = compat_urllib_request.urlopen(request)
            if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download
                basename = url.split('/')[-1]
                title, ext = os.path.splitext(basename)
                # FIX: os.path.splitext returns text (str) on Python 3,
                # which has no .decode(); calling it unconditionally crashed
                # every direct download. Only decode when we got bytes.
                if isinstance(title, bytes):
                    title = title.decode('UTF-8')
                ext = ext.replace('.', '')
                self.report_direct_download(title)
                info = {
                    'id': title,
                    'url': url,
                    'uploader': None,
                    'upload_date': None,
                    'title': title,
                    'ext': ext,
                    'urlhandle': urlh
                }
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
        if info is None: # Regular URL
            try:
                json_code_bytes = urlh.read()
                json_code = json_code_bytes.decode('utf-8')
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                raise ExtractorError(u'Unable to read video info webpage: %s' % compat_str(err))

            try:
                json_data = json.loads(json_code)
                # The payload is either wrapped in a 'Post' envelope or bare.
                if 'Post' in json_data:
                    data = json_data['Post']
                else:
                    data = json_data

                upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
                if 'additionalMedia' in data:
                    # Pick the variant with the greatest height as "best".
                    formats = sorted(data['additionalMedia'], key=lambda f: int(f['media_height']))
                    best_format = formats[-1]
                    video_url = best_format['url']
                else:
                    video_url = data['media']['url']
                umobj = re.match(self._URL_EXT, video_url)
                if umobj is None:
                    raise ValueError('Can not determine filename extension')
                ext = umobj.group(1)

                info = {
                    'id': data['item_id'],
                    'url': video_url,
                    'uploader': data['display_name'],
                    'upload_date': upload_date,
                    'title': data['title'],
                    'ext': ext,
                    'format': data['media']['mimeType'],
                    'thumbnail': data['thumbnailUrl'],
                    'description': data['description'],
                    'player_url': data['embedUrl'],
                    'user_agent': 'iTunes/10.6.1',
                }
            except (ValueError, KeyError) as err:
                raise ExtractorError(u'Unable to parse video information: %s' % repr(err))

        return [info]
134 | ||
135 | ||
class BlipTVUserIE(InfoExtractor):
    """Information Extractor for blip.tv users (all videos of one account)."""

    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
    _PAGE_SIZE = 12
    IE_NAME = u'blip.tv:user'

    def _real_extract(self, url):
        """Return a playlist of all videos belonging to the user in *url*.

        Raises ExtractorError when the URL does not match or the user id
        cannot be found on the user's page.
        """
        # Extract username
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        username = mobj.group(1)

        page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'

        page = self._download_webpage(url, username, u'Downloading user page')
        mobj = re.search(r'data-users-id="([^"]+)"', page)
        # FIX: the original dereferenced mobj unconditionally; a page
        # without data-users-id crashed with a bare AttributeError instead
        # of an informative extractor error.
        if mobj is None:
            raise ExtractorError(u'Unable to extract user id for %s' % username)
        page_base = page_base % mobj.group(1)

        # Download video ids using BlipTV Ajax calls. Result size per
        # query is limited (currently to 12 videos) so we need to query
        # page by page until there are no video ids - it means we got
        # all of them.

        video_ids = []
        pagenum = 1

        while True:
            url = page_base + "&page=" + str(pagenum)
            page = self._download_webpage(url, username,
                                          u'Downloading video ids from page %d' % pagenum)

            # Extract video identifiers
            ids_in_page = []

            for mobj in re.finditer(r'href="/([^"]+)"', page):
                if mobj.group(1) not in ids_in_page:
                    ids_in_page.append(unescapeHTML(mobj.group(1)))

            video_ids.extend(ids_in_page)

            # A little optimization - if current page is not
            # "full", ie. does not contain PAGE_SIZE video ids then
            # we can assume that this page is the last one - there
            # are no more ids on further pages - no need to query
            # again.

            if len(ids_in_page) < self._PAGE_SIZE:
                break

            pagenum += 1

        urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids]
        url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls]
        return [self.playlist_result(url_entries, playlist_title=username)]