1 | from __future__ import unicode_literals | |
2 | ||
3 | import errno | |
4 | import os | |
5 | import socket | |
6 | import time | |
7 | import re | |
8 | ||
9 | from .common import FileDownloader | |
10 | from ..compat import compat_urllib_error | |
11 | from ..utils import ( | |
12 | ContentTooShortError, | |
13 | encodeFilename, | |
14 | sanitize_open, | |
15 | sanitized_Request, | |
16 | write_xattr, | |
17 | XAttrMetadataError, | |
18 | XAttrUnavailableError, | |
19 | ) | |
20 | ||
21 | ||
class HttpFD(FileDownloader):
    """Downloader for plain HTTP(S) URLs.

    Supports resuming interrupted downloads via HTTP Range requests,
    retrying after transient (5xx) server errors and connection resets,
    rate limiting, adaptive block sizing and progress-hook reporting.
    """

    def real_download(self, filename_or_stream, info_dict):
        """Download ``info_dict['url']`` to a file or a writable stream.

        Args:
            filename_or_stream: destination file name, or any object with a
                ``write`` method; in the latter case data is written to it
                directly and the reported filename is '-'.
            info_dict: must contain 'url'; 'http_headers' (dict), if present,
                is merged into the request headers. On success 'filetime' is
                stored back into info_dict when the 'updatetime' option is on.

        Returns:
            True on success, False after a reported (non-raised) error.

        Raises:
            ContentTooShortError: if the connection closed before
                Content-Length bytes were received.
        """
        url = info_dict['url']
        filename = filename_or_stream
        stream = None
        # A stream destination is detected duck-typed via its write() method.
        if hasattr(filename_or_stream, 'write'):
            stream = filename_or_stream
            filename = '-'
        tmpfilename = self.temp_name(filename)

        # Do not include the Accept-Encoding header: this internal marker
        # header tells the opener to skip compression.
        headers = {'Youtubedl-no-compression': 'True'}
        add_headers = info_dict.get('http_headers')
        if add_headers:
            headers.update(add_headers)
        # basic_request stays Range-free; it is used below to probe the real
        # file size when a resume attempt is answered with HTTP 416.
        basic_request = sanitized_Request(url, None, headers)
        request = sanitized_Request(url, None, headers)

        is_test = self.params.get('test', False)

        if is_test:
            # In test mode, only request the first _TEST_FILE_SIZE bytes.
            request.add_header('Range', 'bytes=0-%s' % str(self._TEST_FILE_SIZE - 1))

        # Establish possible resume length from a leftover partial file.
        if os.path.isfile(encodeFilename(tmpfilename)):
            resume_len = os.path.getsize(encodeFilename(tmpfilename))
        else:
            resume_len = 0

        open_mode = 'wb'
        if resume_len != 0:
            if self.params.get('continuedl', True):
                self.report_resuming_byte(resume_len)
                request.add_header('Range', 'bytes=%d-' % resume_len)
                open_mode = 'ab'  # append to the existing partial file
            else:
                resume_len = 0

        count = 0
        retries = self.params.get('retries', 0)
        while count <= retries:
            # Establish connection
            try:
                data = self.ydl.urlopen(request)
                # When trying to resume, Content-Range HTTP header of response has to be checked
                # to match the value of requested Range HTTP header. This is due to a webservers
                # that don't support resuming and serve a whole file with no Content-Range
                # set in response despite of requested Range (see
                # https://github.com/rg3/youtube-dl/issues/6057#issuecomment-126129799)
                if resume_len > 0:
                    content_range = data.headers.get('Content-Range')
                    if content_range:
                        content_range_m = re.search(r'bytes (\d+)-', content_range)
                        # Content-Range is present and matches requested Range, resume is possible
                        if content_range_m and resume_len == int(content_range_m.group(1)):
                            break
                    # Content-Range is either not present or invalid. Assuming remote webserver is
                    # trying to send the whole file, resume is not possible, so wiping the local file
                    # and performing entire redownload
                    self.report_unable_to_resume()
                    resume_len = 0
                    open_mode = 'wb'
                break
            except (compat_urllib_error.HTTPError, ) as err:
                if (err.code < 500 or err.code >= 600) and err.code != 416:
                    # Unexpected HTTP error: anything that is neither a 5xx
                    # (retried below) nor a 416 is fatal.
                    raise
                elif err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        data = self.ydl.urlopen(basic_request)
                        content_length = data.info()['Content-Length']
                    except (compat_urllib_error.HTTPError, ) as err:
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None and
                                (resume_len - 100 < int(content_length) < resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation to the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs less than 100 bytes from
                            # the one in the hard drive.
                            self.report_file_already_downloaded(filename)
                            self.try_rename(tmpfilename, filename)
                            self._hook_progress({
                                'filename': filename,
                                'status': 'finished',
                                'downloaded_bytes': resume_len,
                                'total_bytes': resume_len,
                            })
                            return True
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            resume_len = 0
                            open_mode = 'wb'
                            break
            except socket.error as e:
                if e.errno != errno.ECONNRESET:
                    # Connection reset is no problem, just retry
                    raise

            # Retry
            count += 1
            if count <= retries:
                self.report_retry(count, retries)

        if count > retries:
            # All connection attempts failed with retryable errors.
            self.report_error('giving up after %s retries' % retries)
            return False

        data_len = data.info().get('Content-length', None)

        # Range HTTP header may be ignored/unsupported by a webserver
        # (e.g. extractor/scivee.py, extractor/bambuser.py).
        # However, for a test we still would like to download just a piece of a file.
        # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
        # block size when downloading a file.
        if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
            data_len = self._TEST_FILE_SIZE

        if data_len is not None:
            # Total expected size includes the already-downloaded prefix.
            data_len = int(data_len) + resume_len
            min_data_len = self.params.get('min_filesize')
            max_data_len = self.params.get('max_filesize')
            if min_data_len is not None and data_len < min_data_len:
                self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                return False
            if max_data_len is not None and data_len > max_data_len:
                self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                return False

        byte_counter = 0 + resume_len
        block_size = self.params.get('buffersize', 1024)
        start = time.time()

        # measure time over whole while-loop, so slow_down() and best_block_size() work together properly
        now = None  # needed for slow_down() in the first loop run
        before = start  # start measuring
        while True:

            # Download and write.  In test mode cap the read so we never
            # consume more than data_len (= _TEST_FILE_SIZE) bytes.
            data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
            byte_counter += len(data_block)

            # exit loop when download is finished
            if len(data_block) == 0:
                break

            # Open destination file just in time (only once any data arrived,
            # so an empty response never creates/truncates a file).
            if stream is None:
                try:
                    # sanitize_open may alter the temp name on filesystems
                    # that reject it; derive the final name from the result.
                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
                    assert stream is not None
                    filename = self.undo_temp_name(tmpfilename)
                    self.report_destination(filename)
                except (OSError, IOError) as err:
                    self.report_error('unable to open for writing: %s' % str(err))
                    return False

                if self.params.get('xattr_set_filesize', False) and data_len is not None:
                    # Best effort: record the expected size as an xattr so
                    # other tools can show progress on the partial file.
                    try:
                        write_xattr(tmpfilename, 'user.ytdl.filesize', str(data_len).encode('utf-8'))
                    except (XAttrUnavailableError, XAttrMetadataError) as err:
                        self.report_error('unable to set filesize xattr: %s' % str(err))

            try:
                stream.write(data_block)
            except (IOError, OSError) as err:
                self.to_stderr('\n')
                self.report_error('unable to write data: %s' % str(err))
                return False

            # Apply rate limit
            self.slow_down(start, now, byte_counter - resume_len)

            # end measuring of one loop run
            now = time.time()
            after = now

            # Adjust block size
            if not self.params.get('noresizebuffer', False):
                block_size = self.best_block_size(after - before, len(data_block))

            before = after

            # Progress message
            speed = self.calc_speed(start, now, byte_counter - resume_len)
            if data_len is None:
                eta = None
            else:
                eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)

            self._hook_progress({
                'status': 'downloading',
                'downloaded_bytes': byte_counter,
                'total_bytes': data_len,
                'tmpfilename': tmpfilename,
                'filename': filename,
                'eta': eta,
                'speed': speed,
                'elapsed': now - start,
            })

            if is_test and byte_counter == data_len:
                break

        if stream is None:
            # The loop never opened the file, i.e. the response was empty.
            self.to_stderr('\n')
            self.report_error('Did not get any data blocks')
            return False
        if tmpfilename != '-':
            stream.close()

        if data_len is not None and byte_counter != data_len:
            raise ContentTooShortError(byte_counter, int(data_len))
        self.try_rename(tmpfilename, filename)

        # Update file modification time
        if self.params.get('updatetime', True):
            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))

        self._hook_progress({
            'downloaded_bytes': byte_counter,
            'total_bytes': byte_counter,
            'filename': filename,
            'status': 'finished',
            'elapsed': time.time() - start,
        })

        return True