1 | #!/usr/bin/env python3 | |
2 | ||
3 | # Allow direct execution | |
4 | import os | |
5 | import sys | |
6 | import unittest | |
7 | ||
8 | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | |
9 | ||
10 | ||
11 | import collections | |
12 | import hashlib | |
13 | import http.client | |
14 | import json | |
15 | import socket | |
16 | import urllib.error | |
17 | ||
18 | from test.helper import ( | |
19 | assertGreaterEqual, | |
20 | expect_info_dict, | |
21 | expect_warnings, | |
22 | get_params, | |
23 | gettestcases, | |
24 | getwebpagetestcases, | |
25 | is_download_test, | |
26 | report_warning, | |
27 | try_rm, | |
28 | ) | |
29 | ||
30 | import yt_dlp.YoutubeDL # isort: split | |
31 | from yt_dlp.extractor import get_info_extractor | |
32 | from yt_dlp.utils import ( | |
33 | DownloadError, | |
34 | ExtractorError, | |
35 | UnavailableVideoError, | |
36 | format_bytes, | |
37 | join_nonempty, | |
38 | ) | |
39 | ||
40 | RETRIES = 3 | |
41 | ||
42 | ||
class YoutubeDL(yt_dlp.YoutubeDL):
    """Test harness around yt_dlp.YoutubeDL.

    Records every info dict passed to process_info so tests can inspect
    extraction results, and escalates warnings into hard failures.
    """

    def __init__(self, *args, **kwargs):
        # Log of every info dict handed to process_info (copies, so later
        # mutation by the downloader cannot affect recorded state).
        self.processed_info_dicts = []
        # Send stderr output through to_screen so all output is captured
        # uniformly during tests.
        self.to_stderr = self.to_screen
        super().__init__(*args, **kwargs)

    def report_warning(self, message, *args, **kwargs):
        # Don't accept warnings during tests
        raise ExtractorError(message)

    def process_info(self, info_dict):
        self.processed_info_dicts.append(info_dict.copy())
        return super().process_info(info_dict)
56 | ||
57 | ||
58 | def _file_md5(fn): | |
59 | with open(fn, 'rb') as f: | |
60 | return hashlib.md5(f.read()).hexdigest() | |
61 | ||
62 | ||
# Collect all extractor-defined test cases once at import time.
normal_test_cases = gettestcases()
webpage_test_cases = getwebpagetestcases()
# Per-extractor-name counter keyed by label ('' for normal, 'webpage' for
# webpage tests); used to build unique, stable test method names.
tests_counter = collections.defaultdict(collections.Counter)
66 | ||
67 | ||
@is_download_test
class TestDownload(unittest.TestCase):
    """Container class that dynamically-generated download tests are attached to."""

    # Parallel testing in nosetests. See
    # http://nose.readthedocs.org/en/latest/doc_tests/test_multiprocess/multiprocess.html
    _multiprocess_shared_ = True

    maxDiff = None

    # Tracks which generated tests have already run, so batch runners do
    # not execute the same test twice.
    COMPLETED_TESTS = {}

    def __str__(self):
        """Identify each test with the `add_ie` attribute, if available."""
        cls = type(self)
        add_ie = getattr(self, self._testMethodName).add_ie
        suffix = f' [{add_ie}]' if add_ie else ''
        return f'{self._testMethodName} ({cls.__module__}.{cls.__name__}){suffix}:'
82 | ||
83 | ||
84 | # Dynamically generate tests | |
85 | ||
def generator(test_case, tname):
    """Build a unittest test method for a single extractor test case.

    *test_case* is the dict defined on the extractor (url, info_dict,
    params, playlist entries, md5, skip markers, ...); *tname* is the
    unique method name the test will be registered under.
    """
    def test_template(self):
        # Batch runners may invoke the same test twice; run it only once.
        if self.COMPLETED_TESTS.get(tname):
            return
        self.COMPLETED_TESTS[tname] = True
        ie = yt_dlp.extractor.get_info_extractor(test_case['name'])()
        other_ies = [get_info_extractor(ie_key)() for ie_key in test_case.get('add_ie', [])]
        # A case with any 'playlist*' key is treated as a playlist test.
        is_playlist = any(k.startswith('playlist') for k in test_case)
        test_cases = test_case.get(
            'playlist', [] if is_playlist else [test_case])

        def print_skipping(reason):
            print('Skipping %s: %s' % (test_case['name'], reason))
            self.skipTest(reason)

        if not ie.working():
            print_skipping('IE marked as not _WORKING')

        # Sanity-check the test definitions themselves before downloading.
        for tc in test_cases:
            info_dict = tc.get('info_dict', {})
            params = tc.get('params', {})
            if not info_dict.get('id'):
                raise Exception(f'Test {tname} definition incorrect - "id" key is not present')
            elif not info_dict.get('ext') and info_dict.get('_type', 'video') == 'video':
                if params.get('skip_download') and params.get('ignore_no_formats_error'):
                    continue
                raise Exception(f'Test {tname} definition incorrect - "ext" key must be present to define the output file')

        if 'skip' in test_case:
            print_skipping(test_case['skip'])

        for other_ie in other_ies:
            if not other_ie.working():
                print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key())

        params = get_params(test_case.get('params', {}))
        # Prefix output files with the test name so parallel tests don't clash.
        params['outtmpl'] = tname + '_' + params['outtmpl']
        if is_playlist and 'playlist' not in test_case:
            params.setdefault('extract_flat', 'in_playlist')
            params.setdefault('playlistend', test_case.get(
                'playlist_mincount', test_case.get('playlist_count', -2) + 1))
            params.setdefault('skip_download', True)

        ydl = YoutubeDL(params, auto_init=False)
        ydl.add_default_info_extractors()
        finished_hook_called = set()

        def _hook(status):
            if status['status'] == 'finished':
                finished_hook_called.add(status['filename'])
        ydl.add_progress_hook(_hook)
        expect_warnings(ydl, test_case.get('expected_warnings', []))

        def get_tc_filename(tc):
            return ydl.prepare_filename(dict(tc.get('info_dict', {})))

        res_dict = None

        def try_rm_tcs_files(tcs=None):
            # Best-effort removal of any files a test case may have produced.
            if tcs is None:
                tcs = test_cases
            for tc in tcs:
                tc_filename = get_tc_filename(tc)
                try_rm(tc_filename)
                try_rm(tc_filename + '.part')
                try_rm(os.path.splitext(tc_filename)[0] + '.info.json')
        try_rm_tcs_files()
        try:
            try_num = 1
            while True:
                try:
                    # We're not using .download here since that is just a shim
                    # for outside error handling, and returns the exit code
                    # instead of the result dict.
                    res_dict = ydl.extract_info(
                        test_case['url'],
                        force_generic_extractor=params.get('force_generic_extractor', False))
                except (DownloadError, ExtractorError) as err:
                    # Check if the exception is not a network related one
                    if (err.exc_info[0] not in (urllib.error.URLError, socket.timeout, UnavailableVideoError, http.client.BadStatusLine)
                            or (err.exc_info[0] == urllib.error.HTTPError and err.exc_info[1].code == 503)):
                        err.msg = f'{getattr(err, "msg", err)} ({tname})'
                        raise

                    # Network error: retry up to RETRIES times, then skip.
                    if try_num == RETRIES:
                        report_warning('%s failed due to network errors, skipping...' % tname)
                        return

                    print(f'Retrying: {try_num} failed tries\n\n##########\n\n')

                    try_num += 1
                else:
                    break

            if is_playlist:
                self.assertTrue(res_dict['_type'] in ['playlist', 'multi_video'])
                self.assertTrue('entries' in res_dict)
                expect_info_dict(self, res_dict, test_case.get('info_dict', {}))

                if 'playlist_mincount' in test_case:
                    assertGreaterEqual(
                        self,
                        len(res_dict['entries']),
                        test_case['playlist_mincount'],
                        'Expected at least %d in playlist %s, but got only %d' % (
                            test_case['playlist_mincount'], test_case['url'],
                            len(res_dict['entries'])))
                if 'playlist_count' in test_case:
                    self.assertEqual(
                        len(res_dict['entries']),
                        test_case['playlist_count'],
                        'Expected %d entries in playlist %s, but got %d.' % (
                            test_case['playlist_count'],
                            test_case['url'],
                            len(res_dict['entries']),
                        ))
                if 'playlist_duration_sum' in test_case:
                    got_duration = sum(e['duration'] for e in res_dict['entries'])
                    self.assertEqual(
                        test_case['playlist_duration_sum'], got_duration)

            # Generalize both playlists and single videos to unified format for
            # simplicity
            if 'entries' not in res_dict:
                res_dict['entries'] = [res_dict]

            for tc_num, tc in enumerate(test_cases):
                tc_res_dict = res_dict['entries'][tc_num]
                # First, check test cases' data against extracted data alone
                expect_info_dict(self, tc_res_dict, tc.get('info_dict', {}))
                if tc_res_dict.get('_type', 'video') != 'video':
                    continue
                # Now, check downloaded file consistency
                tc_filename = get_tc_filename(tc)
                if not test_case.get('params', {}).get('skip_download', False):
                    self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename)
                    self.assertTrue(tc_filename in finished_hook_called)
                    expected_minsize = tc.get('file_minsize', 10000)
                    if expected_minsize is not None:
                        if params.get('test'):
                            expected_minsize = max(expected_minsize, 10000)
                        got_fsize = os.path.getsize(tc_filename)
                        assertGreaterEqual(
                            self, got_fsize, expected_minsize,
                            'Expected %s to be at least %s, but it\'s only %s ' %
                            (tc_filename, format_bytes(expected_minsize),
                             format_bytes(got_fsize)))
                    if 'md5' in tc:
                        md5_for_file = _file_md5(tc_filename)
                        self.assertEqual(tc['md5'], md5_for_file)
                # Finally, check test cases' data again but this time against
                # extracted data from info JSON file written during processing
                info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
                self.assertTrue(
                    os.path.exists(info_json_fn),
                    'Missing info file %s' % info_json_fn)
                with open(info_json_fn, encoding='utf-8') as infof:
                    info_dict = json.load(infof)
                expect_info_dict(self, info_dict, tc.get('info_dict', {}))
        finally:
            try_rm_tcs_files()
            if is_playlist and res_dict is not None and res_dict.get('entries'):
                # Remove all other files that may have been extracted if the
                # extractor returns full results even with extract_flat
                res_tcs = [{'info_dict': e} for e in res_dict['entries']]
                try_rm_tcs_files(res_tcs)

    return test_template
254 | ||
255 | ||
256 | # And add them to TestDownload | |
def inject_tests(test_cases, label=''):
    """Generate one test method per test case and attach it to TestDownload.

    Names are made unique per extractor and label by appending a running
    index from tests_counter.
    """
    for case in test_cases:
        ie_name = case['name']
        index = tests_counter[ie_name][label]
        tests_counter[ie_name][label] += 1
        method_name = join_nonempty('test', ie_name, label, index, delim='_')

        method = generator(case, method_name)
        method.__name__ = method_name
        method.add_ie = ','.join(case.get('add_ie', []))
        setattr(TestDownload, method_name, method)
267 | ||
268 | ||
# Register the regular extractor test cases (unlabelled).
inject_tests(normal_test_cases)

# TODO: disable redirection to the IE to ensure we are actually testing the webpage extraction
inject_tests(webpage_test_cases, 'webpage')
273 | ||
274 | ||
def batch_generator(name):
    """Build a test method that sequentially runs every generated test for
    the extractor *name*, across all labels, printing skipped ones."""
    def test_template(self):
        for label, count in tests_counter[name].items():
            for index in range(count):
                test_name = join_nonempty('test', name, label, index, delim='_')
                try:
                    getattr(self, test_name)()
                except unittest.SkipTest:
                    print(f'Skipped {test_name}')

    return test_template
286 | ||
287 | ||
# For each extractor, add a `test_<name>_all` convenience method that runs
# every generated test for that extractor in sequence.
for name in tests_counter:
    test_method = batch_generator(name)
    test_method.__name__ = f'test_{name}_all'
    test_method.add_ie = ''
    setattr(TestDownload, test_method.__name__, test_method)
del test_method


if __name__ == '__main__':
    unittest.main()