1 | #!/usr/bin/env python3 | |
2 | ||
3 | # Allow direct execution | |
4 | import os | |
5 | import sys | |
6 | import unittest | |
7 | ||
8 | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | |
9 | ||
10 | ||
11 | import collections | |
12 | import hashlib | |
13 | import json | |
14 | ||
15 | from test.helper import ( | |
16 | assertGreaterEqual, | |
17 | expect_info_dict, | |
18 | expect_warnings, | |
19 | get_params, | |
20 | gettestcases, | |
21 | getwebpagetestcases, | |
22 | is_download_test, | |
23 | report_warning, | |
24 | try_rm, | |
25 | ) | |
26 | ||
27 | import yt_dlp.YoutubeDL # isort: split | |
28 | from yt_dlp.extractor import get_info_extractor | |
29 | from yt_dlp.networking.exceptions import HTTPError, TransportError | |
30 | from yt_dlp.utils import ( | |
31 | DownloadError, | |
32 | ExtractorError, | |
33 | UnavailableVideoError, | |
34 | format_bytes, | |
35 | join_nonempty, | |
36 | ) | |
37 | ||
38 | RETRIES = 3 | |
39 | ||
40 | ||
class YoutubeDL(yt_dlp.YoutubeDL):
    """Test harness around yt_dlp.YoutubeDL.

    Differences from the real class:
      * warnings are fatal (raised as ExtractorError) so tests can't pass
        while emitting unexpected warnings,
      * stderr output is redirected to the screen logger,
      * every info dict passed to process_info is recorded for inspection.
    """

    def __init__(self, *args, **kwargs):
        # Route stderr output through to_screen so test output is unified.
        self.to_stderr = self.to_screen
        # Record of every info dict handed to process_info (copies).
        self.processed_info_dicts = []
        super().__init__(*args, **kwargs)

    def report_warning(self, message, *args, **kwargs):
        # Don't accept warnings during tests
        raise ExtractorError(message)

    def process_info(self, info_dict):
        # Keep a snapshot before the base class mutates/consumes it.
        self.processed_info_dicts.append(info_dict.copy())
        return super().process_info(info_dict)
54 | ||
55 | ||
56 | def _file_md5(fn): | |
57 | with open(fn, 'rb') as f: | |
58 | return hashlib.md5(f.read()).hexdigest() | |
59 | ||
60 | ||
# Load all test-case definitions once at import time.  tests_counter maps
# extractor name -> Counter(label -> number of generated tests), which keeps
# the dynamically generated method names unique per extractor/label.
normal_test_cases = gettestcases()
webpage_test_cases = getwebpagetestcases()
tests_counter = collections.defaultdict(collections.Counter)
64 | ||
65 | ||
@is_download_test
class TestDownload(unittest.TestCase):
    """Container class onto which the per-extractor tests are injected."""

    # Parallel testing in nosetests. See
    # http://nose.readthedocs.org/en/latest/doc_tests/test_multiprocess/multiprocess.html
    _multiprocess_shared_ = True

    # Always show full diffs on assertion failures.
    maxDiff = None

    # Generated test name -> True once it has run; lets the batch tests
    # (test_<name>_all) skip cases that already ran individually.
    COMPLETED_TESTS = {}

    def __str__(self):
        """Identify each test with the `add_ie` attribute, if available."""
        cls, add_ie = type(self), getattr(self, self._testMethodName).add_ie
        return f'{self._testMethodName} ({cls.__module__}.{cls.__name__}){f" [{add_ie}]" if add_ie else ""}:'
80 | ||
81 | ||
82 | # Dynamically generate tests | |
83 | ||
def generator(test_case, tname):
    """Build a test method named *tname* that runs extractor *test_case*.

    The returned function is later attached to TestDownload.  It validates
    the test-case definition, runs the extraction (with network-error
    retries), and checks the resulting info dict(s), downloaded files and
    written .info.json files against the expectations in the test case.
    """
    def test_template(self):
        # Skip if this exact test already ran (e.g. via a batch test).
        if self.COMPLETED_TESTS.get(tname):
            return
        self.COMPLETED_TESTS[tname] = True
        ie = yt_dlp.extractor.get_info_extractor(test_case['name'])()
        other_ies = [get_info_extractor(ie_key)() for ie_key in test_case.get('add_ie', [])]
        # A test case with any 'playlist*' key is treated as a playlist test.
        is_playlist = any(k.startswith('playlist') for k in test_case)
        test_cases = test_case.get(
            'playlist', [] if is_playlist else [test_case])

        def print_skipping(reason):
            # Report and skip (skipTest raises, so this does not return).
            print('Skipping %s: %s' % (test_case['name'], reason))
            self.skipTest(reason)

        if not ie.working():
            print_skipping('IE marked as not _WORKING')

        # Sanity-check the test definitions before doing any network work.
        for tc in test_cases:
            info_dict = tc.get('info_dict', {})
            params = tc.get('params', {})
            if not info_dict.get('id'):
                raise Exception(f'Test {tname} definition incorrect - "id" key is not present')
            elif not info_dict.get('ext') and info_dict.get('_type', 'video') == 'video':
                if params.get('skip_download') and params.get('ignore_no_formats_error'):
                    continue
                raise Exception(f'Test {tname} definition incorrect - "ext" key must be present to define the output file')

        if 'skip' in test_case:
            print_skipping(test_case['skip'])

        for other_ie in other_ies:
            if not other_ie.working():
                print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key())

        params = get_params(test_case.get('params', {}))
        # Prefix output filenames with the test name to avoid collisions.
        params['outtmpl'] = tname + '_' + params['outtmpl']
        if is_playlist and 'playlist' not in test_case:
            params.setdefault('extract_flat', 'in_playlist')
            params.setdefault('playlistend', test_case.get(
                'playlist_mincount', test_case.get('playlist_count', -2) + 1))
            params.setdefault('skip_download', True)

        ydl = YoutubeDL(params, auto_init=False)
        ydl.add_default_info_extractors()
        # Filenames for which a 'finished' progress event was observed.
        finished_hook_called = set()

        def _hook(status):
            if status['status'] == 'finished':
                finished_hook_called.add(status['filename'])
        ydl.add_progress_hook(_hook)
        expect_warnings(ydl, test_case.get('expected_warnings', []))

        def get_tc_filename(tc):
            # Output filename that ydl would produce for this sub-case.
            return ydl.prepare_filename(dict(tc.get('info_dict', {})))

        res_dict = None

        def try_rm_tcs_files(tcs=None):
            # Best-effort removal of any files a (previous) run produced.
            if tcs is None:
                tcs = test_cases
            for tc in tcs:
                tc_filename = get_tc_filename(tc)
                try_rm(tc_filename)
                try_rm(tc_filename + '.part')
                try_rm(os.path.splitext(tc_filename)[0] + '.info.json')
        try_rm_tcs_files()
        try:
            try_num = 1
            while True:
                try:
                    # We're not using .download here since that is just a shim
                    # for outside error handling, and returns the exit code
                    # instead of the result dict.
                    res_dict = ydl.extract_info(
                        test_case['url'],
                        force_generic_extractor=params.get('force_generic_extractor', False))
                except (DownloadError, ExtractorError) as err:
                    # Check if the exception is not a network related one
                    if not isinstance(err.exc_info[1], (TransportError, UnavailableVideoError)) or (isinstance(err.exc_info[1], HTTPError) and err.exc_info[1].code == 503):
                        err.msg = f'{getattr(err, "msg", err)} ({tname})'
                        raise

                    # Network flake: retry up to RETRIES times, then skip.
                    if try_num == RETRIES:
                        report_warning('%s failed due to network errors, skipping...' % tname)
                        return

                    print(f'Retrying: {try_num} failed tries\n\n##########\n\n')

                    try_num += 1
                else:
                    break

            if is_playlist:
                self.assertTrue(res_dict['_type'] in ['playlist', 'multi_video'])
                self.assertTrue('entries' in res_dict)
                expect_info_dict(self, res_dict, test_case.get('info_dict', {}))

                if 'playlist_mincount' in test_case:
                    assertGreaterEqual(
                        self,
                        len(res_dict['entries']),
                        test_case['playlist_mincount'],
                        'Expected at least %d in playlist %s, but got only %d' % (
                            test_case['playlist_mincount'], test_case['url'],
                            len(res_dict['entries'])))
                if 'playlist_count' in test_case:
                    self.assertEqual(
                        len(res_dict['entries']),
                        test_case['playlist_count'],
                        'Expected %d entries in playlist %s, but got %d.' % (
                            test_case['playlist_count'],
                            test_case['url'],
                            len(res_dict['entries']),
                        ))
                if 'playlist_duration_sum' in test_case:
                    got_duration = sum(e['duration'] for e in res_dict['entries'])
                    self.assertEqual(
                        test_case['playlist_duration_sum'], got_duration)

            # Generalize both playlists and single videos to unified format for
            # simplicity
            if 'entries' not in res_dict:
                res_dict['entries'] = [res_dict]

            for tc_num, tc in enumerate(test_cases):
                tc_res_dict = res_dict['entries'][tc_num]
                # First, check test cases' data against extracted data alone
                expect_info_dict(self, tc_res_dict, tc.get('info_dict', {}))
                if tc_res_dict.get('_type', 'video') != 'video':
                    continue
                # Now, check downloaded file consistency
                tc_filename = get_tc_filename(tc)
                if not test_case.get('params', {}).get('skip_download', False):
                    self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename)
                    self.assertTrue(tc_filename in finished_hook_called)
                    expected_minsize = tc.get('file_minsize', 10000)
                    if expected_minsize is not None:
                        if params.get('test'):
                            expected_minsize = max(expected_minsize, 10000)
                        got_fsize = os.path.getsize(tc_filename)
                        assertGreaterEqual(
                            self, got_fsize, expected_minsize,
                            'Expected %s to be at least %s, but it\'s only %s ' %
                            (tc_filename, format_bytes(expected_minsize),
                             format_bytes(got_fsize)))
                    if 'md5' in tc:
                        md5_for_file = _file_md5(tc_filename)
                        self.assertEqual(tc['md5'], md5_for_file)
                # Finally, check test cases' data again but this time against
                # extracted data from info JSON file written during processing
                info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
                self.assertTrue(
                    os.path.exists(info_json_fn),
                    'Missing info file %s' % info_json_fn)
                with open(info_json_fn, encoding='utf-8') as infof:
                    info_dict = json.load(infof)
                expect_info_dict(self, info_dict, tc.get('info_dict', {}))
        finally:
            # Always clean up whatever was downloaded, even on failure.
            try_rm_tcs_files()
            if is_playlist and res_dict is not None and res_dict.get('entries'):
                # Remove all other files that may have been extracted if the
                # extractor returns full results even with extract_flat
                res_tcs = [{'info_dict': e} for e in res_dict['entries']]
                try_rm_tcs_files(res_tcs)
            ydl.close()
    return test_template
251 | ||
252 | ||
253 | # And add them to TestDownload | |
def inject_tests(test_cases, label=''):
    """Attach one generated test method per entry of *test_cases* to
    TestDownload, tagging the method names with *label* (if non-empty)."""
    for case in test_cases:
        ie_name = case['name']
        method_name = join_nonempty('test', ie_name, label, tests_counter[ie_name][label], delim='_')
        tests_counter[ie_name][label] += 1

        method = generator(case, method_name)
        method.__name__ = method_name
        method.add_ie = ','.join(case.get('add_ie', []))
        setattr(TestDownload, method_name, method)
264 | ||
265 | ||
# Register the standard extractor tests on TestDownload.
inject_tests(normal_test_cases)

# TODO: disable redirection to the IE to ensure we are actually testing the webpage extraction
inject_tests(webpage_test_cases, 'webpage')
270 | ||
271 | ||
def batch_generator(name):
    """Return a test method that runs every generated test for extractor
    *name* in sequence, reporting (rather than propagating) skipped cases."""
    def test_template(self):
        for label, count in tests_counter[name].items():
            for idx in range(count):
                test_name = join_nonempty('test', name, label, idx, delim='_')
                try:
                    getattr(self, test_name)()
                except unittest.SkipTest:
                    print(f'Skipped {test_name}')

    return test_template
283 | ||
284 | ||
# For each extractor, add a test_<name>_all method that runs all of its
# generated tests in one go.
for name in tests_counter:
    test_method = batch_generator(name)
    test_method.__name__ = f'test_{name}_all'
    test_method.add_ie = ''
    setattr(TestDownload, test_method.__name__, test_method)
    # Avoid leaking the loop variable as a module-level attribute.
    del test_method
291 | ||
292 | ||
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()