#!/usr/bin/env python3

# Allow direct execution
import os
import sys
import unittest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


import collections
import hashlib
import http.client
import json
import socket
import urllib.error

from test.helper import (
    assertGreaterEqual,
    expect_info_dict,
    expect_warnings,
    get_params,
    gettestcases,
    getwebpagetestcases,
    is_download_test,
    report_warning,
    try_rm,
)

import yt_dlp.YoutubeDL  # isort: split
from yt_dlp.extractor import get_info_extractor
from yt_dlp.utils import (
    DownloadError,
    ExtractorError,
    UnavailableVideoError,
    format_bytes,
    join_nonempty,
)

RETRIES = 3


class YoutubeDL(yt_dlp.YoutubeDL):
    def __init__(self, *args, **kwargs):
        self.to_stderr = self.to_screen
        self.processed_info_dicts = []
        super().__init__(*args, **kwargs)

    def report_warning(self, message, *args, **kwargs):
        # Don't accept warnings during tests
        raise ExtractorError(message)

    def process_info(self, info_dict):
        self.processed_info_dicts.append(info_dict.copy())
        return super().process_info(info_dict)


def _file_md5(fn):
    with open(fn, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()


normal_test_cases = gettestcases()
webpage_test_cases = getwebpagetestcases()
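# Per-extractor counters, used to give each generated test method a unique numbered name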
tests_counter = collections.defaultdict(collections.Counter)


@is_download_test
class TestDownload(unittest.TestCase):
    # Parallel testing in nosetests. See
    # http://nose.readthedocs.org/en/latest/doc_tests/test_multiprocess/multiprocess.html
    _multiprocess_shared_ = True

    maxDiff = None

    COMPLETED_TESTS = {}

    def __str__(self):
        """Identify each test with the `add_ie` attribute, if available."""
        cls, add_ie = type(self), getattr(self, self._testMethodName).add_ie
        return f'{self._testMethodName} ({cls.__module__}.{cls.__name__}){f" [{add_ie}]" if add_ie else ""}:'


# Dynamically generate tests

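# generator() builds the body of one test method from a single test-case
# definition; inject_tests() below attaches the returned function to TestDownload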
def generator(test_case, tname):
    def test_template(self):
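        # Avoid running the same test twice, e.g. both directly and via the
        # batch `test_<name>_all` method added at the bottom of this file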
        if self.COMPLETED_TESTS.get(tname):
            return
        self.COMPLETED_TESTS[tname] = True
        ie = yt_dlp.extractor.get_info_extractor(test_case['name'])()
        other_ies = [get_info_extractor(ie_key)() for ie_key in test_case.get('add_ie', [])]
        is_playlist = any(k.startswith('playlist') for k in test_case)
        test_cases = test_case.get(
            'playlist', [] if is_playlist else [test_case])

        def print_skipping(reason):
            print('Skipping %s: %s' % (test_case['name'], reason))
            self.skipTest(reason)

        if not ie.working():
            print_skipping('IE marked as not _WORKING')

        for tc in test_cases:
            info_dict = tc.get('info_dict', {})
            params = tc.get('params', {})
            if not info_dict.get('id'):
                raise Exception(f'Test {tname} definition incorrect - "id" key is not present')
            elif not info_dict.get('ext'):
                if params.get('skip_download') and params.get('ignore_no_formats_error'):
                    continue
                raise Exception(f'Test {tname} definition incorrect - "ext" key must be present to define the output file')

        if 'skip' in test_case:
            print_skipping(test_case['skip'])

        for other_ie in other_ies:
            if not other_ie.working():
                print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key())

        params = get_params(test_case.get('params', {}))
        params['outtmpl'] = tname + '_' + params['outtmpl']
        if is_playlist and 'playlist' not in test_case:
            params.setdefault('extract_flat', 'in_playlist')
            params.setdefault('playlistend', test_case.get('playlist_mincount'))
            params.setdefault('skip_download', True)

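        # The YoutubeDL subclass defined above turns warnings into hard
        # failures and records every processed info dict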
        ydl = YoutubeDL(params, auto_init=False)
        ydl.add_default_info_extractors()
        finished_hook_called = set()

        def _hook(status):
            if status['status'] == 'finished':
                finished_hook_called.add(status['filename'])
        ydl.add_progress_hook(_hook)
        expect_warnings(ydl, test_case.get('expected_warnings', []))

        def get_tc_filename(tc):
            return ydl.prepare_filename(dict(tc.get('info_dict', {})))

        res_dict = None

        def try_rm_tcs_files(tcs=None):
            if tcs is None:
                tcs = test_cases
            for tc in tcs:
                tc_filename = get_tc_filename(tc)
                try_rm(tc_filename)
                try_rm(tc_filename + '.part')
                try_rm(os.path.splitext(tc_filename)[0] + '.info.json')
        try_rm_tcs_files()
        try:
            try_num = 1
            while True:
                try:
                    # We're not using .download here since that is just a shim
                    # for outside error handling, and returns the exit code
                    # instead of the result dict.
                    res_dict = ydl.extract_info(
                        test_case['url'],
                        force_generic_extractor=params.get('force_generic_extractor', False))
                except (DownloadError, ExtractorError) as err:
                    # Check if the exception is not a network related one
                    if (err.exc_info[0] not in (urllib.error.URLError, socket.timeout, UnavailableVideoError, http.client.BadStatusLine)
                            or (err.exc_info[0] == urllib.error.HTTPError and err.exc_info[1].code == 503)):
                        err.msg = f'{getattr(err, "msg", err)} ({tname})'
                        raise

                    if try_num == RETRIES:
                        report_warning('%s failed due to network errors, skipping...' % tname)
                        return

                    print(f'Retrying: {try_num} failed tries\n\n##########\n\n')

                    try_num += 1
                else:
                    break

            if is_playlist:
                self.assertTrue(res_dict['_type'] in ['playlist', 'multi_video'])
                self.assertTrue('entries' in res_dict)
                expect_info_dict(self, res_dict, test_case.get('info_dict', {}))

            if 'playlist_mincount' in test_case:
                assertGreaterEqual(
                    self,
                    len(res_dict['entries']),
                    test_case['playlist_mincount'],
                    'Expected at least %d in playlist %s, but got only %d' % (
                        test_case['playlist_mincount'], test_case['url'],
                        len(res_dict['entries'])))
            if 'playlist_count' in test_case:
                self.assertEqual(
                    len(res_dict['entries']),
                    test_case['playlist_count'],
                    'Expected %d entries in playlist %s, but got %d.' % (
                        test_case['playlist_count'],
                        test_case['url'],
                        len(res_dict['entries']),
                    ))
            if 'playlist_duration_sum' in test_case:
                got_duration = sum(e['duration'] for e in res_dict['entries'])
                self.assertEqual(
                    test_case['playlist_duration_sum'], got_duration)

            # Generalize both playlists and single videos to unified format for
            # simplicity
            if 'entries' not in res_dict:
                res_dict['entries'] = [res_dict]

            for tc_num, tc in enumerate(test_cases):
                tc_res_dict = res_dict['entries'][tc_num]
                # First, check test cases' data against extracted data alone
                expect_info_dict(self, tc_res_dict, tc.get('info_dict', {}))
                # Now, check downloaded file consistency
                tc_filename = get_tc_filename(tc)
                if not test_case.get('params', {}).get('skip_download', False):
                    self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename)
                    self.assertTrue(tc_filename in finished_hook_called)
                    expected_minsize = tc.get('file_minsize', 10000)
                    if expected_minsize is not None:
                        if params.get('test'):
                            expected_minsize = max(expected_minsize, 10000)
                        got_fsize = os.path.getsize(tc_filename)
                        assertGreaterEqual(
                            self, got_fsize, expected_minsize,
                            'Expected %s to be at least %s, but it\'s only %s ' %
                            (tc_filename, format_bytes(expected_minsize),
                             format_bytes(got_fsize)))
                    if 'md5' in tc:
                        md5_for_file = _file_md5(tc_filename)
                        self.assertEqual(tc['md5'], md5_for_file)
                # Finally, check test cases' data again but this time against
                # extracted data from info JSON file written during processing
                info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
                self.assertTrue(
                    os.path.exists(info_json_fn),
                    'Missing info file %s' % info_json_fn)
                with open(info_json_fn, encoding='utf-8') as infof:
                    info_dict = json.load(infof)
                expect_info_dict(self, info_dict, tc.get('info_dict', {}))
        finally:
            try_rm_tcs_files()
            if is_playlist and res_dict is not None and res_dict.get('entries'):
                # Remove all other files that may have been extracted if the
                # extractor returns full results even with extract_flat
                res_tcs = [{'info_dict': e} for e in res_dict['entries']]
                try_rm_tcs_files(res_tcs)

    return test_template


# And add them to TestDownload
def inject_tests(test_cases, label=''):
    for test_case in test_cases:
        name = test_case['name']
        tname = join_nonempty('test', name, label, tests_counter[name][label], delim='_')
        tests_counter[name][label] += 1

        test_method = generator(test_case, tname)
        test_method.__name__ = tname
        test_method.add_ie = ','.join(test_case.get('add_ie', []))
        setattr(TestDownload, test_method.__name__, test_method)


inject_tests(normal_test_cases)

# TODO: disable redirection to the IE to ensure we are actually testing the webpage extraction
inject_tests(webpage_test_cases, 'webpage')

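# Each extractor also gets a batch `test_<name>_all` method that runs every
# generated variant for that extractor in turn, printing skipped tests instead
# of aborting on them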
def batch_generator(name):
    def test_template(self):
        for label, num_tests in tests_counter[name].items():
            for i in range(num_tests):
                test_name = join_nonempty('test', name, label, i, delim='_')
                try:
                    getattr(self, test_name)()
                except unittest.SkipTest:
                    print(f'Skipped {test_name}')

    return test_template

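# Register the batch methods on TestDownload as well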
for name in tests_counter:
    test_method = batch_generator(name)
    test_method.__name__ = f'test_{name}_all'
    test_method.add_ie = ''
    setattr(TestDownload, test_method.__name__, test_method)
    del test_method


if __name__ == '__main__':
    unittest.main()