# SPDX-License-Identifier: GPL-2.0+
#
# Copyright (c) 2016 Google, Inc
#

from contextlib import contextmanager
import doctest
import glob
from io import StringIO
import multiprocessing
import os
import sys
import unittest

from patman import command

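# Try to use the concurrencytest package, which allows tests to run in
# parallel; if it is not available, fall back to running them serially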
use_concurrent = True
try:
    from concurrencytest.concurrencytest import ConcurrentTestSuite
    from concurrencytest.concurrencytest import fork_for_tests
except ImportError:
    use_concurrent = False


def RunTestCoverage(prog, filter_fname, exclude_list, build_dir, required=None,
                    extra_args=None):
    """Run tests and check that we get 100% coverage

    Args:
        prog: Program to run (will be passed a '-t' argument to run tests)
        filter_fname: Normally all *.py files in the program's directory will
            be included. If this is not None, then it is used to filter the
            list so that only filenames that don't contain filter_fname are
            included.
        exclude_list: List of file patterns to exclude from the coverage
            calculation
        build_dir: Build directory, used to locate libfdt.py
        required: Set of module names which must appear in the coverage report
        extra_args (str): Extra arguments to pass to the tool before the
            -t/test arg

    Raises:
        ValueError if the code coverage is not 100%
    """
    # This uses the build output from sandbox_spl to get _libfdt.so
    path = os.path.dirname(prog)
    if filter_fname:
        glob_list = glob.glob(os.path.join(path, '*.py'))
        glob_list = [fname for fname in glob_list if filter_fname in fname]
    else:
        glob_list = []
    glob_list += exclude_list
    glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
    glob_list += ['*concurrencytest*']
    test_cmd = 'test' if 'binman' in prog or 'patman' in prog else '-t'
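    # Build a command that runs the tool's tests under python3-coverage,
    # omitting the files collected above; -P1 asks the tool to use a single
    # process, presumably so that coverage sees all of the work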
    prefix = ''
    if build_dir:
        prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir
    cmd = ('%spython3-coverage run '
           '--omit "%s" %s %s %s -P1' % (prefix, ','.join(glob_list),
                                         prog, extra_args or '', test_cmd))
    os.system(cmd)
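    # Check the report: all required modules must be present and the total
    # coverage must be 100%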
    stdout = command.Output('python3-coverage', 'report')
    lines = stdout.splitlines()
    ok = True
    if required:
        # Convert '/path/to/name.py' to just the module name 'name'
        test_set = set([os.path.splitext(os.path.basename(line.split()[0]))[0]
                        for line in lines if '/etype/' in line])
        missing_list = set(required)
        missing_list.discard('__init__')
        missing_list.difference_update(test_set)
        if missing_list:
            print('Missing tests for %s' % (', '.join(missing_list)))
            print(stdout)
            ok = False

    coverage = lines[-1].split(' ')[-1]
    print(coverage)
    if coverage != '100%':
        print(stdout)
        print("Type 'python3-coverage html' to get a report in "
              'htmlcov/index.html')
        print('Coverage error: %s, but should be 100%%' % coverage)
        ok = False
    if not ok:
        raise ValueError('Test coverage failure')
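
# A minimal usage sketch (the tool path, exclude patterns and module set here
# are hypothetical, for illustration only):
#
#     RunTestCoverage('tools/binman/binman', None, ['*test*', '*main.py'],
#                     'build-sandbox_spl', required={'entry'})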


# Use this to suppress stdout/stderr output:
# with capture_sys_output() as (stdout, stderr)
#   ...do something...
@contextmanager
def capture_sys_output():
    capture_out, capture_err = StringIO(), StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = capture_out, capture_err
        yield capture_out, capture_err
    finally:
        sys.stdout, sys.stderr = old_out, old_err
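
# For example (hypothetical, for illustration only):
#
#     with capture_sys_output() as (stdout, stderr):
#         print('hidden')
#     assert stdout.getvalue() == 'hidden\n'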


def ReportResult(toolname: str, test_name: str, result: unittest.TestResult):
    """Report the results from a suite of tests

    Args:
        toolname: Name of the tool that ran the tests
        test_name: Name of test that was run, or None for all
        result: A unittest.TestResult object containing the results
    """
    # Remove errors which just indicate a missing test. Since Python v3.5, if
    # an ImportError or AttributeError occurs while traversing a name, a
    # synthetic test that raises that error when run is returned. These
    # errors are included in the errors accumulated by result.errors.
    if test_name:
        errors = []

        for test, err in result.errors:
            if ("has no attribute '%s'" % test_name) not in err:
                errors.append((test, err))
            result.testsRun -= 1
        result.errors = errors

    print(result)
    for test, err in result.errors:
        print(test.id(), err)
    for test, err in result.failures:
        print(err, result.failures)
    if result.skipped:
        print('%d %s test%s SKIPPED:' % (len(result.skipped), toolname,
                                         's' if len(result.skipped) > 1 else ''))
        for skip_info in result.skipped:
            print('%s: %s' % (skip_info[0], skip_info[1]))
    if result.errors or result.failures:
        print('%s tests FAILED' % toolname)
        return 1
    return 0


def RunTestSuites(result, debug, verbosity, test_preserve_dirs, processes,
                  test_name, toolpath, class_and_module_list):
    """Run a series of test suites and collect the results

    Args:
        result: A unittest.TestResult object to add the results to
        debug: True to enable debugging, which shows a full stack trace on
            error
        verbosity: Verbosity level to use (0-4)
        test_preserve_dirs: True to preserve the input directory used by tests
            so that it can be examined afterwards (only useful for debugging
            tests). If a single test is selected (in args[0]) it also preserves
            the output directory for this test. Both directories are displayed
            on the command line.
        processes: Number of processes to use to run tests (None=same as #CPUs)
        test_name: Name of test to run, or None for all
        toolpath: List of paths to use for tools
        class_and_module_list: List of test classes (type class) and module
            names (type str) to run
    """
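    # Run the doctests in any modules given by name, where they match the
    # requested test (or no specific test was requested)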
    for module in class_and_module_list:
        if isinstance(module, str) and (not test_name or test_name == module):
            suite = doctest.DocTestSuite(module)
            suite.run(result)

    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')
    if verbosity:
        sys.argv.append('-v%d' % verbosity)
    if toolpath:
        for path in toolpath:
            sys.argv += ['--toolpath', path]

    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for module in class_and_module_list:
        if isinstance(module, str):
            continue
        # Tell the test module about our arguments, if it is interested
        if hasattr(module, 'setup_test_args'):
            setup_test_args = getattr(module, 'setup_test_args')
            setup_test_args(preserve_indir=test_preserve_dirs,
                preserve_outdirs=test_preserve_dirs and test_name is not None,
                toolpath=toolpath, verbosity=verbosity)
        if test_name:
            try:
                suite.addTests(loader.loadTestsFromName(test_name, module))
            except AttributeError:
                continue
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))
    if use_concurrent and processes != 1:
        concurrent_suite = ConcurrentTestSuite(suite,
            fork_for_tests(processes or multiprocessing.cpu_count()))
        concurrent_suite.run(result)
    else:
        suite.run(result)
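

# A minimal sketch of how a tool might drive these helpers; the tool name,
# test class and module name here are hypothetical, for illustration only:
#
#     result = unittest.TestResult()
#     RunTestSuites(result, debug=False, verbosity=1, test_preserve_dirs=False,
#                   processes=None, test_name=None, toolpath=[],
#                   class_and_module_list=[FunctionalTest, 'mytool.util'])
#     sys.exit(ReportResult('mytool', None, result))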