# SPDX-License-Identifier: GPL-2.0+
#
# Copyright (c) 2016 Google, Inc
#

from contextlib import contextmanager
import doctest
import glob
from io import StringIO
import multiprocessing
import os
import sys
import unittest

from patman import command

use_concurrent = True
try:
    from concurrencytest import ConcurrentTestSuite, fork_for_tests
except ImportError:
    # concurrencytest is an optional package; fall back to serial test runs
    use_concurrent = False


def RunTestCoverage(prog, filter_fname, exclude_list, build_dir, required=None):
    """Run tests and check that we get 100% coverage

    Args:
        prog: Program to run (will be passed a '-t' argument to run tests)
        filter_fname: Normally all *.py files in the program's directory will
            be included. If this is not None, then it is used to filter the
            list so that only filenames that don't contain filter_fname are
            included in the coverage report
        exclude_list: List of file patterns to exclude from the coverage
            calculation
        build_dir: Build directory, used to locate libfdt.py
        required: Set of module names which must appear in the coverage report

    Raises:
        ValueError if the code coverage is not 100%
    """
    # This uses the build output from sandbox_spl to get _libfdt.so
    path = os.path.dirname(prog)
    if filter_fname:
        glob_list = glob.glob(os.path.join(path, '*.py'))
        glob_list = [fname for fname in glob_list if filter_fname in fname]
    else:
        glob_list = []
    glob_list += exclude_list
    glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
    test_cmd = 'test' if 'binman' in prog or 'patman' in prog else '-t'
    prefix = ''
    if build_dir:
        prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir
    cmd = ('%spython3-coverage run '
           '--omit "%s" %s %s -P1' % (prefix, ','.join(glob_list),
                                      prog, test_cmd))
    os.system(cmd)
    stdout = command.Output('python3-coverage', 'report')
    lines = stdout.splitlines()
    # Assume success until we find a problem; a missing test or imperfect
    # coverage clears this flag
    ok = True
    if required:
        # Convert '/path/to/name.py' to just the module name 'name'
        test_set = set([os.path.splitext(os.path.basename(line.split()[0]))[0]
                        for line in lines if '/etype/' in line])
        missing_list = required
        missing_list.discard('__init__')
        missing_list.difference_update(test_set)
        if missing_list:
            print('Missing tests for %s' % (', '.join(missing_list)))
            print(stdout)
            ok = False

    coverage = lines[-1].split(' ')[-1]
    print(coverage)
    if coverage != '100%':
        print(stdout)
        print("Type 'python3-coverage html' to get a report in "
              'htmlcov/index.html')
        print('Coverage error: %s, but should be 100%%' % coverage)
        ok = False
    if not ok:
        raise ValueError('Test coverage failure')
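
# Example (illustrative sketch only): a tool can run its own coverage check
# with a call like the one below; the program path, exclude patterns and
# build directory shown here are hypothetical.
#
#     test_util.RunTestCoverage('tools/binman/binman', None,
#                               ['*test*', '*main.py'], 'b')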


# Use this to suppress stdout/stderr output:
# with capture_sys_output() as (stdout, stderr):
#     ...do something...
@contextmanager
def capture_sys_output():
    capture_out, capture_err = StringIO(), StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = capture_out, capture_err
        yield capture_out, capture_err
    finally:
        sys.stdout, sys.stderr = old_out, old_err
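
# Example (sketch): capture what a call prints and inspect it afterwards;
# getvalue() is the standard io.StringIO accessor.
#
#     with capture_sys_output() as (stdout, stderr):
#         print('quiet')
#     assert stdout.getvalue() == 'quiet\n'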


def ReportResult(toolname: str, test_name: str, result: unittest.TestResult):
    """Report the results from a suite of tests

    Args:
        toolname: Name of the tool that ran the tests
        test_name: Name of the test that was run, or None for all
        result: A unittest.TestResult object containing the results
    """
    # Remove errors which just indicate a missing test. Since Python v3.5,
    # if an ImportError or AttributeError occurs while traversing a name,
    # a synthetic test that raises that error when run is returned, and it
    # is accumulated in result.errors like any other error.
    if test_name:
        errors = []
        for test, err in result.errors:
            if ("has no attribute '%s'" % test_name) not in err:
                errors.append((test, err))
            else:
                # The synthetic test was counted as having run, so undo that
                result.testsRun -= 1
        result.errors = errors

    print(result)
    for test, err in result.errors:
        print(test.id(), err)
    for test, err in result.failures:
        print(test.id(), err)
    if result.skipped:
        print('%d %s test%s SKIPPED:' % (len(result.skipped), toolname,
                                         's' if len(result.skipped) > 1 else ''))
        for skip_info in result.skipped:
            print('%s: %s' % (skip_info[0], skip_info[1]))
    if result.errors or result.failures:
        print('%s tests FAILED' % toolname)
        return 1
    return 0
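
# Example (sketch): collect results with unittest and report them; SomeTests
# is a hypothetical test class.
#
#     result = unittest.TestResult()
#     unittest.TestLoader().loadTestsFromTestCase(SomeTests).run(result)
#     sys.exit(ReportResult('sometool', None, result))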


def RunTestSuites(result, debug, verbosity, test_preserve_dirs, processes,
                  test_name, toolpath, test_class_list):
    """Run a series of test suites and collect the results

    Args:
        result: A unittest.TestResult object to add the results to
        debug: True to enable debugging, which shows a full stack trace on
            error
        verbosity: Verbosity level to use (0-4)
        test_preserve_dirs: True to preserve the input directory used by tests
            so that it can be examined afterwards (only useful for debugging
            tests). If a single test is selected (in args[0]) it also preserves
            the output directory for this test. Both directories are displayed
            on the command line.
        processes: Number of processes to use to run tests (None=same as #CPUs)
        test_name: Name of test to run, or None for all
        toolpath: List of paths to use for tools
        test_class_list: List of test classes to run
    """
    # Run any doctests first (the module list is currently empty)
    for module in []:
        suite = doctest.DocTestSuite(module)
        suite.run(result)

    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')
    if verbosity:
        sys.argv.append('-v%d' % verbosity)
    if toolpath:
        for path in toolpath:
            sys.argv += ['--toolpath', path]

    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for module in test_class_list:
        # Tell the test module about our arguments, if it is interested
        if hasattr(module, 'setup_test_args'):
            setup_test_args = getattr(module, 'setup_test_args')
            setup_test_args(preserve_indir=test_preserve_dirs,
                preserve_outdirs=test_preserve_dirs and test_name is not None,
                toolpath=toolpath, verbosity=verbosity)
        if test_name:
            try:
                suite.addTests(loader.loadTestsFromName(test_name, module))
            except AttributeError:
                continue
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))
    if use_concurrent and processes != 1:
        concurrent_suite = ConcurrentTestSuite(suite,
                fork_for_tests(processes or multiprocessing.cpu_count()))
        concurrent_suite.run(result)
    else:
        suite.run(result)
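
# Example (sketch): run several suites, possibly in parallel, and report the
# outcome; the test classes here are hypothetical.
#
#     result = unittest.TestResult()
#     RunTestSuites(result, debug=False, verbosity=1, test_preserve_dirs=False,
#                   processes=None, test_name=None, toolpath=[],
#                   test_class_list=[FuncTests, ImageTests])
#     sys.exit(ReportResult('mytool', None, result))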