Index: scripts/slave/unittests/expect_tests/main.py
diff --git a/scripts/slave/unittests/expect_tests/main.py b/scripts/slave/unittests/expect_tests/main.py
index 5a266b1136eea7e6caff7030d0a86a12a56c3bfe..ba10440d7bc4abaf86e6becd2a3c0af1648520dd 100644
--- a/scripts/slave/unittests/expect_tests/main.py
+++ b/scripts/slave/unittests/expect_tests/main.py
@@ -4,14 +4,20 @@
 import argparse
 import multiprocessing
+import pkgutil
 import sys
 from .cover import CoverageContext
-from . import handle_list, handle_debug, handle_train, handle_test
+from . import handle_list, handle_debug, handle_train, handle_test, util
 from .pipeline import result_loop
+from .unittest_helper import _is_unittest, UnittestTestCase
+
+
+ALL_MODULES = object()
+
 HANDLERS = {
   'list': handle_list.ListHandler,
@@ -35,8 +41,8 @@ class _test_completer(object):
       for k, v in kwargs.iteritems():
         setattr(self, k, v)
-  def __init__(self, gen):
-    self._gen = gen
+  def __init__(self, test_modules):
+    self._test_modules = test_modules
   def __call__(self, prefix, **_):
     handle_list.ListHandler.COMPLETION_LIST = []
@@ -45,12 +51,13 @@ class _test_completer(object):
         test_glob=[prefix],
         jobs=1,
     )
-    ctx = CoverageContext('', [], [], False, None, None, False)
-    result_loop(self._gen, ctx.create_subprocess_context(), options)
+    ctx = CoverageContext(False, False, False)
+    test_gens = get_test_gens(self._test_modules)
+    result_loop(test_gens, ctx.create_subprocess_context(), options)
     return handle_list.ListHandler.COMPLETION_LIST
-def _parse_args(args, test_gen):
+def _parse_args(args, test_modules):
   args = args or sys.argv[1:]
   # Set the default mode if not specified and not passing --help
@@ -83,6 +90,10 @@ def _parse_args(args, test_gen):
       default=multiprocessing.cpu_count(),
       help='run N jobs in parallel (default %(default)s)')
+  sp.add_argument(
+      '--force_coverage', action='store_true',
+      help='Enable coverage report even when specifying a test filter.')
+
   sp.add_argument(
       '--test_list', metavar='FILE',
      help='take the list of test globs from the FILE (use "-" for stdin)'
@@ -99,7 +110,7 @@ def _parse_args(args, test_gen):
           'then it acts as a negation glob and anything which matches it '
           'will be skipped. If a glob doesn\'t have "*" in it, "*" will be '
           'implicitly appended to the end')
-  ).completer = _test_completer(test_gen)
+  ).completer = _test_completer(test_modules)
   opts = parser.parse_args(args)
@@ -121,40 +132,73 @@ def _parse_args(args, test_gen):
   return opts
-def main(name, test_gen, cover_branches=False, args=None):
+def get_test_gens(test_modules):
+  test_gens = []
+  if not test_modules or test_modules is ALL_MODULES:
+    # if we're running directly
+    if __name__ == '__main__' or test_modules is ALL_MODULES:
+      test_modules = []
+      for importer, modname, ispkg in pkgutil.walk_packages(path=['.']):
+        if not ispkg and modname.endswith('_test'):
+          if modname in sys.modules:
+            test_modules.append(sys.modules[modname])
+          else:
+            test_modules.append(
+                importer.find_module(modname).load_module(modname))
+    else: # a wrapper main() script
+      test_modules = [sys.modules['__main__']]
+  for mod in test_modules:
+    for obj in mod.__dict__.values():
+      if util.is_test_generator(obj):
+        test_gens.append(obj)
+      elif _is_unittest(obj):
+        test_gens.append(UnittestTestCase(obj))
+  return test_gens
+
+
+# TODO(iannucci): have Test determine cover_branches
+def main(cover_branches=False, test_modules=None, args=None):
   """Entry point for tests using expect_tests.
   Example:
-    import expect_tests
+    >>> import expect_tests
-    def happy_fn(val):
-      # Usually you would return data which is the result of some deterministic
-      # computation.
-      return expect_tests.Result({'neet': '%s string value' % val})
+    >>> def happy_fn(val):
+    >>>   # Usually you would return data which is the result of some
+    >>>   # deterministic computation.
+    >>>   return expect_tests.Result({'neet': '%s string value' % val})
-    def Gen():
-      yield expect_tests.Test('happy', happy_fn, args=('happy',))
+    >>> @expect_tests.test_generator
+    >>> def gen():
+    >>>   yield expect_tests.Test(
+    >>>       __package__ + '.happy',
+    >>>       expect_tests.FuncCall(happy_fn, 'happy'))
-    if __name__ == '__main__':
-      expect_tests.main('happy_test_suite', Gen)
+    >>> if __name__ == '__main__':
+    >>>   expect_tests.main(cover_branches=True)
-  @param name: Name of the test suite.
-  @param test_gen: A Generator which yields Test objects.
   @param cover_branches: Include branch coverage data (rather than just line
                          coverage)
+  @param test_modules: Modules containing expect_tests generators and/or
+                       unittests. Defaults to the __main__ module, or if
+                       this script is invoked directly, all '_test' modules
+                       under the current working directory.
   @param args: Commandline args (starting at argv[1])
   """
   try:
-    opts = _parse_args(args, test_gen)
+    opts = _parse_args(args, test_modules)
-    cover_ctx = CoverageContext(name, cover_branches, opts.html_report,
+    cover_ctx = CoverageContext(cover_branches, opts.html_report,
                                 not opts.handler.SKIP_RUNLOOP)
+    with cover_ctx.create_subprocess_context():
+      test_gens = get_test_gens(test_modules)
+
     error, killed = result_loop(
-        test_gen, cover_ctx.create_subprocess_context(), opts)
+        test_gens, cover_ctx.create_subprocess_context(), opts)
     cover_ctx.cleanup()
-    if not killed and not opts.test_glob:
+    if not killed and (opts.force_coverage or not opts.test_glob):
       if not cover_ctx.report(opts.verbose):
         sys.exit(2)
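
For reference, the docstring example in the hunk above corresponds to a standalone test file roughly like the sketch below. This is only an illustration of the new calling convention, not part of the change: the file name sample_test.py is hypothetical, and the `__package__ + '.happy'` test name is taken verbatim from the docstring, so it assumes the test module lives inside a package.

# sample_test.py -- illustrative sketch only; mirrors the docstring example
# in the diff above rather than documenting additional behaviour.
import expect_tests


def happy_fn(val):
  # Usually you would return data which is the result of some
  # deterministic computation.
  return expect_tests.Result({'neet': '%s string value' % val})


@expect_tests.test_generator
def gen():
  # __package__ assumes this module is imported as part of a package, as in
  # the docstring above; a plain string name should also work as the test id.
  yield expect_tests.Test(
      __package__ + '.happy',
      expect_tests.FuncCall(happy_fn, 'happy'))


if __name__ == '__main__':
  # test_modules defaults to the __main__ module, so only cover_branches is
  # passed explicitly here.
  expect_tests.main(cover_branches=True)

Note also the new --force_coverage flag introduced above: without it, the coverage report is still skipped whenever a test_glob filter is passed (as before); with it, the report is produced even for a filtered run.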