Index: scripts/slave/unittests/expect_tests.py |
diff --git a/scripts/slave/unittests/expect_tests.py b/scripts/slave/unittests/expect_tests.py |
new file mode 100755 |
index 0000000000000000000000000000000000000000..6abdc6be690d18bf773f4bdebd5c70523c7e3901 |
--- /dev/null |
+++ b/scripts/slave/unittests/expect_tests.py |
@@ -0,0 +1,738 @@ |
+#!/usr/bin/python |
+# Copyright 2014 The Chromium Authors. All rights reserved. |
+# Use of this source code is governed by a BSD-style license that can be |
+# found in the LICENSE file. |
+ |
+ |
+import Queue |
+import argparse |
+import collections |
+import contextlib |
+import difflib |
+import glob |
+import inspect |
+import multiprocessing |
+import os |
+import pdb |
+import pprint |
+import re |
+import signal |
+import sys |
+import time |
+ |
+from collections import namedtuple |
+ |
+from cStringIO import StringIO |
+ |
+import test_env # pylint: disable=unused-import |
+ |
+import coverage |
+ |
+# Private types (must be module-level to be pickleable) |
+WriteAction = namedtuple('WriteAction', 'test why') |
+NoAction = namedtuple('NoAction', 'test') |
+DirSeen = namedtuple('DirSeen', 'dir') |
+Missing = namedtuple('Missing', 'test') |
+Fail = namedtuple('Fail', 'test diff') |
+Pass = namedtuple('Pass', 'test') |
+ |
+ |
+UnknownError = namedtuple('UnknownError', 'message') |
+TestError = namedtuple('TestError', 'test message') |
+_NonExistant = object() |
+ |
# Serialization
# Imports for the serializers live at the top of this section (yaml is
# optional; its absence just removes 'yaml' from SERIALIZERS).
import json

try:
  import yaml
except ImportError:
  yaml = None

# All formats an expectation file may legally use.
SUPPORTED_SERIALIZERS = {'json', 'yaml'}

# fmt -> (load(stream) -> data, dump(data, stream)). Only formats whose
# backing package is actually importable appear here.
SERIALIZERS = {}


def re_encode(obj):
  """Recursively convert unicode strings in |obj| to utf-8 encoded str.

  Handles dicts and lists; json.load does not produce tuples, so they are
  not handled specially. Any other object is returned unchanged.
  """
  if isinstance(obj, dict):
    return {re_encode(k): re_encode(v) for k, v in obj.iteritems()}
  elif isinstance(obj, list):
    return [re_encode(i) for i in obj]
  elif isinstance(obj, unicode):
    return obj.encode('utf-8')
  else:
    return obj


SERIALIZERS['json'] = (
    lambda s: re_encode(json.load(s)),
    lambda data, stream: json.dump(
        data, stream, sort_keys=True, indent=2, separators=(',', ': ')))

if yaml is not None:
  # Prefer the C-extension loader/dumper when the package provides one.
  # yaml.safe_load is not used directly because it would not pick up the
  # C implementations.
  _YAMLSafeLoader = getattr(yaml, 'CSafeLoader', yaml.SafeLoader)
  _YAMLSafeDumper = getattr(yaml, 'CSafeDumper', yaml.SafeDumper)
  SERIALIZERS['yaml'] = (
      lambda stream: yaml.load(stream, _YAMLSafeLoader),
      lambda data, stream: yaml.dump(
          data, stream, _YAMLSafeDumper, default_flow_style=False,
          encoding='utf-8'))
+ |
+ |
+# Result Queue Handlers |
+class _Handler(object): |
+ """Handler object. |
+ |
+ Defines 3 nested classes for each stage of the test pipeline. The pipeline |
+ looks like: |
+ |
+ -> -> |
+ -> jobs -> (main) |
+ GenLoop -> test_queue -> * -> result_queue -> ResultLoop |
+ -> RunLoop -> |
+ -> -> |
+ |
+ Each process will have an instance of one of the nested handler classes, which |
+ will be called on each test / result. |
+ |
+ You can skip the RunLoop phase by setting SKIP_RUNLOOP to True on your |
+ implementation class. |
+ |
+ Tips: |
+ * Only do printing in ResultLoop, since it's running on the main process. |
+ """ |
+ SKIP_RUNLOOP = False |
+ |
+ @staticmethod |
Vadim Sh.
2014/04/01 03:00:26
@classmethod? because, well, it's classier? :)
iannucci
2014/04/01 03:45:14
But it's not a classmethod... it's a static method
Vadim Sh.
2014/04/01 18:27:50
You "override" this method below in _TrainHandler.
|
+ def add_options(parser): |
+ """ |
+ @type parser: argparse.ArgumentParser() |
+ """ |
+ pass |
+ |
+ class GenLoop(object): |
Vadim Sh.
2014/04/01 03:00:26
Why Loop? It looks more like Stage to me...
iannucci
2014/04/01 03:45:14
Done.
|
+ def __init__(self, opts): |
+ self.opts = opts |
+ |
+ def __call__(self, test, put_result): |
+ """Called for each |test| generated which matches the test_globs. |
+ |
+ @param test: The generated Test object. |
Vadim Sh.
2014/04/01 03:00:26
That's a very verbose markup for type information
iannucci
2014/04/01 03:45:14
epydoc
|
+ @type test: Test() |
+ @param put_result: Call with any object to pass it to the ResultLoop. |
+ @return: True iff the RunLoop should run |test| |
+ @rtype: bool |
+ """ |
+ return True |
+ |
+ class RunLoop(object): |
+ def __init__(self, opts): |
+ self.opts = opts |
+ |
+ def __call__(self, test, result, put_result): |
+ """Called for each |test| which ran and generated |result|. |
+ |
+ @param test: The generated Test object which was run. |
+ @type test: Test() |
+ @param result: The result of running |test| |
+ @type result: Result() |
+ @param put_result: Call this with any object to pass it to the ResultLoop |
+ phase. |
+ """ |
+ put_result(result) |
+ |
+ class ResultLoop(object): |
+ def __init__(self, opts): |
+ self.opts = opts |
+ |
+ def __call__(self, obj): |
+ """Called for each obj result from GenLoop or RunLoop. |
+ |
+ @type obj: Anything passed to put_result in GenLoop or RunLoop. |
+ |
+ Works similarly to a SAX XML parser by dispatching to |
+ self.handle_{type(obj).__name__} |
+ |
+ So if |obj| is a Test, this would call self.handle_Test(obj). |
+ |
+ If you implement handle(obj), then it will be called for any |
+ otherwise unhandled obj. |
+ |
+ @return: False iff the program should ultimately terminate with an error |
+ code. Note that None does NOT count as an error condition. |
+ """ |
+ return getattr(self, 'handle_%s' % type(obj).__name__, self.handle)(obj) |
+ |
+ def handle(self, obj): |
+ if self.opts.verbose: |
+ print 'UNHANDLED:', obj |
+ return False |
+ |
+ def finalize(self, aborted): |
+ """Called after __call__() has been called for all results. |
+ |
+ @param aborted: True if the user aborted the run. |
+ @type aborted: bool |
+ """ |
+ pass |
+ |
+ |
+class _ListHandler(_Handler): |
+ """list all of the tests instead of running them.""" |
Vadim Sh.
2014/04/01 03:00:26
nit: List with capital L, same below. Lower case t
iannucci
2014/04/01 03:45:14
Done.
|
+ SKIP_RUNLOOP = True |
+ |
+ class GenLoop(_Handler.GenLoop): |
+ def __call__(self, test, put_result): |
+ put_result(test) |
+ |
+ class ResultLoop(_Handler.ResultLoop): |
+ @staticmethod |
+ def handle_Test(test): |
+ print test.name |
+ |
+ # TODO(iannucci): group tests by dir? |
+ # TODO(iannucci): print more data about the test in verbose mode? |
+ |
+ |
class _DebugHandler(_Handler):
  """Executes each test under the pdb debugger."""
  SKIP_RUNLOOP = True

  class GenLoop(_Handler.GenLoop):
    def __call__(self, test, put_result):
      # Forward the Test itself; debugging happens in the ResultLoop (main
      # process), so the RunLoop is skipped.
      put_result(test)

  class ResultLoop(_Handler.ResultLoop):
    @staticmethod
    def handle_Test(test):
      debugger = pdb.Pdb()
      for filename, lineno, func_name in test.breakpoints:
        debugger.set_break(filename, lineno, funcname=func_name)

      debugger.reset()

      def dispatch_thunk(*args):
        """Allows us to continue until the actual breakpoint."""
        val = debugger.trace_dispatch(*args)
        debugger.set_continue()
        sys.settrace(debugger.trace_dispatch)
        return val

      sys.settrace(dispatch_thunk)
      try:
        test.run()
      except pdb.bdb.BdbQuit:
        pass
      finally:
        debugger.quitting = 1
        sys.settrace(None)
+ |
+ |
+class _TrainHandler(_Handler): |
+ """write test expectations to disk.""" |
+ @staticmethod |
+ def add_options(parser): |
+ assert isinstance(parser, argparse.ArgumentParser) |
+ parser.add_argument( |
+ '--force', action='store_true', help=( |
+ 'Immediately write expectations to disk instead of determining if ' |
+ "they contain a diff from the current expectations." |
Vadim Sh.
2014/04/01 03:00:26
starting a string with '...' quotes and ending wit
iannucci
2014/04/01 03:45:14
lol, Done.
|
+ )) |
+ |
+ class GenLoop(_Handler.GenLoop): |
+ def __init__(self, *args): |
+ super(_TrainHandler.GenLoop, self).__init__(*args) |
+ self.dirs_seen = set() |
+ |
+ def __call__(self, test, put_result): |
+ if test.expectdir not in self.dirs_seen: |
+ try: |
+ os.makedirs(test.expectdir) |
+ except OSError: |
+ pass |
+ put_result(DirSeen(test.expectdir)) |
+ self.dirs_seen.add(test.expectdir) |
+ return True |
+ |
+ class RunLoop(_Handler.RunLoop): |
+ def __call__(self, test, result, put_result): |
+ if self.opts.force: |
+ _WriteNewData(test, result.data) |
+ put_result(WriteAction(test, 'forced')) |
+ return |
+ |
+ current, same_schema = _GetCurrentData(test) |
+ diff = _DiffData(current, result.data) |
+ if diff or not same_schema: |
+ _WriteNewData(test, result.data) |
+ if current is _NonExistant: |
+ why = 'missing' |
+ elif diff: |
+ why = 'diff' |
+ else: |
+ why = 'schema changed' |
+ put_result(WriteAction(test, why)) |
+ else: |
+ put_result(NoAction(test)) |
+ |
+ class ResultLoop(_Handler.ResultLoop): |
+ def __init__(self, opts): |
+ super(_TrainHandler.ResultLoop, self).__init__(opts) |
+ self.dirs_seen = set() |
+ self.files_expected = collections.defaultdict(set) |
+ self.start = time.time() |
+ self.num_tests = 0 |
+ |
+ def _record_expected(self, test): |
+ head, tail = os.path.split(test.expect_path()) |
+ self.files_expected[head].add(tail) |
+ |
+ def handle_DirSeen(self, dirseen): |
+ self.dirs_seen.add(dirseen.dir) |
+ |
+ def handle_NoAction(self, result): |
+ self._record_expected(result.test) |
+ if self.opts.verbose: |
+ print '%s did not change' % result.test.name |
+ |
+ def handle_WriteAction(self, result): |
+ self._record_expected(result.test) |
+ if not self.opts.quiet: |
+ test = result.test |
+ name = test.expect_path() if self.opts.verbose else test.name |
+ print 'Wrote %s: %s' % (name, result.why) |
+ |
+ def finalize(self, aborted): |
+ if not aborted and not self.opts.test_glob: |
+ for d in self.dirs_seen: |
+ expected = self.files_expected[d] |
+ for f in os.listdir(d): |
+ if f == 'OWNERS': |
+ continue |
+ if f not in expected: |
+ path = os.path.join(d, f) |
+ os.unlink(path) |
+ if self.opts.verbose: |
+ print 'Removed unexpected file', path |
+ if not self.opts.quiet: |
+ num_tests = sum(len(x) for x in self.files_expected.itervalues()) |
+ print 'Trained %d tests in %0.3fs' % ( |
+ num_tests, time.time() - self.start) |
+ |
+ |
+class _TestHandler(_Handler): |
+ """run the tests.""" |
+ class RunLoop(_Handler.RunLoop): |
+ def __call__(self, test, result, put_result): |
+ current, _ = _GetCurrentData(test) |
+ if current is _NonExistant: |
+ put_result(Missing(test)) |
+ else: |
+ diff = _DiffData(current, result.data) |
+ if not diff: |
+ put_result(Pass(test)) |
+ else: |
+ put_result(Fail(test, diff)) |
+ |
+ class ResultLoop(_Handler.ResultLoop): |
+ def __init__(self, *args): |
+ super(_TestHandler.ResultLoop, self).__init__(*args) |
+ self.err_out = StringIO() |
+ self.start = time.time() |
+ self.errors = collections.defaultdict(int) |
+ self.num_tests = 0 |
+ |
+ def emit(self, short, test, verbose): |
+ if self.opts.verbose: |
+ print >> sys.stdout, '%s ... %s' % (test.name if test else '????', |
+ verbose) |
+ else: |
+ sys.stdout.write(short) |
+ sys.stdout.flush() |
+ |
+ def add_result(self, msg_lines, test, header, category): |
+ print >> self.err_out |
+ print >> self.err_out, '=' * 70 |
+ if test is not None: |
+ print >> self.err_out, '%s: %s (%s)' % ( |
+ header, test.name, test.expect_path()) |
+ print >> self.err_out, '-' * 70 |
+ if msg_lines: |
+ print >> self.err_out, '\n'.join(msg_lines) |
+ self.errors[category] += 1 |
+ self.num_tests += 1 |
+ |
+ def handle_Pass(self, p): |
+ if not self.opts.quiet: |
+ self.emit('.', p.test, 'ok') |
+ self.num_tests += 1 |
+ |
+ def handle_Fail(self, fail): |
+ self.emit('F', fail.test, 'FAIL') |
+ self.add_result(fail.diff, fail.test, 'FAIL', 'failures') |
+ return False |
+ |
+ def handle_TestError(self, test_error): |
+ self.emit('E', test_error.test, 'ERROR') |
+ self.add_result([test_error.message], test_error.test, 'ERROR', 'errors') |
+ return False |
+ |
+ def handle_UnknownError(self, error): |
+ self.emit('U', None, 'UNKNOWN ERROR') |
+ self.add_result([error.message], None, 'UNKNOWN ERROR', 'unknown_errors') |
+ return False |
+ |
+ def handle_Missing(self, missing): |
+ self.emit('M', missing.test, 'MISSING') |
+ self.add_result([], missing.test, 'MISSING', 'missing') |
+ return False |
+ |
+ def finalize(self, aborted): |
+ # TODO(iannucci): print summary stats (and timing info?) |
+ buf = self.err_out.getvalue() |
+ if buf: |
+ print buf |
+ if not self.opts.quiet: |
+ print '-' * 70 |
+ print 'Ran %d tests in %0.3fs' % ( |
+ self.num_tests, time.time() - self.start) |
+ if aborted: |
+ print 'ABORTED' |
+ elif self.errors: |
+ print 'FAILED (%s)' % (', '.join('%s=%d' % i |
+ for i in self.errors.iteritems())) |
+ elif not self.opts.quiet: |
+ print 'OK' |
+ |
+ |
# Maps the CLI mode name (subcommand) to its implementing handler class.
HANDLERS = {
    'list': _ListHandler,
    'debug': _DebugHandler,
    'train': _TrainHandler,
    'test': _TestHandler,
}
+ |
+ |
+# Private engine helpers |
+@contextlib.contextmanager |
+def _cover(opts): |
+ if opts is not None: |
+ c = coverage.coverage(**opts) |
+ c._warn_no_data = False # pylint: disable=protected-access |
+ c.start() |
+ try: |
+ yield |
+ finally: |
+ if opts is not None: |
+ c.stop() |
+ c.save() |
+ |
+ |
# Private engine implementation
def _GetCurrentData(test):
  """Load the current expectation data for |test| from disk.

  Tries |test.fmt| first, then the remaining supported formats.

  @type test: Test()
  @returns: (data, same_fmt) where data is the deserialized expectation (or
            _NonExistant when no expectation file exists), and same_fmt
            indicates whether the file found was in the format |test|
            requested.
  @rtype: (dict, bool)
  """
  # Sorting on (s != test.fmt) puts test.fmt first (False < True).
  for fmt in sorted(SUPPORTED_SERIALIZERS, key=lambda s: s != test.fmt):
    if fmt not in SERIALIZERS:
      raise Exception('The package to support %s is not installed.' % fmt)
    path = test.expect_path(fmt)
    if os.path.exists(path):
      with open(path, 'rb') as f:
        return SERIALIZERS[fmt][0](f), fmt == test.fmt
  return _NonExistant, True
+ |
+ |
def _WriteNewData(test, data):
  """Serialize |data| to |test|'s expectation file on disk.

  @type test: Test()
  @raises Exception: when test.fmt is not a supported format, or its
      backing serializer package is not installed.
  """
  fmt = test.fmt
  if fmt not in SUPPORTED_SERIALIZERS:
    raise Exception('%s is not a supported serializer.' % fmt)
  if fmt not in SERIALIZERS:
    raise Exception('The package to support %s is not installed.' % fmt)
  with open(test.expect_path(), 'wb') as f:
    SERIALIZERS[fmt][1](data, f)
+ |
+ |
def _DiffData(old, new):
  """Compare old and new expectation data.

  @type old: dict (or _NonExistant)
  @type new: dict
  @return: [] when equal; |new| itself when |old| does not exist (callers
           rely only on truthiness in that case); otherwise a textual
           context diff as a list of lines.
  """
  if old is _NonExistant:
    return new
  if old == new:
    return []
  old_lines = pprint.pformat(old).splitlines()
  new_lines = pprint.pformat(new).splitlines()
  return list(difflib.context_diff(
      old_lines, new_lines,
      fromfile='expected', tofile='current', n=4, lineterm=''))
+ |
+ |
def _GenLoopProcess(gen, test_queue, result_queue, num_procs, kill_switch,
                    match_globs, cover_ctx, handler):
  """
  Generate `Test`'s from |gen|, and feed them into |test_queue|.

  Non-Test instances will be translated into `UnknownError` objects.

  Always feeds |num_procs| None sentinels into |test_queue| when done — even
  if |gen| raises — so the RunLoop processes terminate rather than blocking
  forever.

  @param gen: generator yielding Test() instances.
  @type test_queue: multiprocessing.Queue()
  @type result_queue: multiprocessing.Queue()
  @type num_procs: int
  @type kill_switch: multiprocessing.Event()
  @type match_globs: [str]
  @param cover_ctx: context manager from _cover().
  @type handler: _Handler.GenLoop()
  """
  try:
    # Positive globs select tests; globs starting with '-' deselect them.
    matcher = re.compile(
        '^%s$' % '|'.join('(?:%s)' % glob.fnmatch.translate(g)
                          for g in match_globs if g[0] != '-'))
    if matcher.pattern == '^$':
      # No positive globs given: match everything.
      matcher = re.compile('^.*$')

    neg_matcher = re.compile(
        '^%s$' % '|'.join('(?:%s)' % glob.fnmatch.translate(g[1:])
                          for g in match_globs if g[0] == '-'))

    try:
      with cover_ctx:
        for test in gen():
          if kill_switch.is_set():
            break

          if not isinstance(test, Test):
            result_queue.put_nowait(
                UnknownError(
                    'Got non-Test instance from generator: %r' % test))
            continue

          if not neg_matcher.match(test.name) and matcher.match(test.name):
            if handler(test, result_queue.put_nowait):
              test_queue.put_nowait(test)
    finally:
      # Emit the end-of-stream sentinels even when the generator throws;
      # otherwise the RunLoop processes would never see a None and would
      # poll forever.
      for _ in xrange(num_procs):
        test_queue.put_nowait(None)
  except KeyboardInterrupt:
    pass
+ |
+ |
def _RunLoopProcess(test_queue, result_queue, kill_switch, cover_ctx,
                    handler):
  """
  Consume `Test` instances from |test_queue|, run them, and push the `Result`s
  into |result_queue|.

  Stops on a None sentinel from |test_queue| or when |kill_switch| is set.
  Generates coverage data as a side-effect.

  @type test_queue: multiprocessing.Queue()
  @type result_queue: multiprocessing.Queue()
  @type kill_switch: multiprocessing.Event()
  @param cover_ctx: context manager from _cover().
  @type handler: _Handler.RunLoop()
  """
  import traceback
  try:
    with cover_ctx:
      while not kill_switch.is_set():
        try:
          test = test_queue.get(timeout=0.1)
          if test is None:
            break
        except Queue.Empty:
          continue

        try:
          rslt = test.run()
          if not isinstance(rslt, Result):
            result_queue.put_nowait(
                TestError(test, 'Got non-Result instance from test: %r'
                          % rslt))
            continue

          handler(test, rslt, result_queue.put_nowait)
        except Exception:
          # Report the full stacktrace so failures are debuggable from the
          # main process, not just the exception's string form.
          result_queue.put_nowait(TestError(test, traceback.format_exc()))
  except KeyboardInterrupt:
    pass
+ |
+ |
# Private CLI implementation
def parse_args(args):
  """Parse command-line |args| (or sys.argv[1:] when falsy).

  @return: (opts, handler, test_globs) — the parsed options (with mode and
           test_list scrubbed off), the selected _Handler subclass, and the
           list of glob filters.
  """
  args = args or sys.argv[1:]

  # Set the default mode if not specified and not passing --help
  search_names = set(HANDLERS.keys() + ['-h', '--help'])
  if not any(arg in search_names for arg in args):
    args.insert(0, 'test')

  parser = argparse.ArgumentParser()
  subparsers = parser.add_subparsers(
      title='Mode (default "test")', dest='mode',
      help='See `[mode] --help` for more options.')

  for k, h in HANDLERS.iteritems():
    sp = subparsers.add_parser(k, help=h.__doc__)
    h.add_options(sp)

    mg = sp.add_mutually_exclusive_group()
    mg.add_argument(
        '--quiet', action='store_true',
        help='be quiet (only print failures)')
    mg.add_argument(
        '--verbose', action='store_true', help='be verbose')

    if not h.SKIP_RUNLOOP:
      sp.add_argument(
          '--jobs', metavar='N', type=int,
          default=multiprocessing.cpu_count(),
          help='run N jobs in parallel (default %(default)s)')

    sp.add_argument(
        '--test_list', metavar='FILE',
        help='take the list of test globs from the FILE (use "-" for stdin)')

    sp.add_argument(
        'test_glob', nargs='*', help=(
            'glob to filter the tests acted on. If the glob begins with "-" '
            'then it acts as a negation glob and anything which matches it '
            'will be skipped.'))

  opts = parser.parse_args(args)

  if not hasattr(opts, 'jobs'):
    # Modes with SKIP_RUNLOOP have no --jobs flag; no worker processes.
    opts.jobs = 0
  elif opts.jobs < 1:
    parser.error('--jobs was less than 1')

  if opts.test_list:
    # '-' means "read the glob list from stdin", as promised by --help.
    if opts.test_list == '-':
      opts.test_glob += [l.strip() for l in sys.stdin.readlines()]
    else:
      with open(opts.test_list, 'rb') as tl:
        opts.test_glob += [l.strip() for l in tl.readlines()]

  test_globs = opts.test_glob
  handler = HANDLERS[opts.mode]

  # Scrub implementation details off of opts so downstream code can't come
  # to depend on them directly.
  del opts.test_list
  del opts.mode

  return opts, handler, test_globs
+ |
+ |
+# Public |
+Result = namedtuple('Result', 'data') |
+ |
+ |
+_Test = namedtuple('Test', |
+ 'name func args kwargs expectdir expectbase fmt breakpoints') |
Vadim Sh.
2014/04/01 03:00:26
nit: expect_dir, expect_base.
iannucci
2014/04/01 03:45:14
Done.
|
+ |
class Test(_Test):
  def __new__(cls, name, func, args=(), kwargs=None, expectdir=None,
              expectbase=None, fmt='json', breakpoints=None, break_funcs=()):
    """
    @param name: globally unique name of this test.
    @param func: callable invoked (with |args|/|kwargs|) to run the test.
    @param expectdir: directory holding the expectation file.
    @param expectbase: basename (sans extension) of the expectation file;
        defaults to |name|.
    @param fmt: serialization format of the expectation file.
    @param breakpoints: list of (path, lineno, funcname) tuples for the
        debug mode to break at.
    @param break_funcs: functions for the debug mode to break at; when
        neither |breakpoints| nor |break_funcs| is given, defaults to
        breaking at |func| itself.
    """
    kwargs = kwargs or {}

    breakpoints = breakpoints or []
    if not breakpoints or break_funcs:
      # Resolve each function to a (file, line, name) tuple right away so
      # the Test stays a plain, pickleable record.
      for f in (break_funcs or (func,)):
        if hasattr(f, 'im_func'):
          f = f.im_func  # unwrap bound/unbound methods (python 2)
        breakpoints.append((f.func_code.co_filename,
                            f.func_code.co_firstlineno,
                            f.func_code.co_name))

    return super(Test, cls).__new__(cls, name, func, args, kwargs, expectdir,
                                    expectbase, fmt, breakpoints)

  def expect_path(self, fmt=None):
    """Path of this test's expectation file.

    @param fmt: extension override; defaults to self.fmt.
    """
    name = self.expectbase or self.name
    # Replace characters which are not safe in filenames.
    name = ''.join('_' if c in '<>:"\\/|?*\0' else c for c in name)
    return os.path.join(self.expectdir, name + ('.%s' % (fmt or self.fmt)))

  def run(self):
    return self.func(*self.args, **self.kwargs)
+ |
+ |
+def main(test_gen, coverage_includes=None, coverage_omits=None, args=None): |
Vadim Sh.
2014/04/01 03:00:26
Doc string.
iannucci
2014/04/01 06:22:08
Done.
|
+ opts, handler, test_globs = parse_args(args) |
+ result_handler = handler.ResultLoop(opts) |
+ |
+ kill_switch = multiprocessing.Event() |
+ signal.signal(signal.SIGINT, lambda *_: kill_switch.set()) |
Vadim Sh.
2014/04/01 03:00:26
what about SIGTERM?
iannucci
2014/04/01 03:45:14
Done.
|
+ |
+ if handler.SKIP_RUNLOOP: |
+ coverage_opts = None |
+ else: |
+ coverage_opts = { |
+ 'include': coverage_includes, |
+ 'omit': coverage_omits, |
+ 'data_suffix': True |
+ } |
+ c = coverage.coverage(**coverage_opts) |
+ c.erase() |
+ cover_ctx = _cover(coverage_opts) |
+ |
+ test_queue = multiprocessing.Queue() |
+ result_queue = multiprocessing.Queue() |
+ |
+ test_gen_args = ( |
+ test_gen, test_queue, result_queue, opts.jobs, kill_switch, |
+ test_globs, cover_ctx, handler.GenLoop(opts)) |
+ |
+ procs = [] |
+ if handler.SKIP_RUNLOOP: |
+ _GenLoopProcess(*test_gen_args) |
+ else: |
+ procs = [multiprocessing.Process( |
+ target=_GenLoopProcess, args=test_gen_args)] |
Vadim Sh.
2014/04/01 03:00:26
Maybe always run _GenLoopProcess in current proces
iannucci
2014/04/01 03:45:14
It could take a while though, and we want to start
|
+ |
+ procs += [ |
+ multiprocessing.Process( |
+ target=_RunLoopProcess, args=( |
+ test_queue, result_queue, kill_switch, cover_ctx, |
+ handler.RunLoop(opts))) |
+ for _ in xrange(opts.jobs) |
+ ] |
+ |
+ for p in procs: |
+ p.daemon = True |
+ p.start() |
+ |
+ error = False |
+ while not kill_switch.is_set(): |
+ while not kill_switch.is_set(): |
+ try: |
+ error |= result_handler(result_queue.get(timeout=0.1)) is False |
Vadim Sh.
2014/04/01 03:00:26
'is False' is scetchy. Maybe 'is Failure' where Fa
iannucci
2014/04/01 03:45:14
Unfortunately the result is coming across a pickle
|
+ except Queue.Empty: |
+ break |
+ |
+ procs = [p for p in procs if p.is_alive()] |
+ if not procs: |
Vadim Sh.
2014/04/01 03:00:26
nit: if not any(p.is_alive() for p in procs):
iannucci
2014/04/01 03:45:14
Done.
|
+ break |
+ result_handler.finalize(kill_switch.is_set()) |
+ |
+ assert kill_switch.is_set() or result_queue.empty() |
+ |
+ if not handler.SKIP_RUNLOOP: |
+ c.combine() |
Vadim Sh.
2014/04/01 03:00:26
How does it work?
iannucci
2014/04/01 03:45:14
magic.... it looks for all files .coverage.hostnam
|
+ if not kill_switch.is_set() and not opts.test_glob: |
+ outf = StringIO() |
+ total_covered = c.report(file=outf) |
+ summary = outf.getvalue().replace('%- 15s' % 'Name', 'Coverage Report', 1) |
+ if opts.verbose: |
+ print summary |
+ elif total_covered != 100.0: |
+ lines = summary.splitlines() |
+ lines[2:-2] = [l for l in lines[2:-2] |
+ if not l.strip().endswith('100%')] |
+ print '\n'.join(lines) |
+ print 'FATAL: Recipes configs are not at 100% coverage.' |
+ sys.exit(2) |
+ |
+ sys.exit(error or kill_switch.is_set()) |