OLD | NEW |
---|---|
(Empty) | |
1 #!/usr/bin/python | |
2 # Copyright 2014 The Chromium Authors. All rights reserved. | |
3 # Use of this source code is governed by a BSD-style license that can be | |
4 # found in the LICENSE file. | |
5 | |
6 | |
7 import Queue | |
8 import argparse | |
9 import collections | |
10 import contextlib | |
11 import difflib | |
12 import glob | |
13 import inspect | |
14 import multiprocessing | |
15 import os | |
16 import pdb | |
17 import pprint | |
18 import re | |
19 import signal | |
20 import sys | |
21 import time | |
22 | |
23 from collections import namedtuple | |
24 | |
25 from cStringIO import StringIO | |
26 | |
27 import test_env # pylint: disable=unused-import | |
28 | |
29 import coverage | |
30 | |
31 # Private types (must be module-level to be pickleable) | |
32 WriteAction = namedtuple('WriteAction', 'test why') | |
33 NoAction = namedtuple('NoAction', 'test') | |
34 DirSeen = namedtuple('DirSeen', 'dir') | |
35 Missing = namedtuple('Missing', 'test') | |
36 Fail = namedtuple('Fail', 'test diff') | |
37 Pass = namedtuple('Pass', 'test') | |
38 | |
39 | |
40 UnknownError = namedtuple('UnknownError', 'message') | |
41 TestError = namedtuple('TestError', 'test message') | |
42 _NonExistant = object() | |
43 | |
44 # Serialization | |
45 SUPPORTED_SERIALIZERS = {'json', 'yaml'} | |
46 SERIALIZERS = {} | |
47 | |
48 def re_encode(obj): | |
49 if isinstance(obj, dict): | |
50 return {re_encode(k): re_encode(v) for k, v in obj.iteritems()} | |
51 elif isinstance(obj, list): | |
Vadim Sh.
2014/04/01 03:00:26
+ tuple?
iannucci
2014/04/01 03:45:14
I don't think that json.load can generate a tuple
| |
52 return [re_encode(i) for i in obj] | |
53 elif isinstance(obj, unicode): | |
54 return obj.encode('utf-8') | |
55 else: | |
56 return obj | |
57 | |
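For illustration, a minimal sketch (Python 2 only, since `json` there decodes strings as `unicode`) of what `re_encode` does to a freshly decoded document:

```python
# Minimal sketch, Python 2 only: json.loads returns unicode objects,
# and re_encode recursively converts them to utf-8 encoded str.
import json

raw = json.loads('{"name": "caf\\u00e9", "steps": ["a", "b"]}')
assert isinstance(raw[u'name'], unicode)
data = re_encode(raw)
assert data == {'name': 'caf\xc3\xa9', 'steps': ['a', 'b']}
```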
58 import json | |
Vadim Sh.
2014/04/01 03:00:26
please, keep imports on top :(
iannucci
2014/04/01 03:45:14
Done.
| |
59 SERIALIZERS['json'] = ( | |
60 lambda s: re_encode(json.load(s)), | |
61 lambda data, stream: json.dump( | |
62 data, stream, sort_keys=True, indent=2, separators=(',', ': '))) | |
63 | |
64 try: | |
65 import yaml | |
Vadim Sh.
2014/04/01 03:00:26
on top:
try:
import yaml
except ImportError:
y
iannucci
2014/04/01 03:45:14
Done.
| |
66 _YAMLSafeLoader = getattr(yaml, 'CSafeLoader', yaml.SafeLoader) | |
67 _YAMLSafeDumper = getattr(yaml, 'CSafeDumper', yaml.SafeDumper) | |
68 SERIALIZERS['yaml'] = ( | |
69 lambda stream: yaml.load(stream, _YAMLSafeLoader), | |
Vadim Sh.
2014/04/01 03:00:26
why not just yaml.safe_load?
iannucci
2014/04/01 03:45:14
Because last I checked (~December), it defaults to
Vadim Sh.
2014/04/01 18:27:50
:-/ odd. So C extension is only usable if you are
| |
70 lambda data, stream: yaml.dump( | |
71 data, stream, _YAMLSafeDumper, default_flow_style=False, | |
72 encoding='utf-8')) | |
73 except ImportError: | |
74 pass | |
75 | |
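As a usage note, each `SERIALIZERS` entry is a `(load, dump)` pair keyed by format name. A round-trip through an expectation file could look like the sketch below; `save_expectation` and `load_expectation` are hypothetical helpers, not part of this module (the real callers are `_GetCurrentData` and `_WriteNewData`):

```python
# Hypothetical helpers showing how the (load, dump) pairs are used.
def save_expectation(path, data, fmt='json'):
  _, dump = SERIALIZERS[fmt]
  with open(path, 'wb') as f:
    dump(data, f)

def load_expectation(path, fmt='json'):
  load, _ = SERIALIZERS[fmt]
  with open(path, 'rb') as f:
    return load(f)

save_expectation('example.json', {'steps': ['checkout', 'compile']})
assert load_expectation('example.json') == {'steps': ['checkout', 'compile']}
```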
76 | |
77 # Result Queue Handlers | |
78 class _Handler(object): | |
79 """Handler object. | |
80 | |
81 Defines 3 nested classes for each stage of the test pipeline. The pipeline | |
82 looks like: | |
83 | |
84                             ->  RunLoop  -> | |
85                  (jobs)     ->  RunLoop  ->                        (main) | |
86   GenLoop -> test_queue     ->  RunLoop  ->     result_queue -> ResultLoop | |
87                             ->  RunLoop  -> | |
88                             ->  RunLoop  -> | |
89 | |
90 Each process will have an instance of one of the nested handler classes, which | |
91 will be called on each test / result. | |
92 | |
93 You can skip the RunLoop phase by setting SKIP_RUNLOOP to True on your | |
94 implementation class. | |
95 | |
96 Tips: | |
97 * Only do printing in ResultLoop, since it's running on the main process. | |
98 """ | |
99 SKIP_RUNLOOP = False | |
100 | |
101 @staticmethod | |
Vadim Sh.
2014/04/01 03:00:26
@classmethod? because, well, it's classier? :)
iannucci
2014/04/01 03:45:14
But it's not a classmethod... it's a static method
Vadim Sh.
2014/04/01 18:27:50
You "override" this method below in _TrainHandler.
| |
102 def add_options(parser): | |
103 """ | |
104 @type parser: argparse.ArgumentParser() | |
105 """ | |
106 pass | |
107 | |
108 class GenLoop(object): | |
Vadim Sh.
2014/04/01 03:00:26
Why Loop? It looks more like Stage to me...
iannucci
2014/04/01 03:45:14
Done.
| |
109 def __init__(self, opts): | |
110 self.opts = opts | |
111 | |
112 def __call__(self, test, put_result): | |
113 """Called for each |test| generated which matches the test_globs. | |
114 | |
115 @param test: The generated Test object. | |
Vadim Sh.
2014/04/01 03:00:26
That's a very verbose markup for type information
iannucci
2014/04/01 03:45:14
epydoc
| |
116 @type test: Test() | |
117 @param put_result: Call with any object to pass it to the ResultLoop. | |
118 @return: True iff the RunLoop should run |test| | |
119 @rtype: bool | |
120 """ | |
121 return True | |
122 | |
123 class RunLoop(object): | |
124 def __init__(self, opts): | |
125 self.opts = opts | |
126 | |
127 def __call__(self, test, result, put_result): | |
128 """Called for each |test| which ran and generated |result|. | |
129 | |
130 @param test: The generated Test object which was run. | |
131 @type test: Test() | |
132 @param result: The result of running |test| | |
133 @type result: Result() | |
134 @param put_result: Call this with any object to pass it to the ResultLoop | |
135 phase. | |
136 """ | |
137 put_result(result) | |
138 | |
139 class ResultLoop(object): | |
140 def __init__(self, opts): | |
141 self.opts = opts | |
142 | |
143 def __call__(self, obj): | |
144 """Called for each obj result from GenLoop or RunLoop. | |
145 | |
146 @type obj: Anything passed to put_result in GenLoop or RunLoop. | |
147 | |
148 Works similarly to a SAX XML parser by dispatching to | |
149 self.handle_{type(obj).__name__} | |
150 | |
151 So if |obj| is a Test, this would call self.handle_Test(obj). | |
152 | |
153 If you implement handle(obj), then it will be called for any | |
154 otherwise unhandled obj. | |
155 | |
156 @return: False iff the program should ultimately terminate with an error | |
157 code. Note that None does NOT count as an error condition. | |
158 """ | |
159 return getattr(self, 'handle_%s' % type(obj).__name__, self.handle)(obj) | |
160 | |
161 def handle(self, obj): | |
162 if self.opts.verbose: | |
163 print 'UNHANDLED:', obj | |
164 return False | |
165 | |
166 def finalize(self, aborted): | |
167 """Called after __call__() has been called for all results. | |
168 | |
169 @param aborted: True if the user aborted the run. | |
170 @type aborted: bool | |
171 """ | |
172 pass | |
173 | |
174 | |
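To make the pipeline in the `_Handler` docstring concrete, here is a hedged sketch of a hypothetical handler (not part of this CL) that only counts the generated tests; it shows the GenLoop -> result_queue -> ResultLoop flow and the `handle_<TypeName>` dispatch described above:

```python
# Hypothetical handler, for illustration only.
class _CountHandler(_Handler):
  """Count the generated tests instead of running them."""
  SKIP_RUNLOOP = True

  class GenLoop(_Handler.GenLoop):
    def __call__(self, test, put_result):
      put_result(test)      # forward the Test object to the ResultLoop
      return False          # nothing for the RunLoop to do

  class ResultLoop(_Handler.ResultLoop):
    def __init__(self, opts):
      super(_CountHandler.ResultLoop, self).__init__(opts)
      self.count = 0

    def handle_Test(self, test):   # reached via the handle_%s dispatch
      self.count += 1

    def finalize(self, aborted):
      if not aborted:
        print 'generated %d tests' % self.count
```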
175 class _ListHandler(_Handler): | |
176 """list all of the tests instead of running them.""" | |
Vadim Sh.
2014/04/01 03:00:26
nit: List with capital L, same below. Lower case t
iannucci
2014/04/01 03:45:14
Done.
| |
177 SKIP_RUNLOOP = True | |
178 | |
179 class GenLoop(_Handler.GenLoop): | |
180 def __call__(self, test, put_result): | |
181 put_result(test) | |
182 | |
183 class ResultLoop(_Handler.ResultLoop): | |
184 @staticmethod | |
185 def handle_Test(test): | |
186 print test.name | |
187 | |
188 # TODO(iannucci): group tests by dir? | |
189 # TODO(iannucci): print more data about the test in verbose mode? | |
190 | |
191 | |
192 class _DebugHandler(_Handler): | |
193 """execute each test under the pdb debugger.""" | |
194 SKIP_RUNLOOP = True | |
195 class GenLoop(_Handler.GenLoop): | |
Vadim Sh.
2014/04/01 03:00:26
nit: new line
iannucci
2014/04/01 03:45:14
Done.
| |
196 def __call__(self, test, put_result): | |
197 put_result(test) | |
198 | |
199 class ResultLoop(_Handler.ResultLoop): | |
200 @staticmethod | |
201 def handle_Test(test): | |
202 dbg = pdb.Pdb() | |
203 for path, line, funcname in test.breakpoints: | |
204 dbg.set_break(path, line, funcname=funcname) | |
205 | |
206 dbg.reset() | |
207 | |
208 def dispatch_thunk(*args): | |
209 """Allows us to continue until the actual breakpoint.""" | |
210 val = dbg.trace_dispatch(*args) | |
211 dbg.set_continue() | |
212 sys.settrace(dbg.trace_dispatch) | |
213 return val | |
214 sys.settrace(dispatch_thunk) | |
215 try: | |
216 test.run() | |
217 except pdb.bdb.BdbQuit: | |
218 pass | |
219 finally: | |
220 dbg.quitting = 1 | |
221 sys.settrace(None) | |
222 | |
223 | |
224 class _TrainHandler(_Handler): | |
225 """write test expectations to disk.""" | |
226 @staticmethod | |
227 def add_options(parser): | |
228 assert isinstance(parser, argparse.ArgumentParser) | |
229 parser.add_argument( | |
230 '--force', action='store_true', help=( | |
231 'Immediately write expectations to disk instead of determining if ' | |
232 "they contain a diff from the current expectations." | |
Vadim Sh.
2014/04/01 03:00:26
starting a string with '...' quotes and ending wit
iannucci
2014/04/01 03:45:14
lol, Done.
| |
233 )) | |
234 | |
235 class GenLoop(_Handler.GenLoop): | |
236 def __init__(self, *args): | |
237 super(_TrainHandler.GenLoop, self).__init__(*args) | |
238 self.dirs_seen = set() | |
239 | |
240 def __call__(self, test, put_result): | |
241 if test.expectdir not in self.dirs_seen: | |
242 try: | |
243 os.makedirs(test.expectdir) | |
244 except OSError: | |
245 pass | |
246 put_result(DirSeen(test.expectdir)) | |
247 self.dirs_seen.add(test.expectdir) | |
248 return True | |
249 | |
250 class RunLoop(_Handler.RunLoop): | |
251 def __call__(self, test, result, put_result): | |
252 if self.opts.force: | |
253 _WriteNewData(test, result.data) | |
254 put_result(WriteAction(test, 'forced')) | |
255 return | |
256 | |
257 current, same_schema = _GetCurrentData(test) | |
258 diff = _DiffData(current, result.data) | |
259 if diff or not same_schema: | |
260 _WriteNewData(test, result.data) | |
261 if current is _NonExistant: | |
262 why = 'missing' | |
263 elif diff: | |
264 why = 'diff' | |
265 else: | |
266 why = 'schema changed' | |
267 put_result(WriteAction(test, why)) | |
268 else: | |
269 put_result(NoAction(test)) | |
270 | |
271 class ResultLoop(_Handler.ResultLoop): | |
272 def __init__(self, opts): | |
273 super(_TrainHandler.ResultLoop, self).__init__(opts) | |
274 self.dirs_seen = set() | |
275 self.files_expected = collections.defaultdict(set) | |
276 self.start = time.time() | |
277 self.num_tests = 0 | |
278 | |
279 def _record_expected(self, test): | |
280 head, tail = os.path.split(test.expect_path()) | |
281 self.files_expected[head].add(tail) | |
282 | |
283 def handle_DirSeen(self, dirseen): | |
284 self.dirs_seen.add(dirseen.dir) | |
285 | |
286 def handle_NoAction(self, result): | |
287 self._record_expected(result.test) | |
288 if self.opts.verbose: | |
289 print '%s did not change' % result.test.name | |
290 | |
291 def handle_WriteAction(self, result): | |
292 self._record_expected(result.test) | |
293 if not self.opts.quiet: | |
294 test = result.test | |
295 name = test.expect_path() if self.opts.verbose else test.name | |
296 print 'Wrote %s: %s' % (name, result.why) | |
297 | |
298 def finalize(self, aborted): | |
299 if not aborted and not self.opts.test_glob: | |
300 for d in self.dirs_seen: | |
301 expected = self.files_expected[d] | |
302 for f in os.listdir(d): | |
303 if f == 'OWNERS': | |
304 continue | |
305 if f not in expected: | |
306 path = os.path.join(d, f) | |
307 os.unlink(path) | |
308 if self.opts.verbose: | |
309 print 'Removed unexpected file', path | |
310 if not self.opts.quiet: | |
311 num_tests = sum(len(x) for x in self.files_expected.itervalues()) | |
312 print 'Trained %d tests in %0.3fs' % ( | |
313 num_tests, time.time() - self.start) | |
314 | |
315 | |
316 class _TestHandler(_Handler): | |
317 """run the tests.""" | |
318 class RunLoop(_Handler.RunLoop): | |
319 def __call__(self, test, result, put_result): | |
320 current, _ = _GetCurrentData(test) | |
321 if current is _NonExistant: | |
322 put_result(Missing(test)) | |
323 else: | |
324 diff = _DiffData(current, result.data) | |
325 if not diff: | |
326 put_result(Pass(test)) | |
327 else: | |
328 put_result(Fail(test, diff)) | |
329 | |
330 class ResultLoop(_Handler.ResultLoop): | |
331 def __init__(self, *args): | |
332 super(_TestHandler.ResultLoop, self).__init__(*args) | |
333 self.err_out = StringIO() | |
334 self.start = time.time() | |
335 self.errors = collections.defaultdict(int) | |
336 self.num_tests = 0 | |
337 | |
338 def emit(self, short, test, verbose): | |
339 if self.opts.verbose: | |
340 print >> sys.stdout, '%s ... %s' % (test.name if test else '????', | |
341 verbose) | |
342 else: | |
343 sys.stdout.write(short) | |
344 sys.stdout.flush() | |
345 | |
346 def add_result(self, msg_lines, test, header, category): | |
347 print >> self.err_out | |
348 print >> self.err_out, '=' * 70 | |
349 if test is not None: | |
350 print >> self.err_out, '%s: %s (%s)' % ( | |
351 header, test.name, test.expect_path()) | |
352 print >> self.err_out, '-' * 70 | |
353 if msg_lines: | |
354 print >> self.err_out, '\n'.join(msg_lines) | |
355 self.errors[category] += 1 | |
356 self.num_tests += 1 | |
357 | |
358 def handle_Pass(self, p): | |
359 if not self.opts.quiet: | |
360 self.emit('.', p.test, 'ok') | |
361 self.num_tests += 1 | |
362 | |
363 def handle_Fail(self, fail): | |
364 self.emit('F', fail.test, 'FAIL') | |
365 self.add_result(fail.diff, fail.test, 'FAIL', 'failures') | |
366 return False | |
367 | |
368 def handle_TestError(self, test_error): | |
369 self.emit('E', test_error.test, 'ERROR') | |
370 self.add_result([test_error.message], test_error.test, 'ERROR', 'errors') | |
371 return False | |
372 | |
373 def handle_UnknownError(self, error): | |
374 self.emit('U', None, 'UNKNOWN ERROR') | |
375 self.add_result([error.message], None, 'UNKNOWN ERROR', 'unknown_errors') | |
376 return False | |
377 | |
378 def handle_Missing(self, missing): | |
379 self.emit('M', missing.test, 'MISSING') | |
380 self.add_result([], missing.test, 'MISSING', 'missing') | |
381 return False | |
382 | |
383 def finalize(self, aborted): | |
384 # TODO(iannucci): print summary stats (and timing info?) | |
385 buf = self.err_out.getvalue() | |
386 if buf: | |
387 print | |
388 print buf | |
389 if not self.opts.quiet: | |
390 print | |
391 print '-' * 70 | |
392 print 'Ran %d tests in %0.3fs' % ( | |
393 self.num_tests, time.time() - self.start) | |
394 print | |
395 if aborted: | |
396 print 'ABORTED' | |
397 elif self.errors: | |
398 print 'FAILED (%s)' % (', '.join('%s=%d' % i | |
399 for i in self.errors.iteritems())) | |
400 elif not self.opts.quiet: | |
401 print 'OK' | |
402 | |
403 | |
404 HANDLERS = { | |
405 'list': _ListHandler, | |
406 'debug': _DebugHandler, | |
407 'train': _TrainHandler, | |
408 'test': _TestHandler, | |
409 } | |
410 | |
411 | |
412 # Private engine helpers | |
413 @contextlib.contextmanager | |
414 def _cover(opts): | |
415 if opts is not None: | |
416 c = coverage.coverage(**opts) | |
417 c._warn_no_data = False # pylint: disable=protected-access | |
418 c.start() | |
419 try: | |
420 yield | |
421 finally: | |
422 if opts is not None: | |
423 c.stop() | |
424 c.save() | |
425 | |
426 | |
427 # Private engine implementation | |
428 def _GetCurrentData(test): | |
429 """ | |
430 @type test: Test() | |
431 @returns: The deserialized data (or _NonExistant), and a boolean indicating | |
432 if the current serialized data is in the same format which was | |
433 requested by |test|. | |
434 @rtype: (dict, bool) | |
435 """ | |
436 for fmt in sorted(SUPPORTED_SERIALIZERS, key=lambda s: s != test.fmt): | |
437 path = test.expect_path(fmt) | |
438 if fmt not in SERIALIZERS: | |
439 raise Exception('The package to support %s is not installed.' % fmt) | |
440 if os.path.exists(path): | |
441 with open(path, 'rb') as f: | |
442 data = SERIALIZERS[fmt][0](f) | |
443 return data, fmt == test.fmt | |
444 return _NonExistant, True | |
445 | |
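One subtle point above: the sort key `lambda s: s != test.fmt` orders the test's preferred format first, because `False` sorts before `True`. A tiny illustration:

```python
# Illustration of the ordering trick used in _GetCurrentData: the
# requested format sorts first, any other supported format follows.
fmts = sorted({'json', 'yaml'}, key=lambda s: s != 'yaml')
assert fmts[0] == 'yaml'   # preferred format is checked before 'json'
```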
446 | |
447 def _WriteNewData(test, data): | |
448 """ | |
449 @type test: Test() | |
450 """ | |
451 if test.fmt not in SUPPORTED_SERIALIZERS: | |
452 raise Exception('%s is not a supported serializer.' % test.fmt) | |
453 if test.fmt not in SERIALIZERS: | |
454 raise Exception('The package to support %s is not installed.' % test.fmt) | |
455 with open(test.expect_path(), 'wb') as f: | |
456 SERIALIZERS[test.fmt][1](data, f) | |
457 | |
458 | |
459 def _DiffData(old, new): | |
460 """ | |
461 Takes old data and new data, then returns a textual diff as a list of lines. | |
462 @type old: dict | |
463 @type new: dict | |
464 @rtype: [str] | |
465 """ | |
466 if old is _NonExistant: | |
467 return new | |
468 if old == new: | |
469 return [] | |
470 else: | |
471 return list(difflib.context_diff( | |
472 pprint.pformat(old).splitlines(), | |
473 pprint.pformat(new).splitlines(), | |
474 fromfile='expected', tofile='current', | |
475 n=4, lineterm='' | |
476 )) | |
477 | |
478 | |
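For example, `_DiffData` returns an empty list when the expectation matches and a context diff of the pretty-printed values otherwise (example data below is made up):

```python
# Illustration only: feeding _DiffData two slightly different dicts.
old = {'steps': ['checkout', 'compile']}
new = {'steps': ['checkout', 'compile', 'test']}
assert _DiffData(old, old) == []
for line in _DiffData(old, new):
  print line   # context diff with fromfile='expected', tofile='current'
```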
479 def _GenLoopProcess(gen, test_queue, result_queue, num_procs, kill_switch, | |
480 match_globs, cover_ctx, handler): | |
481 """ | |
482 Generate `Test`'s from |gen|, and feed them into |test_queue|. | |
483 | |
484 Non-Test instances will be translated into `UnknownError` objects. | |
485 | |
486 On completion, feed |num_procs| None objects into |test_queue|. | |
487 | |
488 @param gen: generator yielding Test() instances. | |
489 @type test_queue: multiprocessing.Queue() | |
490 @type result_queue: multiprocessing.Queue() | |
491 @type num_procs: int | |
492 @type kill_switch: multiprocessing.Event() | |
493 @type match_globs: [str] | |
494 @type cover_ctx: dict | |
495 @type handler: _Handler.GenLoop() | |
496 """ | |
497 try: | |
498 matcher = re.compile( | |
499 '^%s$' % '|'.join('(?:%s)' % glob.fnmatch.translate(g) | |
500 for g in match_globs if g[0] != '-')) | |
501 if matcher.pattern == '^$': | |
502 matcher = re.compile('^.*$') | |
503 | |
504 neg_matcher = re.compile( | |
505 '^%s$' % '|'.join('(?:%s)' % glob.fnmatch.translate(g[1:]) | |
506 for g in match_globs if g[0] == '-')) | |
507 | |
508 with cover_ctx: | |
509 for test in gen(): | |
510 if kill_switch.is_set(): | |
511 break | |
512 | |
513 if not isinstance(test, Test): | |
514 result_queue.put_nowait( | |
515 UnknownError('Got non-Test instance from generator: %r' % test)) | |
516 continue | |
517 | |
518 if not neg_matcher.match(test.name) and matcher.match(test.name): | |
519 if handler(test, result_queue.put_nowait): | |
520 test_queue.put_nowait(test) | |
521 | |
522 for _ in xrange(num_procs): | |
523 test_queue.put_nowait(None) | |
Vadim Sh.
2014/04/01 03:00:26
Hm.. Maybe it should be in 'finally' block?
iannucci
2014/04/01 03:45:14
Good point. Done.
| |
524 except KeyboardInterrupt: | |
525 pass | |
526 | |
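The glob handling above turns each positive glob into a regex via `fnmatch.translate` and treats a leading "-" as a negation. A self-contained sketch of the same matching logic, with made-up globs:

```python
# Same construction as in _GenLoopProcess, shown standalone (Python 2).
import fnmatch
import re

globs = ['basic_*', '-basic_skip*']
matcher = re.compile('^%s$' % '|'.join(
    '(?:%s)' % fnmatch.translate(g) for g in globs if g[0] != '-'))
neg_matcher = re.compile('^%s$' % '|'.join(
    '(?:%s)' % fnmatch.translate(g[1:]) for g in globs if g[0] == '-'))

assert matcher.match('basic_ok') and not neg_matcher.match('basic_ok')
assert neg_matcher.match('basic_skipped')   # would be filtered out
```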
527 | |
528 def _RunLoopProcess(test_queue, result_queue, kill_switch, cover_ctx, | |
529 handler): | |
530 """ | |
531 Consume `Test` instances from |test_queue|, run them, and push the `Result`s | |
532 into |result_queue|. | |
533 | |
534 Generates coverage data as a side-effect. | |
535 @type test_queue: multiprocessing.Queue() | |
536 @type result_queue: multiprocessing.Queue() | |
537 @type kill_switch: multiprocessing.Event() | |
538 @type handler: _Handler.RunLoop() | |
539 """ | |
540 try: | |
541 with cover_ctx: | |
542 while not kill_switch.is_set(): | |
543 try: | |
544 test = test_queue.get(timeout=0.1) | |
545 if test is None: | |
546 break | |
547 except Queue.Empty: | |
548 continue | |
549 | |
550 try: | |
551 rslt = test.run() | |
552 if not isinstance(rslt, Result): | |
553 result_queue.put_nowait( | |
554 TestError(test, 'Got non-Result instance from test: %r' % rslt)) | |
555 continue | |
556 | |
557 handler(test, rslt, result_queue.put_nowait) | |
558 except Exception as e: | |
559 # TODO(iannucci): include stacktrace | |
560 result_queue.put_nowait(TestError(test, str(e))) | |
561 except KeyboardInterrupt: | |
562 pass | |
563 | |
564 | |
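The shutdown protocol between the two loops is the classic poison-pill pattern: `_GenLoopProcess` pushes one `None` per consumer, and each `_RunLoopProcess` exits when it pulls one. A stripped-down, single-process sketch of the same idea:

```python
# Single-process illustration of the None-as-poison-pill protocol.
import Queue

q = Queue.Queue()
for item in ['test_a', 'test_b', None]:   # one None per consumer
  q.put(item)

while True:
  item = q.get()
  if item is None:   # poison pill: this consumer is done
    break
  print 'running', item
```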
565 # Private CLI implementation | |
566 def parse_args(args): | |
567 args = args or sys.argv[1:] | |
568 | |
569 # Set the default mode if not specified and not passing --help | |
570 search_names = set(HANDLERS.keys() + ['-h', '--help']) | |
571 if not any(arg in search_names for arg in args): | |
572 args.insert(0, 'test') | |
573 | |
574 parser = argparse.ArgumentParser() | |
575 subparsers = parser.add_subparsers( | |
576 title='Mode (default "test")', dest='mode', | |
577 help='See `[mode] --help` for more options.') | |
Vadim Sh.
2014/04/01 03:00:26
does '`' have any special meaning here? I thought
iannucci
2014/04/01 03:45:14
No, just implies 'thing to run'. Should I change t
| |
578 | |
579 for k, h in HANDLERS.iteritems(): | |
580 sp = subparsers.add_parser(k, help=h.__doc__) | |
581 h.add_options(sp) | |
582 | |
583 mg = sp.add_mutually_exclusive_group() | |
584 mg.add_argument( | |
585 '--quiet', action='store_true', | |
586 help='be quiet (only print failures)') | |
587 mg.add_argument( | |
588 '--verbose', action='store_true', help='be verbose') | |
589 | |
590 if not h.SKIP_RUNLOOP: | |
591 sp.add_argument( | |
592 '--jobs', metavar='N', type=int, | |
593 default=multiprocessing.cpu_count(), | |
Vadim Sh.
2014/04/01 03:00:26
I think it can lie on windows. But we probably don
iannucci
2014/04/01 03:45:14
Yeah don't really care. It's probably real cores n
| |
594 help='run N jobs in parallel (default %(default)s)') | |
595 | |
596 sp.add_argument( | |
597 '--test_list', metavar="FILE", | |
Vadim Sh.
2014/04/01 03:00:26
nit: 'FILE' not "FILE"
iannucci
2014/04/01 03:45:14
Done.
| |
598 help='take the list of test globs from the FILE (use "-" for stdin)') | |
599 | |
600 sp.add_argument( | |
601 'test_glob', nargs='*', help=( | |
602 'glob to filter the tests acted on. If the glob begins with "-" ' | |
603 'then it acts as a negation glob and anything which matches it ' | |
604 'will be skipped.')) | |
605 | |
606 opts = parser.parse_args(args) | |
607 | |
608 if not hasattr(opts, 'jobs'): | |
609 opts.jobs = 0 | |
610 elif opts.jobs < 1: | |
611 parser.error('--jobs was less than 1') | |
612 | |
613 if opts.test_list: | |
614 with open(opts.test_list, 'rb') as tl: | |
Vadim Sh.
2014/04/01 03:00:26
What about '-' handling?
iannucci
2014/04/01 03:45:14
Erp. good catch.
| |
615 opts.test_glob += [l.strip() for l in tl.readlines()] | |
616 | |
617 test_globs = opts.test_glob | |
618 handler = HANDLERS[opts.mode] | |
619 | |
620 del opts.test_list | |
Vadim Sh.
2014/04/01 03:00:26
why?
iannucci
2014/04/01 03:45:14
Don't really want anyone directly inspecting these
| |
621 del opts.mode | |
622 | |
623 return opts, handler, test_globs | |
624 | |
625 | |
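A usage note on the mode handling above: if none of the mode names (or `-h`/`--help`) appears in the arguments, `test` is prepended, so a bare invocation runs the tests. Illustrative call with made-up arguments:

```python
# Illustration only: 'test' is inserted when no mode is given, so the
# remaining options are parsed by the 'test' subparser.
opts, handler, test_globs = parse_args(['--jobs', '2', 'basic_*'])
assert handler is _TestHandler
assert test_globs == ['basic_*']
```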
626 # Public | |
627 Result = namedtuple('Result', 'data') | |
628 | |
629 | |
630 _Test = namedtuple('Test', | |
631 'name func args kwargs expectdir expectbase fmt breakpoints') | |
Vadim Sh.
2014/04/01 03:00:26
nit: expect_dir, expect_base.
iannucci
2014/04/01 03:45:14
Done.
| |
632 | |
633 class Test(_Test): | |
634 def __new__(cls, name, func, args=(), kwargs=None, expectdir=None, | |
Vadim Sh.
2014/04/01 03:00:26
Doc strings for args. Esp. breakpoints and break_f
iannucci
2014/04/01 06:22:08
Done.
| |
635 expectbase=None, fmt='json', breakpoints=None, break_funcs=()): | |
636 kwargs = kwargs or {} | |
637 | |
638 breakpoints = breakpoints or [] | |
639 if not breakpoints or break_funcs: | |
640 for f in (break_funcs or (func,)): | |
Vadim Sh.
2014/04/01 03:00:26
Why 'or (func,)' here?
iannucci
2014/04/01 03:45:14
Want it to default to breakpointing on the user
| |
641 if hasattr(f, 'im_func'): | |
Vadim Sh.
2014/04/01 03:00:26
What's im_func?
iannucci
2014/04/01 03:45:14
I stole this from the pdb implementation, but it's
| |
642 f = f.im_func | |
643 breakpoints.append((f.func_code.co_filename, | |
644 f.func_code.co_firstlineno, | |
645 f.func_code.co_name)) | |
iannucci
2014/04/01 01:47:08
Need to do this lookup right away since we can't b
| |
646 | |
647 return super(Test, cls).__new__(cls, name, func, args, kwargs, expectdir, | |
648 expectbase, fmt, breakpoints) | |
649 | |
650 def expect_path(self, fmt=None): | |
Vadim Sh.
2014/04/01 03:00:26
nit: fmt -> ext?
iannucci
2014/04/01 03:45:14
Done.
| |
651 name = self.expectbase or self.name | |
652 name = ''.join('_' if c in '<>:"\\/|?*\0' else c for c in name) | |
653 return os.path.join(self.expectdir, name + ('.%s' % (fmt or self.fmt))) | |
654 | |
655 def run(self): | |
656 return self.func(*self.args, **self.kwargs) | |
657 | |
658 | |
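For illustration, constructing a `Test` and asking where its expectation lives; the directory and test body here are hypothetical:

```python
# Hypothetical test: the callable must return a Result whose .data is
# what gets serialized into the expectation file.
def my_test():
  return Result({'steps': ['checkout', 'compile']})

t = Test('module.my_test', my_test, expectdir='expectations', fmt='json')
print t.expect_path()   # e.g. expectations/module.my_test.json
```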
659 def main(test_gen, coverage_includes=None, coverage_omits=None, args=None): | |
Vadim Sh.
2014/04/01 03:00:26
Doc string.
iannucci
2014/04/01 06:22:08
Done.
| |
660 opts, handler, test_globs = parse_args(args) | |
661 result_handler = handler.ResultLoop(opts) | |
662 | |
663 kill_switch = multiprocessing.Event() | |
664 signal.signal(signal.SIGINT, lambda *_: kill_switch.set()) | |
Vadim Sh.
2014/04/01 03:00:26
what about SIGTERM?
iannucci
2014/04/01 03:45:14
Done.
| |
665 | |
666 if handler.SKIP_RUNLOOP: | |
667 coverage_opts = None | |
668 else: | |
669 coverage_opts = { | |
670 'include': coverage_includes, | |
671 'omit': coverage_omits, | |
672 'data_suffix': True | |
673 } | |
674 c = coverage.coverage(**coverage_opts) | |
675 c.erase() | |
676 cover_ctx = _cover(coverage_opts) | |
677 | |
678 test_queue = multiprocessing.Queue() | |
679 result_queue = multiprocessing.Queue() | |
680 | |
681 test_gen_args = ( | |
682 test_gen, test_queue, result_queue, opts.jobs, kill_switch, | |
683 test_globs, cover_ctx, handler.GenLoop(opts)) | |
684 | |
685 procs = [] | |
686 if handler.SKIP_RUNLOOP: | |
687 _GenLoopProcess(*test_gen_args) | |
688 else: | |
689 procs = [multiprocessing.Process( | |
690 target=_GenLoopProcess, args=test_gen_args)] | |
Vadim Sh.
2014/04/01 03:00:26
Maybe always run _GenLoopProcess in current proces
iannucci
2014/04/01 03:45:14
It could take a while though, and we want to start
| |
691 | |
692 procs += [ | |
693 multiprocessing.Process( | |
694 target=_RunLoopProcess, args=( | |
695 test_queue, result_queue, kill_switch, cover_ctx, | |
696 handler.RunLoop(opts))) | |
697 for _ in xrange(opts.jobs) | |
698 ] | |
699 | |
700 for p in procs: | |
701 p.daemon = True | |
702 p.start() | |
703 | |
704 error = False | |
705 while not kill_switch.is_set(): | |
706 while not kill_switch.is_set(): | |
707 try: | |
708 error |= result_handler(result_queue.get(timeout=0.1)) is False | |
Vadim Sh.
2014/04/01 03:00:26
'is False' is sketchy. Maybe 'is Failure' where Fa
iannucci
2014/04/01 03:45:14
Unfortunately the result is coming across a pickle
| |
709 except Queue.Empty: | |
710 break | |
711 | |
712 procs = [p for p in procs if p.is_alive()] | |
713 if not procs: | |
Vadim Sh.
2014/04/01 03:00:26
nit: if not any(p.is_alive() for p in procs):
iannucci
2014/04/01 03:45:14
Done.
| |
714 break | |
715 result_handler.finalize(kill_switch.is_set()) | |
716 | |
717 assert kill_switch.is_set() or result_queue.empty() | |
718 | |
719 if not handler.SKIP_RUNLOOP: | |
720 c.combine() | |
Vadim Sh.
2014/04/01 03:00:26
How does it work?
iannucci
2014/04/01 03:45:14
magic.... it looks for all files .coverage.hostnam
| |
721 if not kill_switch.is_set() and not opts.test_glob: | |
722 outf = StringIO() | |
723 total_covered = c.report(file=outf) | |
724 summary = outf.getvalue().replace('%- 15s' % 'Name', 'Coverage Report', 1) | |
725 if opts.verbose: | |
726 print | |
727 print summary | |
728 elif total_covered != 100.0: | |
729 print | |
730 lines = summary.splitlines() | |
731 lines[2:-2] = [l for l in lines[2:-2] | |
732 if not l.strip().endswith('100%')] | |
733 print '\n'.join(lines) | |
734 print | |
735 print 'FATAL: Recipes configs are not at 100% coverage.' | |
736 sys.exit(2) | |
737 | |
738 sys.exit(error or kill_switch.is_set()) | |
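Finally, a hedged sketch of what a runner script built on this engine might look like; the import name `expect_tests`, the test body, and the coverage include path are assumptions, not defined in this CL:

```python
# Hypothetical runner script; 'expect_tests' stands in for whatever
# name this module is imported under.
import expect_tests

def gen_tests():
  yield expect_tests.Test(
      'smoke.basic',
      lambda: expect_tests.Result({'ok': True}),
      expectdir='expectations')

if __name__ == '__main__':
  expect_tests.main(gen_tests, coverage_includes=['mypkg/*'])
```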