Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(182)

Side by Side Diff: tools/isolate/run_test_cases.py

Issue 10831330: Repeat Failed Tests in Serial (Closed) Base URL: http://git.chromium.org/chromium/src.git@master
Patch Set: Created 8 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | tools/isolate/run_test_cases_smoke_test.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """Runs each test cases as a single shard, single process execution. 6 """Runs each test cases as a single shard, single process execution.
7 7
8 Similar to sharding_supervisor.py but finer grained. Runs multiple instances in 8 Similar to sharding_supervisor.py but finer grained. Runs multiple instances in
9 parallel. 9 parallel.
10 """ 10 """
(...skipping 436 matching lines...) Expand 10 before | Expand all | Expand 10 after
447 447
def list_test_cases(executable, index, shards, disabled, fails, flaky):
  """Returns the list of test cases according to the specified criteria.

  Arguments:
  - executable: gtest binary to query for its test cases.
  - index, shards: when |shards| is non-zero, only the |index|-th shard of
    the test cases is kept.
  - disabled, fails, flaky: forwarded to filter_bad_tests(); presumably
    control whether DISABLED_/FAILS_/FLAKY_ prefixed tests are kept —
    TODO(review): confirm against filter_bad_tests().
  """
  tests = parse_gtest_cases(gtest_list_tests(executable))
  if shards:
    tests = filter_shards(tests, index, shards)
  return filter_bad_tests(tests, disabled, fails, flaky)
454 454
455 455
456 class Runner(object): 456 class Runner(object):
457 def __init__(self, executable, cwd_dir, timeout, progress): 457 def __init__(self, executable, cwd_dir, timeout, progress, retry_count=3):
458 # Constants 458 # Constants
459 self.executable = executable 459 self.executable = executable
460 self.cwd_dir = cwd_dir 460 self.cwd_dir = cwd_dir
461 self.timeout = timeout 461 self.timeout = timeout
462 self.progress = progress 462 self.progress = progress
463 self.retry_count = 3 463 self.retry_count = retry_count
464 # It is important to remove the shard environment variables since it could 464 # It is important to remove the shard environment variables since it could
465 # conflict with --gtest_filter. 465 # conflict with --gtest_filter.
466 self.env = os.environ.copy() 466 self.env = os.environ.copy()
467 for name in GTEST_ENV_VARS_TO_REMOVE: 467 for name in GTEST_ENV_VARS_TO_REMOVE:
468 self.env.pop(name, None) 468 self.env.pop(name, None)
469 # Forcibly enable color by default, if not already disabled. 469 # Forcibly enable color by default, if not already disabled.
470 self.env.setdefault('GTEST_COLOR', 'on') 470 self.env.setdefault('GTEST_COLOR', 'on')
471 471
472 def map(self, test_case): 472 def map(self, test_case):
473 """Traces a single test case and returns its output.""" 473 """Traces a single test case and returns its output."""
(...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after
539 ] 539 ]
540 if whitelist: 540 if whitelist:
541 tests = [ 541 tests = [
542 t for t in tests if any(fnmatch.fnmatch(t, s) for s in whitelist) 542 t for t in tests if any(fnmatch.fnmatch(t, s) for s in whitelist)
543 ] 543 ]
544 logging.info( 544 logging.info(
545 'Found %d test cases in %s' % (len(tests), os.path.basename(executable))) 545 'Found %d test cases in %s' % (len(tests), os.path.basename(executable)))
546 return tests 546 return tests
547 547
548 548
def LogResults(result_file, results):
  """Dumps |results| as sorted, indented JSON into |result_file|.

  No-op when |result_file| is empty or None.
  """
  if result_file:
    with open(result_file, 'wb') as f:
      json.dump(results, f, sort_keys=True, indent=2)
555
556
549 def run_test_cases(executable, test_cases, jobs, timeout, result_file): 557 def run_test_cases(executable, test_cases, jobs, timeout, result_file):
550 """Traces test cases one by one.""" 558 """Traces test cases one by one."""
551 progress = Progress(len(test_cases)) 559 progress = Progress(len(test_cases))
552 with ThreadPool(jobs) as pool: 560 with ThreadPool(jobs) as pool:
553 function = Runner(executable, os.getcwd(), timeout, progress).map 561 function = Runner(executable, os.getcwd(), timeout, progress).map
554 for test_case in test_cases: 562 for test_case in test_cases:
555 pool.add_task(function, test_case) 563 pool.add_task(function, test_case)
556 results = pool.join(progress, 0.1) 564 results = pool.join(progress, 0.1)
557 duration = time.time() - progress.start 565 duration = time.time() - progress.start
558 results = dict((item[0]['test_case'], item) for item in results) 566 results = dict((item[0]['test_case'], item) for item in results)
559 if result_file: 567 LogResults(result_file, results)
560 with open(result_file, 'wb') as f:
561 json.dump(results, f, sort_keys=True, indent=2)
562 sys.stdout.write('\n') 568 sys.stdout.write('\n')
563 total = len(results) 569 total = len(results)
564 if not total: 570 if not total:
565 return 1 571 return 1
566 572
567 # Classify the results 573 # Classify the results
568 success = [] 574 success = []
569 flaky = [] 575 flaky = []
570 fail = [] 576 fail = []
571 nb_runs = 0 577 nb_runs = 0
572 for test_case in sorted(results): 578 for test_case in sorted(results):
573 items = results[test_case] 579 items = results[test_case]
574 nb_runs += len(items) 580 nb_runs += len(items)
575 if not any(not i['returncode'] for i in items): 581 if not any(not i['returncode'] for i in items):
576 fail.append(test_case) 582 fail.append(test_case)
577 elif len(items) > 1 and any(not i['returncode'] for i in items): 583 elif len(items) > 1 and any(not i['returncode'] for i in items):
578 flaky.append(test_case) 584 flaky.append(test_case)
579 elif len(items) == 1 and items[0]['returncode'] == 0: 585 elif len(items) == 1 and items[0]['returncode'] == 0:
580 success.append(test_case) 586 success.append(test_case)
581 else: 587 else:
582 assert False, items 588 assert False, items
583 589
590 # Retry all the failures serially to see if they are just flaky when
591 # run at the same time.
592 if fail:
593 print 'Retrying failed tests serially.'
594 progress = Progress(len(fail))
595 function = Runner(
596 executable, os.getcwd(), timeout, progress, retry_count=1).map
597 test_cases_retry = fail[:]
598
599 for test_case in test_cases_retry:
600 output = function(test_case)
601 progress.print_update()
602 results[output[0]['test_case']].append(output)
603 if not output[0]['returncode']:
604 fail.remove(test_case)
605 flaky.append(test_case)
606
607 LogResults(result_file, results)
608 sys.stdout.write('\n')
609
610 print 'Summary:'
584 for test_case in sorted(flaky): 611 for test_case in sorted(flaky):
585 items = results[test_case] 612 items = results[test_case]
586 print '%s is flaky (tried %d times)' % (test_case, len(items)) 613 print '%s is flaky (tried %d times)' % (test_case, len(items))
587 614
615 for test_case in sorted(fail):
616 print '%s failed' % (test_case)
617
588 print 'Success: %4d %5.2f%%' % (len(success), len(success) * 100. / total) 618 print 'Success: %4d %5.2f%%' % (len(success), len(success) * 100. / total)
589 print 'Flaky: %4d %5.2f%%' % (len(flaky), len(flaky) * 100. / total) 619 print 'Flaky: %4d %5.2f%%' % (len(flaky), len(flaky) * 100. / total)
590 print 'Fail: %4d %5.2f%%' % (len(fail), len(fail) * 100. / total) 620 print 'Fail: %4d %5.2f%%' % (len(fail), len(fail) * 100. / total)
591 print '%.1fs Done running %d tests with %d executions. %.1f test/s' % ( 621 print '%.1fs Done running %d tests with %d executions. %.1f test/s' % (
592 duration, 622 duration,
593 len(results), 623 len(results),
594 nb_runs, 624 nb_runs,
595 nb_runs / duration) 625 nb_runs / duration)
596 return int(bool(fail)) 626 return int(bool(fail))
597 627
(...skipping 124 matching lines...) Expand 10 before | Expand all | Expand 10 after
722 return run_test_cases( 752 return run_test_cases(
723 executable, 753 executable,
724 test_cases, 754 test_cases,
725 options.jobs, 755 options.jobs,
726 options.timeout, 756 options.timeout,
727 result_file) 757 result_file)
728 758
729 759
if __name__ == '__main__':
  # Propagate run_test_cases()'s status code (via main()) as the process
  # exit code.
  sys.exit(main(sys.argv[1:]))
OLDNEW
« no previous file with comments | « no previous file | tools/isolate/run_test_cases_smoke_test.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698