Chromium Code Reviews

Side by Side Diff: tools/isolate/run_test_cases.py

Issue 10825049: run_test_cases.py: Enable use of RUN_TEST_CASES_RESULT_FILE as environment variable. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 8 years, 4 months ago
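In practical terms, the change lets a caller redirect the JSON results dump through the environment instead of relying on the default '<executable>.run_test_cases' file written next to the test binary. A minimal sketch of driving the script that way from Python follows; the result path and test binary are placeholders, and it assumes, as the option parser below suggests, that the test executable is passed as the positional argument:

import os
import subprocess
import sys

# Placeholder paths, for illustration only.
env = os.environ.copy()
env['RUN_TEST_CASES_RESULT_FILE'] = '/tmp/base_unittests.results.json'

# With this patch, --result defaults to RUN_TEST_CASES_RESULT_FILE, so no
# extra flag is needed; the JSON dump lands at the path above instead of
# next to the test binary.
subprocess.check_call(
    [sys.executable, 'tools/isolate/run_test_cases.py',
     'out/Debug/base_unittests'],
    env=env)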
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """Runs each test cases as a single shard, single process execution. 6 """Runs each test cases as a single shard, single process execution.
7 7
8 Similar to sharding_supervisor.py but finer grained. Runs multiple instances in 8 Similar to sharding_supervisor.py but finer grained. Runs multiple instances in
9 parallel. 9 parallel.
10 """ 10 """
11 11
12 import fnmatch 12 import fnmatch
13 import json 13 import json
14 import logging 14 import logging
15 import optparse 15 import optparse
16 import os 16 import os
17 import Queue 17 import Queue
18 import subprocess 18 import subprocess
19 import sys 19 import sys
20 import threading 20 import threading
21 import time 21 import time
22 22
23 23
24 # These are known to influence the way the output is generated.
25 KNOWN_GTEST_ENV_VARS = [
26 'GTEST_ALSO_RUN_DISABLED_TESTS',
27 'GTEST_BREAK_ON_FAILURE',
28 'GTEST_CATCH_EXCEPTIONS',
29 'GTEST_COLOR',
30 'GTEST_FILTER',
31 'GTEST_OUTPUT',
32 'GTEST_PRINT_TIME',
33 'GTEST_RANDOM_SEED',
34 'GTEST_REPEAT',
35 'GTEST_SHARD_INDEX',
36 'GTEST_SHARD_STATUS_FILE',
37 'GTEST_SHUFFLE',
38 'GTEST_THROW_ON_FAILURE',
39 'GTEST_TOTAL_SHARDS',
40 ]
41
42 # These need to be popped out before running a test.
43 GTEST_ENV_VARS_TO_REMOVE = [
44 # TODO(maruel): Handle.
45 'GTEST_ALSO_RUN_DISABLED_TESTS',
46 'GTEST_FILTER',
47 # TODO(maruel): Handle.
48 'GTEST_OUTPUT',
49 # TODO(maruel): Handle.
50 'GTEST_RANDOM_SEED',
51 # TODO(maruel): Handle.
52 'GTEST_REPEAT',
53 'GTEST_SHARD_INDEX',
54 # TODO(maruel): Handle.
55 'GTEST_SHUFFLE',
56 'GTEST_TOTAL_SHARDS',
57 ]
58
59
24 def num_processors(): 60 def num_processors():
25 """Returns the number of processors. 61 """Returns the number of processors.
26 62
27 Python on OSX 10.6 raises a NotImplementedError exception. 63 Python on OSX 10.6 raises a NotImplementedError exception.
28 """ 64 """
29 try: 65 try:
30 # Multiprocessing 66 # Multiprocessing
31 import multiprocessing 67 import multiprocessing
32 return multiprocessing.cpu_count() 68 return multiprocessing.cpu_count()
33 except: # pylint: disable=W0702 69 except: # pylint: disable=W0702
(...skipping 387 matching lines...)
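To make the intent of the two lists above concrete, here is a minimal sketch of sanitizing the environment before spawning a child gtest process. make_child_env is a hypothetical helper name, the list it iterates is the GTEST_ENV_VARS_TO_REMOVE constant defined above, and the real logic lives in Runner.__init__ further down:

import os

def make_child_env():
  # Hypothetical helper; mirrors what Runner.__init__ does below.
  env = os.environ.copy()
  for name in GTEST_ENV_VARS_TO_REMOVE:
    # Drop variables that would conflict with the per-test --gtest_filter
    # the runner injects for each test case.
    env.pop(name, None)
  # Force colored gtest output unless the caller already disabled it.
  env.setdefault('GTEST_COLOR', 'on')
  return env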
421 def __init__(self, executable, cwd_dir, timeout, progress): 457 def __init__(self, executable, cwd_dir, timeout, progress):
422 # Constants 458 # Constants
423 self.executable = executable 459 self.executable = executable
424 self.cwd_dir = cwd_dir 460 self.cwd_dir = cwd_dir
425 self.timeout = timeout 461 self.timeout = timeout
426 self.progress = progress 462 self.progress = progress
427 self.retry_count = 3 463 self.retry_count = 3
428 # It is important to remove the shard environment variables since they could 464 # It is important to remove the shard environment variables since they could
429 # conflict with --gtest_filter. 465 # conflict with --gtest_filter.
430 self.env = os.environ.copy() 466 self.env = os.environ.copy()
431 self.env.pop('GTEST_SHARD_INDEX', None) 467 for name in GTEST_ENV_VARS_TO_REMOVE:
432 self.env.pop('GTEST_TOTAL_SHARDS', None) 468 self.env.pop(name, None)
469 # Forcibly enable color by default, if not already disabled.
470 self.env.setdefault('GTEST_COLOR', 'on')
433 471
434 def map(self, test_case): 472 def map(self, test_case):
435 """Traces a single test case and returns its output.""" 473 """Traces a single test case and returns its output."""
436 cmd = [self.executable, '--gtest_filter=%s' % test_case] 474 cmd = [self.executable, '--gtest_filter=%s' % test_case]
437 cmd = fix_python_path(cmd) 475 cmd = fix_python_path(cmd)
438 out = [] 476 out = []
439 for retry in range(self.retry_count): 477 for retry in range(self.retry_count):
440 start = time.time() 478 start = time.time()
441 output, returncode = call_with_timeout( 479 output, returncode = call_with_timeout(
442 cmd, 480 cmd,
443 self.timeout, 481 self.timeout,
444 cwd=self.cwd_dir, 482 cwd=self.cwd_dir,
445 stderr=subprocess.STDOUT, 483 stderr=subprocess.STDOUT,
446 env=self.env) 484 env=self.env)
447 duration = time.time() - start 485 duration = time.time() - start
448 data = { 486 data = {
449 'test_case': test_case, 487 'test_case': test_case,
450 'returncode': returncode, 488 'returncode': returncode,
451 'duration': duration, 489 'duration': duration,
452 'output': output, 490 # It needs to be valid utf-8, otherwise it can't be stored.
491 'output': output.decode('ascii', 'ignore').encode('utf-8'),
453 } 492 }
454 if '[ RUN ]' not in output: 493 if '[ RUN ]' not in output:
455 # Can't find gtest marker, mark it as invalid. 494 # Can't find gtest marker, mark it as invalid.
456 returncode = returncode or 1 495 returncode = returncode or 1
457 out.append(data) 496 out.append(data)
458 if sys.platform == 'win32': 497 if sys.platform == 'win32':
459 output = output.replace('\r\n', '\n') 498 output = output.replace('\r\n', '\n')
460 size = returncode and retry != self.retry_count - 1 499 size = returncode and retry != self.retry_count - 1
461 if retry: 500 if retry:
462 self.progress.update_item( 501 self.progress.update_item(
(...skipping 37 matching lines...)
500 ] 539 ]
501 if whitelist: 540 if whitelist:
502 tests = [ 541 tests = [
503 t for t in tests if any(fnmatch.fnmatch(t, s) for s in whitelist) 542 t for t in tests if any(fnmatch.fnmatch(t, s) for s in whitelist)
504 ] 543 ]
505 logging.info( 544 logging.info(
506 'Found %d test cases in %s' % (len(tests), os.path.basename(executable))) 545 'Found %d test cases in %s' % (len(tests), os.path.basename(executable)))
507 return tests 546 return tests
508 547
509 548
510 def run_test_cases(executable, test_cases, jobs, timeout, no_dump): 549 def run_test_cases(executable, test_cases, jobs, timeout, result_file):
511 """Traces test cases one by one.""" 550 """Traces test cases one by one."""
512 progress = Progress(len(test_cases)) 551 progress = Progress(len(test_cases))
513 with ThreadPool(jobs) as pool: 552 with ThreadPool(jobs) as pool:
514 function = Runner(executable, os.getcwd(), timeout, progress).map 553 function = Runner(executable, os.getcwd(), timeout, progress).map
515 for test_case in test_cases: 554 for test_case in test_cases:
516 pool.add_task(function, test_case) 555 pool.add_task(function, test_case)
517 results = pool.join(progress, 0.1) 556 results = pool.join(progress, 0.1)
518 duration = time.time() - progress.start 557 duration = time.time() - progress.start
519 results = dict((item[0]['test_case'], item) for item in results) 558 results = dict((item[0]['test_case'], item) for item in results)
520 if not no_dump: 559 if result_file:
521 with open('%s.run_test_cases' % executable, 'wb') as f: 560 with open(result_file, 'wb') as f:
522 json.dump(results, f, sort_keys=True, indent=2) 561 json.dump(results, f, sort_keys=True, indent=2)
523 sys.stdout.write('\n') 562 sys.stdout.write('\n')
524 total = len(results) 563 total = len(results)
525 if not total: 564 if not total:
526 return 1 565 return 1
527 566
528 # Classify the results 567 # Classify the results
529 success = [] 568 success = []
530 flaky = [] 569 flaky = []
531 fail = [] 570 fail = []
(...skipping 44 matching lines...)
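The JSON written by run_test_cases() above maps each test case name to the list of attempts recorded for it; each attempt carries the test_case, returncode, duration and output fields built in Runner.map. A small sketch of post-processing such a file, with the file name being a placeholder:

import json
import sys

# Placeholder name; use whatever --result or RUN_TEST_CASES_RESULT_FILE
# pointed at.
with open('base_unittests.run_test_cases') as f:
  results = json.load(f)

for test_case in sorted(results):
  attempts = results[test_case]
  last = attempts[-1]
  sys.stdout.write(
      '%s: %d run(s), last returncode %d, %.1fs\n' %
      (test_case, len(attempts), last['returncode'], last['duration']))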
576 default=120, 615 default=120,
577 help='Timeout for a single test case, in seconds default:%default') 616 help='Timeout for a single test case, in seconds default:%default')
578 parser.add_option( 617 parser.add_option(
579 '-v', '--verbose', 618 '-v', '--verbose',
580 action='count', 619 action='count',
581 default=int(os.environ.get('ISOLATE_DEBUG', 0)), 620 default=int(os.environ.get('ISOLATE_DEBUG', 0)),
582 help='Use multiple times') 621 help='Use multiple times')
583 parser.add_option( 622 parser.add_option(
584 '--no-dump', 623 '--no-dump',
585 action='store_true', 624 action='store_true',
586 help='do not generate a .test_cases file') 625 help='do not generate a .run_test_cases file')
626 parser.add_option(
627 '--result',
628 default=os.environ.get('RUN_TEST_CASES_RESULT_FILE', ''),
629 help='Override the default name of the generated .run_test_cases file')
587 630
588 group = optparse.OptionGroup(parser, 'Which test cases to run') 631 group = optparse.OptionGroup(parser, 'Which test cases to run')
589 group.add_option( 632 group.add_option(
590 '-w', '--whitelist', 633 '-w', '--whitelist',
591 default=[], 634 default=[],
592 action='append', 635 action='append',
593 help='filter to apply to test cases to run, wildcard-style, defaults to ' 636 help='filter to apply to test cases to run, wildcard-style, defaults to '
594 'all tests') 637 'all tests')
595 group.add_option( 638 group.add_option(
596 '-b', '--blacklist', 639 '-b', '--blacklist',
597 default=[], 640 default=[],
598 action='append', 641 action='append',
599 help='filter to apply to test cases to skip, wildcard-style, defaults to ' 642 help='filter to apply to test cases to skip, wildcard-style, defaults to '
600 'no tests') 643 'no tests')
601 group.add_option( 644 group.add_option(
602 '-i', '--index', 645 '-i', '--index',
603 type='int', 646 type='int',
604 default=as_digit(os.environ.get('GTEST_SHARD_INDEX', ''), None), 647 default=as_digit(os.environ.get('GTEST_SHARD_INDEX', ''), None),
605 help='Shard index to run') 648 help='Shard index to run')
606 group.add_option( 649 group.add_option(
607 '-s', '--shards', 650 '-s', '--shards',
608 type='int', 651 type='int',
609 default=as_digit(os.environ.get('GTEST_TOTAL_SHARDS', ''), None), 652 default=as_digit(os.environ.get('GTEST_TOTAL_SHARDS', ''), None),
610 help='Total number of shards to calculate from the --index to run') 653 help='Total number of shards to calculate from the --index to run')
611 group.add_option( 654 group.add_option(
612 '-T', '--test-case-file', 655 '-T', '--test-case-file',
613 help='File containing the exact list of test cases to run') 656 help='File containing the exact list of test cases to run')
614 group.add_option( 657 group.add_option(
615 '--gtest_filter', 658 '--gtest_filter',
659 default=os.environ.get('GTEST_FILTER', ''),
616 help='Runs a single test, provided to keep compatibility with ' 660 help='Runs a single test, provided to keep compatibility with '
617 'other tools') 661 'other tools')
618 parser.add_option_group(group) 662 parser.add_option_group(group)
619 options, args = parser.parse_args(argv) 663 options, args = parser.parse_args(argv)
620 664
621 levels = [logging.ERROR, logging.INFO, logging.DEBUG] 665 levels = [logging.ERROR, logging.INFO, logging.DEBUG]
622 logging.basicConfig( 666 logging.basicConfig(
623 level=levels[min(len(levels)-1, options.verbose)], 667 level=levels[min(len(levels)-1, options.verbose)],
624 format='%(levelname)5s %(module)15s(%(lineno)3d): %(message)s') 668 format='%(levelname)5s %(module)15s(%(lineno)3d): %(message)s')
625 669
(...skipping 32 matching lines...)
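For reference, the destination of the results dump after this change resolves as follows: --no-dump suppresses it entirely, otherwise an explicit --result (whose default now comes from RUN_TEST_CASES_RESULT_FILE) wins, and failing that the historical '<executable>.run_test_cases' name is used. A sketch with a hypothetical helper name; main() below implements the same precedence inline:

def resolve_result_file(options, executable):
  # Hypothetical helper, shown only to summarize the precedence.
  if options.no_dump:
    return None
  return options.result or '%s.run_test_cases' % executable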
658 test_cases = get_test_cases( 702 test_cases = get_test_cases(
659 executable, 703 executable,
660 options.whitelist, 704 options.whitelist,
661 options.blacklist, 705 options.blacklist,
662 options.index, 706 options.index,
663 options.shards) 707 options.shards)
664 708
665 if not test_cases: 709 if not test_cases:
666 return 0 710 return 0
667 711
712 if options.no_dump:
713 result_file = None
714 else:
715 if options.result:
716 result_file = options.result
717 else:
718 result_file = '%s.run_test_cases' % executable
719
668 return run_test_cases( 720 return run_test_cases(
669 executable, 721 executable,
670 test_cases, 722 test_cases,
671 options.jobs, 723 options.jobs,
672 options.timeout, 724 options.timeout,
673 options.no_dump) 725 result_file)
674 726
675 727
676 if __name__ == '__main__': 728 if __name__ == '__main__':
677 sys.exit(main(sys.argv[1:])) 729 sys.exit(main(sys.argv[1:]))