OLD | NEW |
---|---|
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """A tool to run a chrome test executable, used by the buildbot slaves. | 6 """A tool to run a chrome test executable, used by the buildbot slaves. |
7 | 7 |
8 When this is run, the current directory (cwd) should be the outer build | 8 When this is run, the current directory (cwd) should be the outer build |
9 directory (e.g., chrome-release/build/). | 9 directory (e.g., chrome-release/build/). |
10 | 10 |
(...skipping 24 matching lines...) | |
35 # Because of this dependency on a chromium checkout, we need to disable some | 35 # Because of this dependency on a chromium checkout, we need to disable some |
36 # pylint checks. | 36 # pylint checks. |
37 # pylint: disable=E0611 | 37 # pylint: disable=E0611 |
38 # pylint: disable=E1101 | 38 # pylint: disable=E1101 |
39 from common import chromium_utils | 39 from common import chromium_utils |
40 from common import gtest_utils | 40 from common import gtest_utils |
41 import config | 41 import config |
42 from slave import crash_utils | 42 from slave import crash_utils |
43 from slave import gtest_slave_utils | 43 from slave import gtest_slave_utils |
44 from slave import process_log_utils | 44 from slave import process_log_utils |
45 from slave import results_dashboard | |
45 from slave import slave_utils | 46 from slave import slave_utils |
46 from slave import xvfb | 47 from slave import xvfb |
47 from slave.gtest.json_results_generator import GetSvnRevision | 48 from slave.gtest.json_results_generator import GetSvnRevision |
48 | 49 |
49 USAGE = '%s [options] test.exe [test args]' % os.path.basename(sys.argv[0]) | 50 USAGE = '%s [options] test.exe [test args]' % os.path.basename(sys.argv[0]) |
50 | 51 |
51 CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox' | 52 CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox' |
52 | 53 |
53 DEST_DIR = 'gtest_results' | 54 DEST_DIR = 'gtest_results' |
54 | 55 |
(...skipping 284 matching lines...) | |
339 build_property=options.build_properties, | 340 build_property=options.build_properties, |
340 factory_properties=options.factory_properties, | 341 factory_properties=options.factory_properties, |
341 webkit_revision=webkit_revision) | 342 webkit_revision=webkit_revision) |
342 | 343 |
343 if options.annotate and options.generate_json_file: | 344 if options.annotate and options.generate_json_file: |
344 tracker_obj.ProcessLine(_GetMasterString(_GetMaster())) | 345 tracker_obj.ProcessLine(_GetMasterString(_GetMaster())) |
345 | 346 |
346 return tracker_obj | 347 return tracker_obj |
347 | 348 |
348 | 349 |
350 def send_results_to_dashboard(results_tracker, master, system, test, url): | |
351 for logname, log in results_tracker.PerformanceLogs().iteritems(): | |
352 lines = [str(l).rstrip() for l in log] | |
353 results_dashboard.SendResults(logname, lines, master, system, test, url) | |
354 | |
355 | |
349 def annotate(test_name, result, results_tracker, full_name=False, | 356 def annotate(test_name, result, results_tracker, full_name=False, |
350 perf_dashboard_id=None): | 357 perf_dashboard_id=None): |
351 """Given a test result and tracker, update the waterfall with test results.""" | 358 """Given a test result and tracker, update the waterfall with test results.""" |
352 get_text_result = process_log_utils.SUCCESS | 359 get_text_result = process_log_utils.SUCCESS |
353 | 360 |
354 for failure in sorted(results_tracker.FailedTests()): | 361 for failure in sorted(results_tracker.FailedTests()): |
355 if full_name: | 362 if full_name: |
356 testabbr = re.sub(r'[^\w\.\-]', '_', failure) | 363 testabbr = re.sub(r'[^\w\.\-]', '_', failure) |
357 else: | 364 else: |
358 testabbr = re.sub(r'[^\w\.\-]', '_', failure.split('.')[-1]) | 365 testabbr = re.sub(r'[^\w\.\-]', '_', failure.split('.')[-1]) |
(...skipping 178 matching lines...) | |
537 | 544 |
538 if options.generate_json_file: | 545 if options.generate_json_file: |
539 _GenerateJSONForTestResults(options, results_tracker) | 546 _GenerateJSONForTestResults(options, results_tracker) |
540 | 547 |
541 if options.annotate: | 548 if options.annotate: |
542 annotate(options.test_type, result, results_tracker, | 549 annotate(options.test_type, result, results_tracker, |
543 options.factory_properties.get('full_test_name'), | 550 options.factory_properties.get('full_test_name'), |
544 perf_dashboard_id=options.factory_properties.get( | 551 perf_dashboard_id=options.factory_properties.get( |
545 'test_name')) | 552 'test_name')) |
546 | 553 |
554 if options.results_url: | |
555 send_results_to_dashboard( | |
556 results_tracker, options.factory_properties.get('master'), | |
Mike Stip (use stip instead) 2013/02/23 00:23:21: nit: I'd prefer masterName or master_name
sullivan 2013/02/25 22:18:29: Done.
| |
557 options.factory_properties.get('perf_id'), options.test_type, | |
558 options.results_url) | |
559 | |
547 return result | 560 return result |
548 | 561 |
549 | 562 |
550 def main_ios(options, args): | 563 def main_ios(options, args): |
551 if len(args) < 1: | 564 if len(args) < 1: |
552 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) | 565 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) |
553 | 566 |
554 def kill_simulator(): | 567 def kill_simulator(): |
555 chromium_utils.RunCommand(['/usr/bin/killall', 'iPhone Simulator']) | 568 chromium_utils.RunCommand(['/usr/bin/killall', 'iPhone Simulator']) |
556 | 569 |
(...skipping 226 matching lines...) | |
783 | 796 |
784 if options.generate_json_file: | 797 if options.generate_json_file: |
785 _GenerateJSONForTestResults(options, results_tracker) | 798 _GenerateJSONForTestResults(options, results_tracker) |
786 | 799 |
787 if options.annotate: | 800 if options.annotate: |
788 annotate(options.test_type, result, results_tracker, | 801 annotate(options.test_type, result, results_tracker, |
789 options.factory_properties.get('full_test_name'), | 802 options.factory_properties.get('full_test_name'), |
790 perf_dashboard_id=options.factory_properties.get( | 803 perf_dashboard_id=options.factory_properties.get( |
791 'test_name')) | 804 'test_name')) |
792 | 805 |
806 if options.results_url: | |
807 send_results_to_dashboard( | |
808 results_tracker, options.factory_properties.get('master'), | |
809 options.factory_properties.get('perf_id'), options.test_type, | |
810 options.results_url) | |
811 | |
793 return result | 812 return result |
794 | 813 |
795 | 814 |
796 def main_win(options, args): | 815 def main_win(options, args): |
797 """Using the target build configuration, run the executable given in the | 816 """Using the target build configuration, run the executable given in the |
798 first non-option argument, passing any following arguments to that | 817 first non-option argument, passing any following arguments to that |
799 executable. | 818 executable. |
800 """ | 819 """ |
801 if len(args) < 1: | 820 if len(args) < 1: |
802 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) | 821 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) |
(...skipping 54 matching lines...) | |
857 | 876 |
858 if options.generate_json_file: | 877 if options.generate_json_file: |
859 _GenerateJSONForTestResults(options, results_tracker) | 878 _GenerateJSONForTestResults(options, results_tracker) |
860 | 879 |
861 if options.annotate: | 880 if options.annotate: |
862 annotate(options.test_type, result, results_tracker, | 881 annotate(options.test_type, result, results_tracker, |
863 options.factory_properties.get('full_test_name'), | 882 options.factory_properties.get('full_test_name'), |
864 perf_dashboard_id=options.factory_properties.get( | 883 perf_dashboard_id=options.factory_properties.get( |
865 'test_name')) | 884 'test_name')) |
866 | 885 |
886 if options.results_url: | |
887 send_results_to_dashboard( | |
888 results_tracker, options.factory_properties.get('master'), | |
889 options.factory_properties.get('perf_id'), options.test_type, | |
890 options.results_url) | |
891 | |
867 return result | 892 return result |
868 | 893 |
869 | 894 |
870 def main(): | 895 def main(): |
871 import platform | 896 import platform |
872 | 897 |
873 xvfb_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..', | 898 xvfb_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..', |
874 'third_party', 'xvfb', platform.architecture()[0]) | 899 'third_party', 'xvfb', platform.architecture()[0]) |
875 | 900 |
876 # Initialize logging. | 901 # Initialize logging. |
(...skipping 81 matching lines...) | |
958 option_parser.add_option('', '--test-type', default='', | 983 option_parser.add_option('', '--test-type', default='', |
959 help='The test name that identifies the test, ' | 984 help='The test name that identifies the test, ' |
960 'e.g. \'unit-tests\'') | 985 'e.g. \'unit-tests\'') |
961 option_parser.add_option('', '--test-results-server', default='', | 986 option_parser.add_option('', '--test-results-server', default='', |
962 help='The test results server to upload the ' | 987 help='The test results server to upload the ' |
963 'results.') | 988 'results.') |
964 option_parser.add_option('', '--annotate', default='', | 989 option_parser.add_option('', '--annotate', default='', |
965 help='Annotate output when run as a buildstep. ' | 990 help='Annotate output when run as a buildstep. ' |
966 'Specify which type of test to parse, available' | 991 'Specify which type of test to parse, available' |
967 ' types listed with --annotate=list.') | 992 ' types listed with --annotate=list.') |
993 option_parser.add_option('', '--results-url', default='', | |
994 help='The URL of the perf dashboard to upload ' | |
995 'results to.') | |
968 chromium_utils.AddPropertiesOptions(option_parser) | 996 chromium_utils.AddPropertiesOptions(option_parser) |
969 options, args = option_parser.parse_args() | 997 options, args = option_parser.parse_args() |
970 | 998 |
971 options.test_type = options.test_type or options.factory_properties.get( | 999 options.test_type = options.test_type or options.factory_properties.get( |
972 'step_name') | 1000 'step_name') |
973 | 1001 |
974 if options.run_shell_script and options.run_python_script: | 1002 if options.run_shell_script and options.run_python_script: |
975 sys.stderr.write('Use either --run-shell-script OR --run-python-script, ' | 1003 sys.stderr.write('Use either --run-shell-script OR --run-python-script, ' |
976 'not both.') | 1004 'not both.') |
977 return 1 | 1005 return 1 |
(...skipping 60 matching lines...) | |
1038 '%d new files were left in %s: Fix the tests to clean up themselves.' | 1066 '%d new files were left in %s: Fix the tests to clean up themselves.' |
1039 ) % ((new_temp_files - temp_files), tempfile.gettempdir()) | 1067 ) % ((new_temp_files - temp_files), tempfile.gettempdir()) |
1040 # TODO(maruel): Make it an error soon. Not yet since I want to iron out all | 1068 # TODO(maruel): Make it an error soon. Not yet since I want to iron out all |
1041 # the remaining cases before. | 1069 # the remaining cases before. |
1042 #result = 1 | 1070 #result = 1 |
1043 return result | 1071 return result |
1044 | 1072 |
1045 | 1073 |
1046 if '__main__' == __name__: | 1074 if '__main__' == __name__: |
1047 sys.exit(main()) | 1075 sys.exit(main()) |
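
For context on the change under review: the patch adds a `send_results_to_dashboard` helper, threads a new `--results-url` option through the parser, and calls the helper from each platform runner when the flag is set. Below is a minimal, self-contained sketch of that upload path. `FakeTracker` and `fake_send_results` are hypothetical stand-ins for the tracker built via `gtest_utils` and for `results_dashboard.SendResults`; only the loop body mirrors the patch, with `.items()` substituted for Python 2's `.iteritems()`, and all example values are made up.

```python
# Hypothetical stand-ins: the real tracker comes from common.gtest_utils and
# the real sender is slave.results_dashboard.SendResults.

def fake_send_results(logname, lines, master, system, test, url):
    """Stand-in for results_dashboard.SendResults; just echoes its inputs."""
    print('%s: %d line(s) -> %s (master=%s, system=%s, test=%s)'
          % (logname, len(lines), url, master, system, test))


class FakeTracker(object):
    """Stand-in exposing the PerformanceLogs() mapping the helper iterates."""

    def PerformanceLogs(self):
        return {'summary.dat': ['RESULT warm: t= 42 ms\n']}


def send_results_to_dashboard(results_tracker, master, system, test, url):
    # Same flattening as the patch: each log is a list of line-like values,
    # stringified and right-stripped before being handed to the sender.
    for logname, log in results_tracker.PerformanceLogs().items():
        lines = [str(l).rstrip() for l in log]
        fake_send_results(logname, lines, master, system, test, url)


# Example wiring, mirroring the call sites in the runners: master and perf_id
# come from factory_properties, test from --test-type, url from --results-url.
send_results_to_dashboard(FakeTracker(), 'chromium.perf', 'linux-perf',
                          'page_cycler_tests', 'https://example.com/dashboard')
```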