# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs a perf test on a single device.

Our buildbot infrastructure requires each slave to run steps serially.
This is sub-optimal for Android, where these steps can run independently on
multiple connected devices.

The buildbots will run this script multiple times per cycle:
- First: all steps listed in --steps will be executed in parallel, using all
connected devices. Step results will be pickled to disk. Each step has a
unique name. The result code will be ignored if the step name is listed in
--flaky-steps.
The buildbot will treat this step as a regular step, and will not process any
graph data.

- Then, with --print-step STEP_NAME: at this stage, the script simply prints
the previously saved results for that step. The buildbot will then process
the graph data accordingly.

The JSON steps file contains a dictionary in the format:
{
  "step_name_foo": "script_to_execute foo",
  "step_name_bar": "script_to_execute bar"
}

The JSON flaky steps file contains a list of step names whose results should
be ignored:
[
  "step_name_foo",
  "step_name_bar"
]

Note that script_to_execute must accept at least the following options:
  --device: the serial number to be passed to all adb commands.
  --keep_test_server_ports: indicates it's being run as a shard, and shouldn't
    reset test server port allocation.
"""

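# For illustration only (not part of this harness): a minimal
# script_to_execute satisfying the contract above could parse the two required
# options as shown below. The option names come from the module docstring;
# everything else is hypothetical.
#
#   import optparse
#   parser = optparse.OptionParser()
#   parser.add_option('--device',
#                     help='Serial number passed to all adb commands.')
#   parser.add_option('--keep_test_server_ports', action='store_true',
#                     help='Running as a shard; do not reset test server '
#                          'port allocation.')
#   options, args = parser.parse_args()
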
import datetime
import logging
import os
import pickle
import sys
import time

import pexpect

from pylib import constants
from pylib.base import base_test_result
from pylib.base import base_test_runner


_OUTPUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out', 'step_results')


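# Each step's result is pickled to _OUTPUT_DIR/<step name> by
# TestRunner._SaveResult below; PrintTestOutput reads it back when the harness
# is re-invoked with --print-step.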
def PrintTestOutput(test_name):
  """Helper method to print the output of a previously executed test_name.

  Args:
    test_name: name of the test that has been previously executed.

  Returns:
    exit code generated by the test step.
  """
  file_name = os.path.join(_OUTPUT_DIR, test_name)
  if not os.path.exists(file_name):
    logging.error('File not found: %s', file_name)
    return 1

  with open(file_name, 'r') as f:
    persisted_result = pickle.loads(f.read())
  print persisted_result['output']

  return persisted_result['exit_code']


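# When the buildbot re-invokes this harness with --print-step STEP_NAME (see
# the module docstring), the dispatching code (not part of this file)
# presumably does something equivalent to:
#   sys.exit(PrintTestOutput(STEP_NAME))
# STEP_NAME stands in for an actual step name from the steps file.

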
class TestRunner(base_test_runner.BaseTestRunner):
  def __init__(self, test_options, device, tests, flaky_tests):
    """A TestRunner instance runs a perf test on a single device.

    Args:
      test_options: A PerfOptions object.
      device: Serial number of the device on which to run the tests.
      tests: A dict mapping test_name to the command to execute.
      flaky_tests: A list of names of tests whose results should be ignored.
    """
    super(TestRunner, self).__init__(device, None, 'Release')
    self._options = test_options
    self._tests = tests
    self._flaky_tests = flaky_tests

  @staticmethod
  def _SaveResult(result):
    with open(os.path.join(_OUTPUT_DIR, result['name']), 'w') as f:
      f.write(pickle.dumps(result))

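  # For illustration (the values below are hypothetical), _LaunchPerfTest
  # persists a dict of the form:
  #   {'name': 'step_name_foo', 'output': '<captured stdout>', 'exit_code': 0,
  #    'result_type': base_test_result.ResultType.PASS, 'total_time': 42,
  #    'device': '0123456789abcdef'}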
  def _LaunchPerfTest(self, test_name):
    """Runs a perf test.

    Args:
      test_name: the name of the test to be executed.

    Returns:
      A tuple containing (output, base_test_result.ResultType).
    """
    # E.g. 'script_to_execute foo --device <serial> --keep_test_server_ports'.
    cmd = ('%s --device %s --keep_test_server_ports' %
        (self._tests[test_name], self.device))
    start_time = datetime.datetime.now()
    output, exit_code = pexpect.run(
        cmd, cwd=os.path.abspath(constants.DIR_SOURCE_ROOT),
        withexitstatus=True, logfile=sys.stdout, timeout=1800,
        env=os.environ)
    end_time = datetime.datetime.now()
    logging.info('%s : exit_code=%d in %d secs at %s',
                 test_name, exit_code, (end_time - start_time).seconds,
                 self.device)
    result_type = base_test_result.ResultType.FAIL
    if exit_code == 0:
      result_type = base_test_result.ResultType.PASS
    if test_name in self._flaky_tests:
      # The exit codes of flaky steps are ignored: report them as passing.
      exit_code = 0
      result_type = base_test_result.ResultType.PASS

    persisted_result = {
      'name': test_name,
      'output': output,
      'exit_code': exit_code,
      'result_type': result_type,
      'total_time': (end_time - start_time).seconds,
      'device': self.device,
    }
    self._SaveResult(persisted_result)

    return (output, result_type)

  def RunTest(self, test_name):
    """Runs a perf test on the device.

    Args:
      test_name: the name of the test (step) to run; also used when logging
        the result.

    Returns:
      A tuple of (TestRunResults, retry), where retry is the test name to be
      retried if the step failed, or None if it passed.
    """
    output, result_type = self._LaunchPerfTest(test_name)
    results = base_test_result.TestRunResults()
    results.AddResult(base_test_result.BaseTestResult(test_name, result_type))
    retry = None
    if not results.DidRunPass():
      retry = test_name
    return results, retry
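

# For illustration only: a sharding harness (not part of this file) might
# drive one TestRunner per connected device roughly as follows. The names
# 'perf_options', 'device_serial', 'tests' and 'flaky_tests' are hypothetical.
#
#   runner = TestRunner(perf_options, device_serial, tests, flaky_tests)
#   for step_name in tests:
#     results, retry = runner.RunTest(step_name)
#     if retry:
#       results, _ = runner.RunTest(retry)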