| OLD | NEW |
| 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 """Class for running instrumentation tests on a single device.""" | 5 """Class for running instrumentation tests on a single device.""" |
| 6 | 6 |
| 7 import logging | 7 import logging |
| 8 import os | 8 import os |
| 9 import re | 9 import re |
| 10 import shutil | |
| 11 import sys | |
| 12 import time | 10 import time |
| 13 | 11 |
| 14 from pylib import android_commands | 12 from pylib import android_commands |
| 15 from pylib import cmd_helper | |
| 16 from pylib import constants | 13 from pylib import constants |
| 17 from pylib import json_perf_parser | 14 from pylib import json_perf_parser |
| 18 from pylib import perf_tests_helper | 15 from pylib import perf_tests_helper |
| 19 from pylib import valgrind_tools | 16 from pylib import valgrind_tools |
| 20 from pylib.base import base_test_result | 17 from pylib.base import base_test_result |
| 21 from pylib.base import base_test_runner | 18 from pylib.base import base_test_runner |
| 22 | 19 |
| 23 import test_result | 20 import test_result |
| 24 | 21 |
| 25 | 22 |
| (...skipping 19 matching lines...) |
| 45 | 42 |
| 46 class TestRunner(base_test_runner.BaseTestRunner): | 43 class TestRunner(base_test_runner.BaseTestRunner): |
| 47 """Responsible for running a series of tests connected to a single device.""" | 44 """Responsible for running a series of tests connected to a single device.""" |
| 48 | 45 |
| 49 _DEVICE_DATA_DIR = 'chrome/test/data' | 46 _DEVICE_DATA_DIR = 'chrome/test/data' |
| 50 _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile' | 47 _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile' |
| 51 _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR + | 48 _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR + |
| 52 '/chrome-profile*') | 49 '/chrome-profile*') |
| 53 _DEVICE_HAS_TEST_FILES = {} | 50 _DEVICE_HAS_TEST_FILES = {} |
| 54 | 51 |
| 55 def __init__(self, build_type, test_data, save_perf_json, screenshot_failures, | 52 def __init__(self, test_options, device, shard_index, test_pkg, |
| 56 tool, wait_for_debugger, disable_assertions, push_deps, | |
| 57 cleanup_test_files, device, shard_index, test_pkg, | |
| 58 ports_to_forward): | 53 ports_to_forward): |
| 59 """Create a new TestRunner. | 54 """Create a new TestRunner. |
| 60 | 55 |
| 61 Args: | 56 Args: |
| 62 build_type: 'Release' or 'Debug'. | 57 test_options: An InstrumentationOptions object. |
| 63 test_data: Location of the test data. | |
| 64 save_perf_json: Whether or not to save the JSON file from UI perf tests. | |
| 65 screenshot_failures: Take a screenshot for a test failure | |
| 66 tool: Name of the Valgrind tool. | |
| 67 wait_for_debugger: Blocks until the debugger is connected. | |
| 68 disable_assertions: Whether to disable java assertions on the device. | |
| 69 push_deps: If True, push all dependencies to the device. | |
| 70 cleanup_test_files: Whether or not to cleanup test files on device. | |
| 71 device: Attached android device. | 58 device: Attached android device. |
| 72 shard_index: Shard index. | 59 shard_index: Shard index. |
| 73 test_pkg: A TestPackage object. | 60 test_pkg: A TestPackage object. |
| 74 ports_to_forward: A list of port numbers for which to set up forwarders. | 61 ports_to_forward: A list of port numbers for which to set up forwarders. |
| 75 Can be optionally requested by a test case. | 62 Can be optionally requested by a test case. |
| 76 """ | 63 """ |
| 77 super(TestRunner, self).__init__(device, tool, build_type, push_deps, | 64 super(TestRunner, self).__init__(device, test_options.tool, |
| 78 cleanup_test_files) | 65 test_options.build_type, |
| | 66 test_options.push_deps, |
| | 67 test_options.cleanup_test_files) |
| 79 self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index | 68 self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index |
| 80 | 69 |
| 81 self.build_type = build_type | 70 self.options = test_options |
| 82 self.test_data = test_data | |
| 83 self.save_perf_json = save_perf_json | |
| 84 self.screenshot_failures = screenshot_failures | |
| 85 self.wait_for_debugger = wait_for_debugger | |
| 86 self.disable_assertions = disable_assertions | |
| 87 self.test_pkg = test_pkg | 71 self.test_pkg = test_pkg |
| 88 self.ports_to_forward = ports_to_forward | 72 self.ports_to_forward = ports_to_forward |
| 89 | 73 |
| 90 #override | 74 #override |
| 91 def InstallTestPackage(self): | 75 def InstallTestPackage(self): |
| 92 self.test_pkg.Install(self.adb) | 76 self.test_pkg.Install(self.adb) |
| 93 | 77 |
| 94 #override | 78 #override |
| 95 def PushDataDeps(self): | 79 def PushDataDeps(self): |
| 96 # TODO(frankf): Implement a general approach for copying/installing | 80 # TODO(frankf): Implement a general approach for copying/installing |
| 97 # once across test runners. | 81 # once across test runners. |
| 98 if TestRunner._DEVICE_HAS_TEST_FILES.get(self.device, False): | 82 if TestRunner._DEVICE_HAS_TEST_FILES.get(self.device, False): |
| 99 logging.warning('Already copied test files to device %s, skipping.', | 83 logging.warning('Already copied test files to device %s, skipping.', |
| 100 self.device) | 84 self.device) |
| 101 return | 85 return |
| 102 | 86 |
| 103 test_data = _GetDataFilesForTestSuite(self.test_pkg.GetApkName()) | 87 test_data = _GetDataFilesForTestSuite(self.test_pkg.GetApkName()) |
| 104 if test_data: | 88 if test_data: |
| 105 # Make sure SD card is ready. | 89 # Make sure SD card is ready. |
| 106 self.adb.WaitForSdCardReady(20) | 90 self.adb.WaitForSdCardReady(20) |
| 107 for p in test_data: | 91 for p in test_data: |
| 108 self.adb.PushIfNeeded( | 92 self.adb.PushIfNeeded( |
| 109 os.path.join(constants.DIR_SOURCE_ROOT, p), | 93 os.path.join(constants.DIR_SOURCE_ROOT, p), |
| 110 os.path.join(self.adb.GetExternalStorage(), p)) | 94 os.path.join(self.adb.GetExternalStorage(), p)) |
| 111 | 95 |
| 112 # TODO(frankf): Specify test data in this file as opposed to passing | 96 # TODO(frankf): Specify test data in this file as opposed to passing |
| 113 # as command-line. | 97 # as command-line. |
| 114 for dest_host_pair in self.test_data: | 98 for dest_host_pair in self.options.test_data: |
| 115 dst_src = dest_host_pair.split(':',1) | 99 dst_src = dest_host_pair.split(':',1) |
| 116 dst_layer = dst_src[0] | 100 dst_layer = dst_src[0] |
| 117 host_src = dst_src[1] | 101 host_src = dst_src[1] |
| 118 host_test_files_path = constants.DIR_SOURCE_ROOT + '/' + host_src | 102 host_test_files_path = constants.DIR_SOURCE_ROOT + '/' + host_src |
| 119 if os.path.exists(host_test_files_path): | 103 if os.path.exists(host_test_files_path): |
| 120 self.adb.PushIfNeeded(host_test_files_path, | 104 self.adb.PushIfNeeded(host_test_files_path, |
| 121 self.adb.GetExternalStorage() + '/' + | 105 self.adb.GetExternalStorage() + '/' + |
| 122 TestRunner._DEVICE_DATA_DIR + '/' + dst_layer) | 106 TestRunner._DEVICE_DATA_DIR + '/' + dst_layer) |
| 123 self.tool.CopyFiles() | 107 self.tool.CopyFiles() |
| 124 TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True | 108 TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True |
| 125 | 109 |
| 126 def _GetInstrumentationArgs(self): | 110 def _GetInstrumentationArgs(self): |
| 127 ret = {} | 111 ret = {} |
| 128 if self.wait_for_debugger: | 112 if self.options.wait_for_debugger: |
| 129 ret['debug'] = 'true' | 113 ret['debug'] = 'true' |
| 130 return ret | 114 return ret |
| 131 | 115 |
| 132 def _TakeScreenshot(self, test): | 116 def _TakeScreenshot(self, test): |
| 133 """Takes a screenshot from the device.""" | 117 """Takes a screenshot from the device.""" |
| 134 screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, test + '.png') | 118 screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, test + '.png') |
| 135 logging.info('Taking screenshot named %s', screenshot_name) | 119 logging.info('Taking screenshot named %s', screenshot_name) |
| 136 self.adb.TakeScreenshot(screenshot_name) | 120 self.adb.TakeScreenshot(screenshot_name) |
| 137 | 121 |
| 138 def SetUp(self): | 122 def SetUp(self): |
| 139 """Sets up the test harness and device before all tests are run.""" | 123 """Sets up the test harness and device before all tests are run.""" |
| 140 super(TestRunner, self).SetUp() | 124 super(TestRunner, self).SetUp() |
| 141 if not self.adb.IsRootEnabled(): | 125 if not self.adb.IsRootEnabled(): |
| 142 logging.warning('Unable to enable java asserts for %s, non rooted device', | 126 logging.warning('Unable to enable java asserts for %s, non rooted device', |
| 143 self.device) | 127 self.device) |
| 144 else: | 128 else: |
| 145 if self.adb.SetJavaAssertsEnabled(enable=not self.disable_assertions): | 129 if self.adb.SetJavaAssertsEnabled( |
| | 130 enable=not self.options.disable_assertions): |
| 146 self.adb.Reboot(full_reboot=False) | 131 self.adb.Reboot(full_reboot=False) |
| 147 | 132 |
| 148 # Each shard gets a different default port for the test HTTP server, since | 133 # Each shard gets a different default port for the test HTTP server, since |
| 149 # multiple processes launching lighttpd on the same port at the same time | 134 # multiple processes launching lighttpd on the same port at the same time |
| 150 # can race. | 135 # can race. |
| 151 http_server_ports = self.LaunchTestHttpServer( | 136 http_server_ports = self.LaunchTestHttpServer( |
| 152 os.path.join(constants.DIR_SOURCE_ROOT), self._lighttp_port) | 137 os.path.join(constants.DIR_SOURCE_ROOT), self._lighttp_port) |
| 153 if self.ports_to_forward: | 138 if self.ports_to_forward: |
| 154 self.StartForwarder([(port, port) for port in self.ports_to_forward]) | 139 self.StartForwarder([(port, port) for port in self.ports_to_forward]) |
| 155 self.flags.AddFlags(['--enable-test-intents']) | 140 self.flags.AddFlags(['--enable-test-intents']) |
| (...skipping 85 matching lines...) |
| 241 # Obtain the relevant perf data. The data is dumped to a | 226 # Obtain the relevant perf data. The data is dumped to a |
| 242 # JSON formatted file. | 227 # JSON formatted file. |
| 243 json_string = self.adb.GetProtectedFileContents( | 228 json_string = self.adb.GetProtectedFileContents( |
| 244 '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt') | 229 '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt') |
| 245 | 230 |
| 246 if json_string: | 231 if json_string: |
| 247 json_string = '\n'.join(json_string) | 232 json_string = '\n'.join(json_string) |
| 248 else: | 233 else: |
| 249 raise Exception('Perf file does not exist or is empty') | 234 raise Exception('Perf file does not exist or is empty') |
| 250 | 235 |
| 251 if self.save_perf_json: | 236 if self.options.save_perf_json: |
| 252 json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name | 237 json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name |
| 253 with open(json_local_file, 'w') as f: | 238 with open(json_local_file, 'w') as f: |
| 254 f.write(json_string) | 239 f.write(json_string) |
| 255 logging.info('Saving Perf UI JSON from test ' + | 240 logging.info('Saving Perf UI JSON from test ' + |
| 256 test + ' to ' + json_local_file) | 241 test + ' to ' + json_local_file) |
| 257 | 242 |
| 258 raw_perf_data = regex.group(1).split(';') | 243 raw_perf_data = regex.group(1).split(';') |
| 259 | 244 |
| 260 for raw_perf_set in raw_perf_data: | 245 for raw_perf_set in raw_perf_data: |
| 261 if raw_perf_set: | 246 if raw_perf_set: |
| (...skipping 15 matching lines...) |
| 277 | 262 |
| 278 def _GetIndividualTestTimeoutScale(self, test): | 263 def _GetIndividualTestTimeoutScale(self, test): |
| 279 """Returns the timeout scale for the given |test|.""" | 264 """Returns the timeout scale for the given |test|.""" |
| 280 annotations = self.test_pkg.GetTestAnnotations(test) | 265 annotations = self.test_pkg.GetTestAnnotations(test) |
| 281 timeout_scale = 1 | 266 timeout_scale = 1 |
| 282 if 'TimeoutScale' in annotations: | 267 if 'TimeoutScale' in annotations: |
| 283 for annotation in annotations: | 268 for annotation in annotations: |
| 284 scale_match = re.match('TimeoutScale:([0-9]+)', annotation) | 269 scale_match = re.match('TimeoutScale:([0-9]+)', annotation) |
| 285 if scale_match: | 270 if scale_match: |
| 286 timeout_scale = int(scale_match.group(1)) | 271 timeout_scale = int(scale_match.group(1)) |
| 287 if self.wait_for_debugger: | 272 if self.options.wait_for_debugger: |
| 288 timeout_scale *= 100 | 273 timeout_scale *= 100 |
| 289 return timeout_scale | 274 return timeout_scale |
| 290 | 275 |
| 291 def _GetIndividualTestTimeoutSecs(self, test): | 276 def _GetIndividualTestTimeoutSecs(self, test): |
| 292 """Returns the timeout in seconds for the given |test|.""" | 277 """Returns the timeout in seconds for the given |test|.""" |
| 293 annotations = self.test_pkg.GetTestAnnotations(test) | 278 annotations = self.test_pkg.GetTestAnnotations(test) |
| 294 if 'Manual' in annotations: | 279 if 'Manual' in annotations: |
| 295 return 600 * 60 | 280 return 600 * 60 |
| 296 if 'External' in annotations: | 281 if 'External' in annotations: |
| 297 return 10 * 60 | 282 return 10 * 60 |
| (...skipping 23 matching lines...) |
| 321 try: | 306 try: |
| 322 self.TestSetup(test) | 307 self.TestSetup(test) |
| 323 start_date_ms = int(time.time()) * 1000 | 308 start_date_ms = int(time.time()) * 1000 |
| 324 raw_result = self._RunTest(test, timeout) | 309 raw_result = self._RunTest(test, timeout) |
| 325 duration_ms = int(time.time()) * 1000 - start_date_ms | 310 duration_ms = int(time.time()) * 1000 - start_date_ms |
| 326 status_code = raw_result.GetStatusCode() | 311 status_code = raw_result.GetStatusCode() |
| 327 if status_code: | 312 if status_code: |
| 328 log = raw_result.GetFailureReason() | 313 log = raw_result.GetFailureReason() |
| 329 if not log: | 314 if not log: |
| 330 log = 'No information.' | 315 log = 'No information.' |
| 331 if self.screenshot_failures or log.find('INJECT_EVENTS perm') >= 0: | 316 if (self.options.screenshot_failures or |
| | 317 log.find('INJECT_EVENTS perm') >= 0): |
| 332 self._TakeScreenshot(test) | 318 self._TakeScreenshot(test) |
| 333 result = test_result.InstrumentationTestResult( | 319 result = test_result.InstrumentationTestResult( |
| 334 test, base_test_result.ResultType.FAIL, start_date_ms, duration_ms, | 320 test, base_test_result.ResultType.FAIL, start_date_ms, duration_ms, |
| 335 log=log) | 321 log=log) |
| 336 else: | 322 else: |
| 337 result = test_result.InstrumentationTestResult( | 323 result = test_result.InstrumentationTestResult( |
| 338 test, base_test_result.ResultType.PASS, start_date_ms, duration_ms) | 324 test, base_test_result.ResultType.PASS, start_date_ms, duration_ms) |
| 339 results.AddResult(result) | 325 results.AddResult(result) |
| 340 # Catch exceptions thrown by StartInstrumentation(). | 326 # Catch exceptions thrown by StartInstrumentation(). |
| 341 # See ../../third_party/android/testrunner/adb_interface.py | 327 # See ../../third_party/android/testrunner/adb_interface.py |
| 342 except (android_commands.errors.WaitForResponseTimedOutError, | 328 except (android_commands.errors.WaitForResponseTimedOutError, |
| 343 android_commands.errors.DeviceUnresponsiveError, | 329 android_commands.errors.DeviceUnresponsiveError, |
| 344 android_commands.errors.InstrumentationError), e: | 330 android_commands.errors.InstrumentationError), e: |
| 345 if start_date_ms: | 331 if start_date_ms: |
| 346 duration_ms = int(time.time()) * 1000 - start_date_ms | 332 duration_ms = int(time.time()) * 1000 - start_date_ms |
| 347 else: | 333 else: |
| 348 start_date_ms = int(time.time()) * 1000 | 334 start_date_ms = int(time.time()) * 1000 |
| 349 duration_ms = 0 | 335 duration_ms = 0 |
| 350 message = str(e) | 336 message = str(e) |
| 351 if not message: | 337 if not message: |
| 352 message = 'No information.' | 338 message = 'No information.' |
| 353 results.AddResult(test_result.InstrumentationTestResult( | 339 results.AddResult(test_result.InstrumentationTestResult( |
| 354 test, base_test_result.ResultType.CRASH, start_date_ms, duration_ms, | 340 test, base_test_result.ResultType.CRASH, start_date_ms, duration_ms, |
| 355 log=message)) | 341 log=message)) |
| 356 raw_result = None | 342 raw_result = None |
| 357 self.TestTeardown(test, raw_result) | 343 self.TestTeardown(test, raw_result) |
| 358 return (results, None if results.DidRunPass() else test) | 344 return (results, None if results.DidRunPass() else test) |
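
The substance of this change is collapsing TestRunner's nine configuration arguments (build_type, test_data, save_perf_json, screenshot_failures, tool, wait_for_debugger, disable_assertions, push_deps, cleanup_test_files) into a single test_options object that the runner reads as self.options.*. The InstrumentationOptions class itself is not part of this diff, so the sketch below uses a hypothetical namedtuple stand-in carrying just the fields this file reads, to illustrate how a call site changes; it is not the real Chromium class.

```python
# Hypothetical stand-in for InstrumentationOptions; the real class is defined
# elsewhere and is not part of this diff. Field names mirror the attributes
# TestRunner reads via self.options.* above.
import collections

InstrumentationOptions = collections.namedtuple('InstrumentationOptions', [
    'build_type',           # 'Release' or 'Debug'
    'tool',                 # Valgrind tool name, or None
    'push_deps',            # push all dependencies to the device
    'cleanup_test_files',   # clean up test files on the device afterwards
    'test_data',            # list of 'device_dst:host_src' pairs
    'save_perf_json',       # keep the JSON file from UI perf tests
    'screenshot_failures',  # take a screenshot when a test fails
    'wait_for_debugger',    # block until a debugger attaches
    'disable_assertions',   # disable Java asserts on the device
])

# Old call site: nine loose arguments threaded through every layer.
#   TestRunner('Release', [], False, True, None, False, False, True, False,
#              device, shard_index, test_pkg, ports_to_forward)
# New call site: the options are bundled once and passed as one object.
options = InstrumentationOptions(
    build_type='Release', tool=None, push_deps=True,
    cleanup_test_files=False, test_data=[], save_perf_json=False,
    screenshot_failures=True, wait_for_debugger=False,
    disable_assertions=False)
#   TestRunner(options, device, shard_index, test_pkg, ports_to_forward)
```

Bundling the flags this way means a new option only touches the options object and the code that reads it, rather than every constructor signature between the command-line parser and the runner.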
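
PushDataDeps above consumes options.test_data entries of the form 'device_dst:host_src': each entry is split on the first colon, the right side is resolved under DIR_SOURCE_ROOT on the host, and the left side becomes a subdirectory of &lt;external storage&gt;/chrome/test/data on the device. Below is a minimal sketch of that mapping, with the source root and external-storage paths hard-coded as stand-ins for the real constants.DIR_SOURCE_ROOT and adb.GetExternalStorage() lookups.

```python
import os
import posixpath

# Illustrative stand-ins; the runner gets these from constants.DIR_SOURCE_ROOT
# and self.adb.GetExternalStorage() respectively.
DIR_SOURCE_ROOT = '/src/chromium/src'
EXTERNAL_STORAGE = '/sdcard'
DEVICE_DATA_DIR = 'chrome/test/data'   # TestRunner._DEVICE_DATA_DIR

def data_dep_paths(dest_host_pair):
    """Mirrors the 'device_dst:host_src' handling in PushDataDeps."""
    dst_layer, host_src = dest_host_pair.split(':', 1)
    host_path = os.path.join(DIR_SOURCE_ROOT, host_src)
    device_path = posixpath.join(EXTERNAL_STORAGE, DEVICE_DATA_DIR, dst_layer)
    return host_path, device_path

# e.g. a test_data entry of 'content:content/test/data/android/device_files'
print(data_dep_paths('content:content/test/data/android/device_files'))
# -> ('/src/chromium/src/content/test/data/android/device_files',
#     '/sdcard/chrome/test/data/content')
```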