OLD | NEW |
1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """Runs the Java tests. See more information on run_instrumentation_tests.py.""" | 5 """Runs the Java tests. See more information on run_instrumentation_tests.py.""" |
6 | 6 |
7 import logging | 7 import logging |
8 import os | 8 import os |
9 import re | 9 import re |
10 import shutil | 10 import shutil |
11 import sys | 11 import sys |
12 import time | 12 import time |
13 | 13 |
14 from pylib import android_commands | 14 from pylib import android_commands |
15 from pylib import cmd_helper | 15 from pylib import cmd_helper |
16 from pylib import constants | 16 from pylib import constants |
17 from pylib import forwarder | 17 from pylib import forwarder |
18 from pylib import json_perf_parser | 18 from pylib import json_perf_parser |
19 from pylib import perf_tests_helper | 19 from pylib import perf_tests_helper |
20 from pylib import valgrind_tools | 20 from pylib import valgrind_tools |
21 from pylib.base import base_test_runner | 21 from pylib.base import base_test_runner |
22 from pylib.base import test_result | 22 from pylib.base import test_result |
23 | 23 |
24 import apk_info | |
25 | |
26 | 24 |
27 _PERF_TEST_ANNOTATION = 'PerfTest' | 25 _PERF_TEST_ANNOTATION = 'PerfTest' |
28 | 26 |
29 | 27 |
30 class TestRunner(base_test_runner.BaseTestRunner): | 28 class TestRunner(base_test_runner.BaseTestRunner): |
31 """Responsible for running a series of tests connected to a single device.""" | 29 """Responsible for running a series of tests connected to a single device.""" |
32 | 30 |
33 _DEVICE_DATA_DIR = 'chrome/test/data' | 31 _DEVICE_DATA_DIR = 'chrome/test/data' |
34 _EMMA_JAR = os.path.join(os.environ.get('ANDROID_BUILD_TOP', ''), | 32 _EMMA_JAR = os.path.join(os.environ.get('ANDROID_BUILD_TOP', ''), |
35 'external/emma/lib/emma.jar') | 33 'external/emma/lib/emma.jar') |
36 _COVERAGE_MERGED_FILENAME = 'unittest_coverage.es' | 34 _COVERAGE_MERGED_FILENAME = 'unittest_coverage.es' |
37 _COVERAGE_WEB_ROOT_DIR = os.environ.get('EMMA_WEB_ROOTDIR') | 35 _COVERAGE_WEB_ROOT_DIR = os.environ.get('EMMA_WEB_ROOTDIR') |
38 _COVERAGE_FILENAME = 'coverage.ec' | 36 _COVERAGE_FILENAME = 'coverage.ec' |
39 _COVERAGE_RESULT_PATH = ('/data/data/com.google.android.apps.chrome/files/' + | 37 _COVERAGE_RESULT_PATH = ('/data/data/com.google.android.apps.chrome/files/' + |
40 _COVERAGE_FILENAME) | 38 _COVERAGE_FILENAME) |
41 _COVERAGE_META_INFO_PATH = os.path.join(os.environ.get('ANDROID_BUILD_TOP', | 39 _COVERAGE_META_INFO_PATH = os.path.join(os.environ.get('ANDROID_BUILD_TOP', |
42 ''), | 40 ''), |
43 'out/target/common/obj/APPS', | 41 'out/target/common/obj/APPS', |
44 'Chrome_intermediates/coverage.em') | 42 'Chrome_intermediates/coverage.em') |
45 _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile' | 43 _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile' |
46 _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR + | 44 _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR + |
47 '/chrome-profile*') | 45 '/chrome-profile*') |
48 _DEVICE_HAS_TEST_FILES = {} | 46 _DEVICE_HAS_TEST_FILES = {} |
49 | 47 |
50 def __init__(self, options, device, shard_index, coverage, apks, | 48 def __init__(self, options, device, shard_index, coverage, test_pkg, |
51 ports_to_forward): | 49 ports_to_forward, is_uiautomator_test=False): |
52 """Create a new TestRunner. | 50 """Create a new TestRunner. |
53 | 51 |
54 Args: | 52 Args: |
55 options: An options object with the following required attributes: | 53 options: An options object with the following required attributes: |
56 - build_type: 'Release' or 'Debug'. | 54 - build_type: 'Release' or 'Debug'. |
57 - install_apk: Re-installs the apk if requested. | 55 - install_apk: Re-installs the apk if requested. |
58 - save_perf_json: Whether or not to save the JSON file from UI perf | 56 - save_perf_json: Whether or not to save the JSON file from UI perf |
59 tests. | 57 tests. |
60 - screenshot_failures: Takes a screenshot on test failure. | 58 - screenshot_failures: Takes a screenshot on test failure. |
61 - tool: Name of the Valgrind tool. | 59 - tool: Name of the Valgrind tool. |
62 - wait_for_debugger: Blocks until the debugger is connected. | 60 - wait_for_debugger: Blocks until the debugger is connected. |
63 - disable_assertions: Whether to disable java assertions on the device. | 61 - disable_assertions: Whether to disable java assertions on the device. |
64 device: Attached android device. | 62 device: Attached android device. |
65 shard_index: Shard index. | 63 shard_index: Shard index. |
66 coverage: Collects coverage information if requested. | 64 coverage: Collects coverage information if requested. |
67 apks: A list of ApkInfo objects that need to be installed. The first | 65 test_pkg: A TestPackage object. |
68 element should be the tests apk; the rest can be the apks used in the | |
69 test. The default is ChromeTest.apk. | |
70 ports_to_forward: A list of port numbers for which to set up forwarders. | 66 ports_to_forward: A list of port numbers for which to set up forwarders. |
71 Can be optionally requested by a test case. | 67 Can be optionally requested by a test case. |
| 68 is_uiautomator_test: Whether this is a uiautomator test. |
72 Raises: | 69 Raises: |
73 Exception: if coverage metadata is not available. | 70 Exception: if coverage metadata is not available. |
74 """ | 71 """ |
75 super(TestRunner, self).__init__(device, options.tool, options.build_type) | 72 super(TestRunner, self).__init__(device, options.tool, options.build_type) |
76 self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index | 73 self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index |
77 | 74 |
78 if not apks: | |
79 apks = [apk_info.ApkInfo(options.test_apk_path, | |
80 options.test_apk_jar_path)] | |
81 | |
82 self.build_type = options.build_type | 75 self.build_type = options.build_type |
83 self.install_apk = options.install_apk | |
84 self.test_data = options.test_data | 76 self.test_data = options.test_data |
85 self.save_perf_json = options.save_perf_json | 77 self.save_perf_json = options.save_perf_json |
86 self.screenshot_failures = options.screenshot_failures | 78 self.screenshot_failures = options.screenshot_failures |
87 self.wait_for_debugger = options.wait_for_debugger | 79 self.wait_for_debugger = options.wait_for_debugger |
88 self.disable_assertions = options.disable_assertions | 80 self.disable_assertions = options.disable_assertions |
89 | |
90 self.coverage = coverage | 81 self.coverage = coverage |
91 self.apks = apks | 82 self.test_pkg = test_pkg |
92 self.test_apk = apks[0] | |
93 self.instrumentation_class_path = self.test_apk.GetPackageName() | |
94 self.ports_to_forward = ports_to_forward | 83 self.ports_to_forward = ports_to_forward |
| 84 self.is_uiautomator_test = is_uiautomator_test |
| 85 if self.is_uiautomator_test: |
| 86 self.package_name = options.package_name |
| 87 else: |
| 88 self.install_apk = options.install_apk |
95 | 89 |
96 self.forwarder = None | 90 self.forwarder = None |
97 | 91 |
98 if self.coverage: | 92 if self.coverage: |
99 if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME): | 93 if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME): |
100 os.remove(TestRunner._COVERAGE_MERGED_FILENAME) | 94 os.remove(TestRunner._COVERAGE_MERGED_FILENAME) |
101 if not os.path.exists(TestRunner._COVERAGE_META_INFO_PATH): | 95 if not os.path.exists(TestRunner._COVERAGE_META_INFO_PATH): |
102 raise Exception('FATAL ERROR in ' + sys.argv[0] + | 96 raise Exception('FATAL ERROR in ' + sys.argv[0] + |
103 ' : Coverage meta info [' + | 97 ' : Coverage meta info [' + |
104 TestRunner._COVERAGE_META_INFO_PATH + | 98 TestRunner._COVERAGE_META_INFO_PATH + |
(...skipping 13 matching lines...) |
118 return | 112 return |
119 for dest_host_pair in self.test_data: | 113 for dest_host_pair in self.test_data: |
120 dst_src = dest_host_pair.split(':', 1) | 114 dst_src = dest_host_pair.split(':', 1) |
121 dst_layer = dst_src[0] | 115 dst_layer = dst_src[0] |
122 host_src = dst_src[1] | 116 host_src = dst_src[1] |
123 host_test_files_path = constants.CHROME_DIR + '/' + host_src | 117 host_test_files_path = constants.CHROME_DIR + '/' + host_src |
124 if os.path.exists(host_test_files_path): | 118 if os.path.exists(host_test_files_path): |
125 self.adb.PushIfNeeded(host_test_files_path, | 119 self.adb.PushIfNeeded(host_test_files_path, |
126 self.adb.GetExternalStorage() + '/' + | 120 self.adb.GetExternalStorage() + '/' + |
127 TestRunner._DEVICE_DATA_DIR + '/' + dst_layer) | 121 TestRunner._DEVICE_DATA_DIR + '/' + dst_layer) |
128 if self.install_apk: | 122 if self.is_uiautomator_test: |
129 for apk in self.apks: | 123 self.test_pkg.Install(self.adb) |
130 self.adb.ManagedInstall(apk.GetApkPath(), | 124 elif self.install_apk: |
131 package_name=apk.GetPackageName()) | 125 self.test_pkg.Install(self.adb) |
| 126 |
132 self.tool.CopyFiles() | 127 self.tool.CopyFiles() |
133 TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True | 128 TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True |
134 | 129 |
135 def SaveCoverageData(self, test): | 130 def SaveCoverageData(self, test): |
136 """Saves the Emma coverage data before it's overwritten by the next test. | 131 """Saves the Emma coverage data before it's overwritten by the next test. |
137 | 132 |
138 Args: | 133 Args: |
139 test: the test whose coverage data is collected. | 134 test: the test whose coverage data is collected. |
140 """ | 135 """ |
141 if not self.coverage: | 136 if not self.coverage: |
(...skipping 104 matching lines...) |
246 | 241 |
247 def _IsPerfTest(self, test): | 242 def _IsPerfTest(self, test): |
248 """Determines whether a test is a performance test. | 243 """Determines whether a test is a performance test. |
249 | 244 |
250 Args: | 245 Args: |
251 test: The name of the test to be checked. | 246 test: The name of the test to be checked. |
252 | 247 |
253 Returns: | 248 Returns: |
254 Whether the test is annotated as a performance test. | 249 Whether the test is annotated as a performance test. |
255 """ | 250 """ |
256 return _PERF_TEST_ANNOTATION in self.test_apk.GetTestAnnotations(test) | 251 return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test) |
257 | 252 |
258 def SetupPerfMonitoringIfNeeded(self, test): | 253 def SetupPerfMonitoringIfNeeded(self, test): |
259 """Sets up performance monitoring if the specified test requires it. | 254 """Sets up performance monitoring if the specified test requires it. |
260 | 255 |
261 Args: | 256 Args: |
262 test: The name of the test to be run. | 257 test: The name of the test to be run. |
263 """ | 258 """ |
264 if not self._IsPerfTest(test): | 259 if not self._IsPerfTest(test): |
265 return | 260 return |
266 self.adb.Adb().SendCommand('shell rm ' + | 261 self.adb.Adb().SendCommand('shell rm ' + |
(...skipping 78 matching lines...) |
345 perf_tests_helper.PrintPerfResult(perf_set[1], perf_set[2], | 340 perf_tests_helper.PrintPerfResult(perf_set[1], perf_set[2], |
346 [result['average']], | 341 [result['average']], |
347 result['units']) | 342 result['units']) |
348 | 343 |
349 def _SetupIndividualTestTimeoutScale(self, test): | 344 def _SetupIndividualTestTimeoutScale(self, test): |
350 timeout_scale = self._GetIndividualTestTimeoutScale(test) | 345 timeout_scale = self._GetIndividualTestTimeoutScale(test) |
351 valgrind_tools.SetChromeTimeoutScale(self.adb, timeout_scale) | 346 valgrind_tools.SetChromeTimeoutScale(self.adb, timeout_scale) |
352 | 347 |
353 def _GetIndividualTestTimeoutScale(self, test): | 348 def _GetIndividualTestTimeoutScale(self, test): |
354 """Returns the timeout scale for the given |test|.""" | 349 """Returns the timeout scale for the given |test|.""" |
355 annotations = self.apks[0].GetTestAnnotations(test) | 350 annotations = self.test_pkg.GetTestAnnotations(test) |
356 timeout_scale = 1 | 351 timeout_scale = 1 |
357 if 'TimeoutScale' in annotations: | 352 if 'TimeoutScale' in annotations: |
358 for annotation in annotations: | 353 for annotation in annotations: |
359 scale_match = re.match('TimeoutScale:([0-9]+)', annotation) | 354 scale_match = re.match('TimeoutScale:([0-9]+)', annotation) |
360 if scale_match: | 355 if scale_match: |
361 timeout_scale = int(scale_match.group(1)) | 356 timeout_scale = int(scale_match.group(1)) |
362 if self.wait_for_debugger: | 357 if self.wait_for_debugger: |
363 timeout_scale *= 100 | 358 timeout_scale *= 100 |
364 return timeout_scale | 359 return timeout_scale |
365 | 360 |
366 def _GetIndividualTestTimeoutSecs(self, test): | 361 def _GetIndividualTestTimeoutSecs(self, test): |
367 """Returns the timeout in seconds for the given |test|.""" | 362 """Returns the timeout in seconds for the given |test|.""" |
368 annotations = self.apks[0].GetTestAnnotations(test) | 363 annotations = self.test_pkg.GetTestAnnotations(test) |
369 if 'Manual' in annotations: | 364 if 'Manual' in annotations: |
370 return 600 * 60 | 365 return 600 * 60 |
371 if 'External' in annotations: | 366 if 'External' in annotations: |
372 return 10 * 60 | 367 return 10 * 60 |
373 if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations: | 368 if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations: |
374 return 5 * 60 | 369 return 5 * 60 |
375 if 'MediumTest' in annotations: | 370 if 'MediumTest' in annotations: |
376 return 3 * 60 | 371 return 3 * 60 |
377 return 1 * 60 | 372 return 1 * 60 |
378 | 373 |
379 def RunTest(self, test): | 374 def RunTest(self, test): |
380 """Runs the test, generating the coverage if needed. | 375 """Runs the test, generating the coverage if needed. |
381 | 376 |
382 Returns: | 377 Returns: |
383 A test_result.TestResults object. | 378 A test_result.TestResults object. |
384 """ | 379 """ |
385 instrumentation_path = (self.instrumentation_class_path + | |
386 '/android.test.InstrumentationTestRunner') | |
387 instrumentation_args = self._GetInstrumentationArgs() | |
388 raw_result = None | 380 raw_result = None |
389 start_date_ms = None | 381 start_date_ms = None |
390 test_results = test_result.TestResults() | 382 test_results = test_result.TestResults() |
| 383 timeout = (self._GetIndividualTestTimeoutSecs(test) * |
| 384 self._GetIndividualTestTimeoutScale(test) * |
| 385 self.tool.GetTimeoutScale()) |
391 try: | 386 try: |
392 self.TestSetup(test) | 387 self.TestSetup(test) |
393 start_date_ms = int(time.time()) * 1000 | 388 start_date_ms = int(time.time()) * 1000 |
394 args_with_filter = dict(instrumentation_args) | 389 |
395 args_with_filter['class'] = test | 390 if self.is_uiautomator_test: |
396 # |raw_results| is a list that should contain | 391 self.adb.ClearApplicationState(self.package_name) |
397 # a single TestResult object. | 392 # TODO(frankf): Stop-gap solution. Should use annotations. |
398 logging.warn(args_with_filter) | 393 if 'FirstRun' in test: |
399 (raw_results, _) = self.adb.Adb().StartInstrumentation( | 394 self.flags.RemoveFlags(['--disable-fre']) |
400 instrumentation_path=instrumentation_path, | 395 else: |
401 instrumentation_args=args_with_filter, | 396 self.flags.AddFlags(['--disable-fre']) |
402 timeout_time=(self._GetIndividualTestTimeoutSecs(test) * | 397 raw_result = self.adb.RunUIAutomatorTest( |
403 self._GetIndividualTestTimeoutScale(test) * | 398 test, self.test_pkg.GetPackageName(), timeout) |
404 self.tool.GetTimeoutScale())) | 399 else: |
| 400 raw_result = self.adb.RunInstrumentationTest( |
| 401 test, self.test_pkg.GetPackageName(), |
| 402 self._GetInstrumentationArgs(), timeout) |
| 403 |
405 duration_ms = int(time.time()) * 1000 - start_date_ms | 404 duration_ms = int(time.time()) * 1000 - start_date_ms |
406 assert len(raw_results) == 1 | |
407 raw_result = raw_results[0] | |
408 status_code = raw_result.GetStatusCode() | 405 status_code = raw_result.GetStatusCode() |
409 if status_code: | 406 if status_code: |
410 log = raw_result.GetFailureReason() | 407 log = raw_result.GetFailureReason() |
411 if not log: | 408 if not log: |
412 log = 'No information.' | 409 log = 'No information.' |
413 if self.screenshot_failures or log.find('INJECT_EVENTS perm') >= 0: | 410 if self.screenshot_failures or log.find('INJECT_EVENTS perm') >= 0: |
414 self._TakeScreenshot(test) | 411 self._TakeScreenshot(test) |
415 test_results.failed = [test_result.SingleTestResult( | 412 test_results.failed = [test_result.SingleTestResult( |
416 test, start_date_ms, duration_ms, log)] | 413 test, start_date_ms, duration_ms, log)] |
417 else: | 414 else: |
(...skipping 10 matching lines...) |
428 start_date_ms = int(time.time()) * 1000 | 425 start_date_ms = int(time.time()) * 1000 |
429 duration_ms = 0 | 426 duration_ms = 0 |
430 message = str(e) | 427 message = str(e) |
431 if not message: | 428 if not message: |
432 message = 'No information.' | 429 message = 'No information.' |
433 test_results.crashed = [test_result.SingleTestResult( | 430 test_results.crashed = [test_result.SingleTestResult( |
434 test, start_date_ms, duration_ms, message)] | 431 test, start_date_ms, duration_ms, message)] |
435 raw_result = None | 432 raw_result = None |
436 self.TestTeardown(test, raw_result) | 433 self.TestTeardown(test, raw_result) |
437 return (test_results, None if test_results.ok else test) | 434 return (test_results, None if test_results.ok else test) |
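For reference, a minimal standalone sketch of the annotation-driven timeout computation that RunTest feeds into |timeout| above. The helper names and the sample annotation list are hypothetical, and the sketch assumes valued annotations surface both as the bare name ('TimeoutScale') and as the 'TimeoutScale:<n>' form, which is what the exact-membership guard plus the regex loop imply:

import re

_PERF_TEST_ANNOTATION = 'PerfTest'

def GetTimeoutScale(annotations, wait_for_debugger=False):
  """Mirrors _GetIndividualTestTimeoutScale from the change above."""
  timeout_scale = 1
  # Exact list-membership guard, as in the change; the regex then pulls the
  # multiplier out of the 'TimeoutScale:<n>' form.
  if 'TimeoutScale' in annotations:
    for annotation in annotations:
      scale_match = re.match('TimeoutScale:([0-9]+)', annotation)
      if scale_match:
        timeout_scale = int(scale_match.group(1))
  if wait_for_debugger:
    timeout_scale *= 100  # Leave generous room for stepping in a debugger.
  return timeout_scale

def GetTimeoutSecs(annotations):
  """Mirrors _GetIndividualTestTimeoutSecs: size annotation -> base timeout."""
  if 'Manual' in annotations:
    return 600 * 60  # 10 hours.
  if 'External' in annotations:
    return 10 * 60
  if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
    return 5 * 60
  if 'MediumTest' in annotations:
    return 3 * 60
  return 1 * 60  # Default for small/unannotated tests.

# Hypothetical annotation list carrying both forms of the valued annotation.
annotations = ['MediumTest', 'TimeoutScale', 'TimeoutScale:4']
print(GetTimeoutSecs(annotations) * GetTimeoutScale(annotations))  # 720

RunTest additionally multiplies this product by self.tool.GetTimeoutScale(), so a Valgrind-style tool can stretch every test's deadline uniformly.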