OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # | 2 # |
3 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 3 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
4 # Use of this source code is governed by a BSD-style license that can be | 4 # Use of this source code is governed by a BSD-style license that can be |
5 # found in the LICENSE file. | 5 # found in the LICENSE file. |
6 | 6 |
7 """Runs all the native unit tests. | 7 """Runs all the native unit tests. |
8 | 8 |
9 1. Copy over test binary to /data/local on device. | 9 1. Copy over test binary to /data/local on device. |
10 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak) | 10 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak) |
(...skipping 33 matching lines...)
44 | 44 |
45 This file is generated by the tests running on devices. If running on an | 45 This file is generated by the tests running on devices. If running on an |
46 emulator, an additional filter file listing the tests that fail only on the | 46 emulator, an additional filter file listing the tests that fail only on the |
47 emulator will be loaded. We don't care about the rare test cases that | 47 emulator will be loaded. We don't care about the rare test cases that |
48 succeed on the emulator but fail on a device. | 48 succeed on the emulator but fail on a device. |
49 """ | 49 """ |
50 | 50 |
51 import fnmatch | 51 import fnmatch |
52 import logging | 52 import logging |
53 import os | 53 import os |
| 54 import signal |
54 import subprocess | 55 import subprocess |
55 import sys | 56 import sys |
56 import time | 57 import time |
57 | 58 |
58 from pylib import android_commands | 59 from pylib import android_commands |
59 from pylib.base_test_sharder import BaseTestSharder | 60 from pylib.base_test_sharder import BaseTestSharder |
| 61 from pylib import buildbot_report |
60 from pylib import constants | 62 from pylib import constants |
61 from pylib import debug_info | 63 from pylib import debug_info |
62 import emulator | 64 import emulator |
63 from pylib import ports | 65 from pylib import ports |
64 from pylib import run_tests_helper | 66 from pylib import run_tests_helper |
65 from pylib import test_options_parser | 67 from pylib import test_options_parser |
66 from pylib.single_test_runner import SingleTestRunner | 68 from pylib.single_test_runner import SingleTestRunner |
67 from pylib.test_result import BaseTestResult, TestResults | 69 from pylib.test_result import BaseTestResult, TestResults |
68 | 70 |
| 71 |
69 _TEST_SUITES = ['base_unittests', | 72 _TEST_SUITES = ['base_unittests', |
70 'content_unittests', | 73 'content_unittests', |
71 'gpu_unittests', | 74 'gpu_unittests', |
72 'ipc_tests', | 75 'ipc_tests', |
73 'net_unittests', | 76 'net_unittests', |
74 'sql_unittests', | 77 'sql_unittests', |
75 'sync_unit_tests', | 78 'sync_unit_tests', |
76 'ui_unittests', | 79 'ui_unittests', |
77 ] | 80 ] |
78 | 81 |
79 def FullyQualifiedTestSuites(apk, test_suites): | 82 |
| 83 def FullyQualifiedTestSuites(exe, test_suites): |
80 """Return a fully qualified list that represents all known suites. | 84 """Return a fully qualified list that represents all known suites. |
81 | 85 |
82 Args: | 86 Args: |
83 apk: if True, use the apk-based test runner | 87 exe: if True, use the executable-based test runner. |
84 test_suites: the source test suites to process""" | 88 test_suites: the source test suites to process. |
85 # If not specified, assume the test suites are in out/Release | 89 """ |
| 90 # Assume the test suites are in out/Release. |
86 test_suite_dir = os.path.abspath(os.path.join(constants.CHROME_DIR, | 91 test_suite_dir = os.path.abspath(os.path.join(constants.CHROME_DIR, |
87 'out', 'Release')) | 92 'out', 'Release')) |
88 if apk: | 93 if exe: |
| 94 suites = [os.path.join(test_suite_dir, t) for t in _TEST_SUITES] |
| 95 else: |
89 # out/Release/$SUITE_apk/$SUITE-debug.apk | 96 # out/Release/$SUITE_apk/$SUITE-debug.apk |
90 suites = [os.path.join(test_suite_dir, | 97 suites = [os.path.join(test_suite_dir, |
91 t + '_apk', | 98 t + '_apk', |
92 t + '-debug.apk') | 99 t + '-debug.apk') |
93 for t in test_suites] | 100 for t in test_suites] |
94 else: | |
95 suites = [os.path.join(test_suite_dir, t) for t in _TEST_SUITES] | |
96 return suites | 101 return suites |
97 | 102 |
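Note for reviewers skimming the path logic: a minimal standalone sketch of the two path shapes this helper builds, assuming an out/Release build and a hypothetical checkout root (the real root comes from constants.CHROME_DIR):

    import os

    chrome_dir = '/path/to/src'  # hypothetical stand-in for constants.CHROME_DIR
    t = 'base_unittests'
    # Executable-based runner (exe=True):
    exe_path = os.path.join(chrome_dir, 'out', 'Release', t)
    # APK-based runner (exe=False): out/Release/$SUITE_apk/$SUITE-debug.apk
    apk_path = os.path.join(chrome_dir, 'out', 'Release', t + '_apk',
                            t + '-debug.apk')
    # exe_path == '/path/to/src/out/Release/base_unittests'
    # apk_path == '/path/to/src/out/Release/base_unittests_apk/base_unittests-debug.apk'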
98 | 103 |
99 class TimeProfile(object): | 104 class TimeProfile(object): |
100 """Class for simple profiling of action, with logging of cost.""" | 105 """Class for simple profiling of action, with logging of cost.""" |
101 | 106 |
102 def __init__(self, description): | 107 def __init__(self, description): |
103 self._description = description | 108 self._description = description |
104 self.Start() | 109 self.Start() |
105 | 110 |
106 def Start(self): | 111 def Start(self): |
107 self._starttime = time.time() | 112 self._starttime = time.time() |
108 | 113 |
109 def Stop(self): | 114 def Stop(self): |
110 """Stop profiling and dump a log.""" | 115 """Stop profiling and dump a log.""" |
111 if self._starttime: | 116 if self._starttime: |
112 stoptime = time.time() | 117 stoptime = time.time() |
113 logging.info('%fsec to perform %s' % | 118 logging.info('%fsec to perform %s', |
114 (stoptime - self._starttime, self._description)) | 119 stoptime - self._starttime, self._description) |
115 self._starttime = None | 120 self._starttime = None |
116 | 121 |
| 122 |
117 class Xvfb(object): | 123 class Xvfb(object): |
118 """Class to start and stop Xvfb if relevant. Nop if not Linux.""" | 124 """Class to start and stop Xvfb if relevant. Nop if not Linux.""" |
119 | 125 |
120 def __init__(self): | 126 def __init__(self): |
121 self._pid = 0 | 127 self._pid = 0 |
122 | 128 |
123 def _IsLinux(self): | 129 def _IsLinux(self): |
124 """Return True if on Linux; else False.""" | 130 """Return True if on Linux; else False.""" |
125 return sys.platform.startswith('linux') | 131 return sys.platform.startswith('linux') |
126 | 132 |
127 def Start(self): | 133 def Start(self): |
128 """Start Xvfb and set an appropriate DISPLAY environment. Linux only. | 134 """Start Xvfb and set an appropriate DISPLAY environment. Linux only. |
129 | 135 |
130 Copied from tools/code_coverage/coverage_posix.py | 136 Copied from tools/code_coverage/coverage_posix.py |
131 """ | 137 """ |
132 if not self._IsLinux(): | 138 if not self._IsLinux(): |
133 return | 139 return |
134 proc = subprocess.Popen(["Xvfb", ":9", "-screen", "0", "1024x768x24", | 140 proc = subprocess.Popen(['Xvfb', ':9', '-screen', '0', '1024x768x24', |
135 "-ac"], | 141 '-ac'], |
136 stdout=subprocess.PIPE, stderr=subprocess.STDOUT) | 142 stdout=subprocess.PIPE, stderr=subprocess.STDOUT) |
137 self._pid = proc.pid | 143 self._pid = proc.pid |
138 if not self._pid: | 144 if not self._pid: |
139 raise Exception('Could not start Xvfb') | 145 raise Exception('Could not start Xvfb') |
140 os.environ['DISPLAY'] = ":9" | 146 os.environ['DISPLAY'] = ':9' |
141 | 147 |
142 # Now confirm, giving a chance for it to start if needed. | 148 # Now confirm, giving a chance for it to start if needed. |
143 for test in range(10): | 149 for _ in range(10): |
144 proc = subprocess.Popen('xdpyinfo >/dev/null', shell=True) | 150 proc = subprocess.Popen('xdpyinfo >/dev/null', shell=True) |
145 pid, retcode = os.waitpid(proc.pid, 0) | 151 _, retcode = os.waitpid(proc.pid, 0) |
146 if retcode == 0: | 152 if retcode == 0: |
147 break | 153 break |
148 time.sleep(0.25) | 154 time.sleep(0.25) |
149 if retcode != 0: | 155 if retcode != 0: |
150 raise Exception('Could not confirm Xvfb happiness') | 156 raise Exception('Could not confirm Xvfb happiness') |
151 | 157 |
152 def Stop(self): | 158 def Stop(self): |
153 """Stop Xvfb if needed. Linux only.""" | 159 """Stop Xvfb if needed. Linux only.""" |
154 if self._pid: | 160 if self._pid: |
155 try: | 161 try: |
156 os.kill(self._pid, signal.SIGKILL) | 162 os.kill(self._pid, signal.SIGKILL) |
157 except OSError: | 163 except OSError: |
158 pass # Xvfb already gone. | 164 pass # Xvfb already gone. |
159 del os.environ['DISPLAY'] | 165 del os.environ['DISPLAY'] |
160 self._pid = 0 | 166 self._pid = 0 |
161 | 167 |
| 168 |
162 def PrintAnnotationForTestResults(test_results): | 169 def PrintAnnotationForTestResults(test_results): |
163 if test_results.timed_out: | 170 if test_results.timed_out: |
164 print '@@@STEP_WARNINGS@@@' | 171 buildbot_report.PrintWarning() |
165 elif test_results.failed: | 172 elif test_results.failed or test_results.crashed or test_results.overall_fail: |
166 print '@@@STEP_FAILURE@@@' | 173 buildbot_report.PrintError() |
167 elif test_results.crashed: | |
168 print '@@@STEP_FAILURE@@@' | |
169 elif test_results.overall_fail: | |
170 print '@@@STEP_FAILURE@@@' | |
171 else: | 174 else: |
172 print 'Step success!' # No annotation needed | 175 print 'Step success!' # No annotation needed |
173 | 176 |
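Note for readers who don't have pylib handy: judging from the annotation strings the old inline prints emitted, the buildbot_report helpers presumably wrap the same buildbot @@@...@@@ annotations. A hedged sketch, not the actual pylib implementation:

    # Hypothetical stand-in for pylib/buildbot_report.py, inferred from the
    # strings the old code printed directly.
    def PrintWarning():
      print '@@@STEP_WARNINGS@@@'

    def PrintError():
      print '@@@STEP_FAILURE@@@'

    def PrintNamedStep(step):
      print '@@@BUILD_STEP %s@@@' % step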
174 def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline, | 177 |
| 178 def RunTests(exe, device, test_suite, gtest_filter, test_arguments, rebaseline, |
175 timeout, performance_test, cleanup_test_files, tool, | 179 timeout, performance_test, cleanup_test_files, tool, |
176 log_dump_name, apk, annotate=False): | 180 log_dump_name, fast_and_loose): |
177 """Runs the tests. | 181 """Runs the tests. |
178 | 182 |
179 Args: | 183 Args: |
| 184 exe: boolean stating whether we are using the exe-based test runner. |
180 device: Device to run the tests. | 185 device: Device to run the tests. |
181 test_suite: A specific test suite to run, empty to run all. | 186 test_suite: A specific test suite to run, empty to run all. |
182 gtest_filter: A gtest_filter flag. | 187 gtest_filter: A gtest_filter flag. |
183 test_arguments: Additional arguments to pass to the test binary. | 188 test_arguments: Additional arguments to pass to the test binary. |
184 rebaseline: Whether or not to run tests in isolation and update the filter. | 189 rebaseline: Whether or not to run tests in isolation and update the filter. |
185 timeout: Timeout for each test. | 190 timeout: Timeout for each test. |
186 performance_test: Whether or not this is a performance test. | 191 performance_test: Whether or not this is a performance test. |
187 cleanup_test_files: Whether or not to clean up test files on device. | 192 cleanup_test_files: Whether or not to clean up test files on device. |
188 tool: Name of the Valgrind tool. | 193 tool: Name of the Valgrind tool. |
189 log_dump_name: Name of log dump file. | 194 log_dump_name: Name of log dump file. |
190 apk: boolean to state if we are using the apk based test runner | 195 fast_and_loose: if set, skip copying data files. |
191 annotate: should we print buildbot-style annotations? | |
192 | 196 |
193 Returns: | 197 Returns: |
194 A TestResults object. | 198 A TestResults object. |
195 """ | 199 """ |
196 results = [] | 200 results = [] |
197 global _TEST_SUITES | |
198 | 201 |
199 if test_suite: | 202 if test_suite: |
200 global _TEST_SUITES | 203 if not os.path.exists(test_suite): |
201 | 204 logging.critical('Unrecognized test suite %s, supported: %s', |
202 # If not specified, assume the test suites are in out/Release | 205 test_suite, _TEST_SUITES) |
203 test_suite_dir = os.path.abspath(os.path.join(constants.CHROME_DIR, | |
204 'out', 'Release')) | |
205 if (not os.path.exists(test_suite)): | |
206 logging.critical('Unrecognized test suite %s, supported: %s' % | |
207 (test_suite, _TEST_SUITES)) | |
208 if test_suite in _TEST_SUITES: | 206 if test_suite in _TEST_SUITES: |
209 logging.critical('(Remember to include the path: out/Release/%s)', | 207 logging.critical('(Remember to include the path: out/Release/%s)', |
210 test_suite) | 208 test_suite) |
211 test_suite_basename = os.path.basename(test_suite) | 209 test_suite_basename = os.path.basename(test_suite) |
212 if test_suite_basename in _TEST_SUITES: | 210 if test_suite_basename in _TEST_SUITES: |
213 logging.critical('Try "make -j15 %s"' % test_suite_basename) | 211 logging.critical('Try "make -j15 %s"', test_suite_basename) |
214 else: | 212 else: |
215 logging.critical('Unrecognized test suite, supported: %s' % | 213 logging.critical('Unrecognized test suite, supported: %s', |
216 _TEST_SUITES) | 214 _TEST_SUITES) |
217 return TestResults.FromRun([], [BaseTestResult(test_suite, '')], | 215 return TestResults.FromRun([], [BaseTestResult(test_suite, '')], |
218 False, False) | 216 False, False) |
219 fully_qualified_test_suites = [test_suite] | 217 fully_qualified_test_suites = [test_suite] |
220 else: | 218 else: |
221 fully_qualified_test_suites = FullyQualifiedTestSuites(apk, _TEST_SUITES) | 219 fully_qualified_test_suites = FullyQualifiedTestSuites(exe, _TEST_SUITES) |
222 debug_info_list = [] | 220 debug_info_list = [] |
223 print 'Known suites: ' + str(_TEST_SUITES) | 221 print 'Known suites: ' + str(_TEST_SUITES) |
224 print 'Running these: ' + str(fully_qualified_test_suites) | 222 print 'Running these: ' + str(fully_qualified_test_suites) |
225 for t in fully_qualified_test_suites: | 223 for t in fully_qualified_test_suites: |
226 if annotate: | 224 buildbot_report.PrintNamedStep('Test suite %s' % os.path.basename(t)) |
227 print '@@@BUILD_STEP Test suite %s@@@' % os.path.basename(t) | |
228 test = SingleTestRunner(device, t, gtest_filter, test_arguments, | 225 test = SingleTestRunner(device, t, gtest_filter, test_arguments, |
229 timeout, rebaseline, performance_test, | 226 timeout, rebaseline, performance_test, |
230 cleanup_test_files, tool, 0, not not log_dump_name) | 227 cleanup_test_files, tool, 0, not not log_dump_name, |
| 228 fast_and_loose) |
231 test.Run() | 229 test.Run() |
232 | 230 |
233 results += [test.test_results] | 231 results += [test.test_results] |
234 # Collect debug info. | 232 # Collect debug info. |
235 debug_info_list += [test.dump_debug_info] | 233 debug_info_list += [test.dump_debug_info] |
236 if rebaseline: | 234 if rebaseline: |
237 test.UpdateFilter(test.test_results.failed) | 235 test.UpdateFilter(test.test_results.failed) |
238 test.test_results.LogFull('Unit test', os.path.basename(t)) | 236 test.test_results.LogFull('Unit test', os.path.basename(t)) |
239 # Zip all debug info outputs into a file named by log_dump_name. | 237 # Zip all debug info outputs into a file named by log_dump_name. |
240 debug_info.GTestDebugInfo.ZipAndCleanResults( | 238 debug_info.GTestDebugInfo.ZipAndCleanResults( |
241 os.path.join(constants.CHROME_DIR, 'out', 'Release', | 239 os.path.join(constants.CHROME_DIR, 'out', 'Release', 'debug_info_dumps'), |
242 'debug_info_dumps'), | |
243 log_dump_name, [d for d in debug_info_list if d]) | 240 log_dump_name, [d for d in debug_info_list if d]) |
244 | 241 |
245 if annotate: | 242 PrintAnnotationForTestResults(test.test_results) |
246 PrintAnnotationForTestResults(test.test_results) | |
247 | 243 |
248 return TestResults.FromTestResults(results) | 244 return TestResults.FromTestResults(results) |
249 | 245 |
250 | 246 |
251 class TestSharder(BaseTestSharder): | 247 class TestSharder(BaseTestSharder): |
252 """Responsible for sharding the tests on the connected devices.""" | 248 """Responsible for sharding the tests on the connected devices.""" |
253 | 249 |
254 def __init__(self, attached_devices, test_suite, gtest_filter, | 250 def __init__(self, attached_devices, test_suite, gtest_filter, |
255 test_arguments, timeout, rebaseline, performance_test, | 251 test_arguments, timeout, rebaseline, performance_test, |
256 cleanup_test_files, tool, annotate): | 252 cleanup_test_files, tool, log_dump_name, fast_and_loose): |
257 BaseTestSharder.__init__(self, attached_devices) | 253 BaseTestSharder.__init__(self, attached_devices) |
258 self.test_suite = test_suite | 254 self.test_suite = test_suite |
259 self.test_suite_basename = os.path.basename(test_suite) | 255 self.test_suite_basename = os.path.basename(test_suite) |
260 self.gtest_filter = gtest_filter | 256 self.gtest_filter = gtest_filter |
261 self.test_arguments = test_arguments | 257 self.test_arguments = test_arguments |
262 self.timeout = timeout | 258 self.timeout = timeout |
263 self.rebaseline = rebaseline | 259 self.rebaseline = rebaseline |
264 self.performance_test = performance_test | 260 self.performance_test = performance_test |
265 self.cleanup_test_files = cleanup_test_files | 261 self.cleanup_test_files = cleanup_test_files |
266 self.tool = tool | 262 self.tool = tool |
267 self.annotate = annotate | 263 self.log_dump_name = log_dump_name |
| 264 self.fast_and_loose = fast_and_loose |
268 test = SingleTestRunner(self.attached_devices[0], test_suite, gtest_filter, | 265 test = SingleTestRunner(self.attached_devices[0], test_suite, gtest_filter, |
269 test_arguments, timeout, rebaseline, | 266 test_arguments, timeout, rebaseline, |
270 performance_test, cleanup_test_files, tool, 0) | 267 performance_test, cleanup_test_files, tool, 0, |
| 268 not not self.log_dump_name, fast_and_loose) |
271 # The executable/apk needs to be copied before we can call GetAllTests. | 269 # The executable/apk needs to be copied before we can call GetAllTests. |
272 test.test_package.StripAndCopyExecutable() | 270 test.test_package.StripAndCopyExecutable() |
273 all_tests = test.test_package.GetAllTests() | 271 all_tests = test.test_package.GetAllTests() |
274 if not rebaseline: | 272 if not rebaseline: |
275 disabled_list = test.GetDisabledTests() | 273 disabled_list = test.GetDisabledTests() |
276 # Only includes tests that do not have any match in the disabled list. | 274 # Only includes tests that do not have any match in the disabled list. |
277 all_tests = filter(lambda t: | 275 all_tests = filter(lambda t: |
278 not any([fnmatch.fnmatch(t, disabled_pattern) | 276 not any([fnmatch.fnmatch(t, disabled_pattern) |
279 for disabled_pattern in disabled_list]), | 277 for disabled_pattern in disabled_list]), |
280 all_tests) | 278 all_tests) |
281 self.tests = all_tests | 279 self.tests = all_tests |
282 | 280 |
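The disabled-list filtering above uses fnmatch-style glob patterns; a quick self-contained illustration with made-up test names:

    import fnmatch

    all_tests = ['FooTest.Bar', 'FooTest.Baz', 'QuxTest.Quux']
    disabled_list = ['FooTest.*']
    kept = [t for t in all_tests
            if not any(fnmatch.fnmatch(t, p) for p in disabled_list)]
    # kept == ['QuxTest.Quux']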
283 def CreateShardedTestRunner(self, device, index): | 281 def CreateShardedTestRunner(self, device, index): |
284 """Creates a suite-specific test runner. | 282 """Creates a suite-specific test runner. |
285 | 283 |
286 Args: | 284 Args: |
287 device: Device serial where this shard will run. | 285 device: Device serial where this shard will run. |
288 index: Index of this device in the pool. | 286 index: Index of this device in the pool. |
289 | 287 |
290 Returns: | 288 Returns: |
291 A SingleTestRunner object. | 289 A SingleTestRunner object. |
292 """ | 290 """ |
293 device_num = len(self.attached_devices) | 291 device_num = len(self.attached_devices) |
294 shard_size = (len(self.tests) + device_num - 1) / device_num | 292 shard_size = (len(self.tests) + device_num - 1) / device_num |
295 shard_test_list = self.tests[index * shard_size : (index + 1) * shard_size] | 293 shard_test_list = self.tests[index * shard_size : (index + 1) * shard_size] |
296 test_filter = ':'.join(shard_test_list) | 294 test_filter = ':'.join(shard_test_list) |
297 return SingleTestRunner(device, self.test_suite, | 295 return SingleTestRunner(device, self.test_suite, |
298 test_filter, self.test_arguments, self.timeout, | 296 test_filter, self.test_arguments, self.timeout, |
299 self.rebaseline, self.performance_test, | 297 self.rebaseline, self.performance_test, |
300 self.cleanup_test_files, self.tool, index) | 298 self.cleanup_test_files, self.tool, index, |
| 299 not not self.log_dump_name, self.fast_and_loose) |
301 | 300 |
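The shard_size expression above is ceiling division (this is Python 2, so / on ints truncates); a worked example with made-up numbers showing how the last shard absorbs the remainder and how the gtest filter is formed:

    tests = ['T%d' % i for i in range(10)]  # 10 tests across 3 devices
    device_num = 3
    shard_size = (len(tests) + device_num - 1) / device_num  # == 4
    shards = [tests[i * shard_size:(i + 1) * shard_size]
              for i in range(device_num)]
    # shards == [['T0', 'T1', 'T2', 'T3'], ['T4', 'T5', 'T6', 'T7'],
    #            ['T8', 'T9']]
    test_filter = ':'.join(shards[0])  # 'T0:T1:T2:T3'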
302 def OnTestsCompleted(self, test_runners, test_results): | 301 def OnTestsCompleted(self, test_runners, test_results): |
303 """Notifies that we completed the tests.""" | 302 """Notifies that we completed the tests.""" |
304 test_results.LogFull('Unit test', os.path.basename(self.test_suite)) | 303 test_results.LogFull('Unit test', os.path.basename(self.test_suite)) |
305 if self.annotate: | 304 PrintAnnotationForTestResults(test_results) |
306 PrintAnnotationForTestResults(test_results) | |
307 if test_results.failed and self.rebaseline: | 305 if test_results.failed and self.rebaseline: |
308 test_runners[0].UpdateFilter(test_results.failed) | 306 test_runners[0].UpdateFilter(test_results.failed) |
309 | 307 |
310 | 308 |
311 | |
312 def _RunATestSuite(options): | 309 def _RunATestSuite(options): |
313 """Run a single test suite. | 310 """Run a single test suite. |
314 | 311 |
315 Helper for Dispatch() to allow stop/restart of the emulator across | 312 Helper for Dispatch() to allow stop/restart of the emulator across |
316 test bundles. If using the emulator, we start it on entry and stop | 313 test bundles. If using the emulator, we start it on entry and stop |
317 it on exit. | 314 it on exit. |
318 | 315 |
319 Args: | 316 Args: |
320 options: options for running the tests. | 317 options: options for running the tests. |
321 | 318 |
322 Returns: | 319 Returns: |
323 0 if successful, number of failing tests otherwise. | 320 0 if successful, number of failing tests otherwise. |
324 """ | 321 """ |
325 attached_devices = [] | 322 attached_devices = [] |
326 buildbot_emulators = [] | 323 buildbot_emulators = [] |
327 | 324 |
328 if options.use_emulator: | 325 if options.use_emulator: |
329 for n in range(options.use_emulator): | 326 for n in range(options.use_emulator): |
330 t = TimeProfile('Emulator launch %d' % n) | 327 t = TimeProfile('Emulator launch %d' % n) |
331 buildbot_emulator = emulator.Emulator(options.fast_and_loose) | 328 buildbot_emulator = emulator.Emulator(options.fast_and_loose) |
332 buildbot_emulator.Launch(kill_all_emulators=n == 0) | 329 buildbot_emulator.Launch(kill_all_emulators=n == 0) |
333 t.Stop() | 330 t.Stop() |
334 buildbot_emulators.append(buildbot_emulator) | 331 buildbot_emulators.append(buildbot_emulator) |
335 attached_devices.append(buildbot_emulator.device) | 332 attached_devices.append(buildbot_emulator.device) |
336 # Wait for all emulators to finish booting. | 333 # Wait for all emulators to finish booting. |
337 map(lambda buildbot_emulator:buildbot_emulator.ConfirmLaunch(True), | 334 map(lambda buildbot_emulator: buildbot_emulator.ConfirmLaunch(True), |
338 buildbot_emulators) | 335 buildbot_emulators) |
339 elif options.test_device: | 336 elif options.test_device: |
340 attached_devices = [options.test_device] | 337 attached_devices = [options.test_device] |
341 else: | 338 else: |
342 attached_devices = android_commands.GetAttachedDevices() | 339 attached_devices = android_commands.GetAttachedDevices() |
343 | 340 |
344 if not attached_devices: | 341 if not attached_devices: |
345 logging.critical('A device must be attached and online.') | 342 logging.critical('A device must be attached and online.') |
346 if options.annotate: | 343 buildbot_report.PrintError() |
347 print '@@@STEP_FAILURE@@@' | |
348 return 1 | 344 return 1 |
349 | 345 |
350 # Reset the test port allocation. It's important to do it before starting | 346 # Reset the test port allocation. It's important to do it before starting |
351 # to dispatch any tests. | 347 # to dispatch any tests. |
352 if not ports.ResetTestServerPortAllocation(): | 348 if not ports.ResetTestServerPortAllocation(): |
353 raise Exception('Failed to reset test server port.') | 349 raise Exception('Failed to reset test server port.') |
354 | 350 |
355 if (len(attached_devices) > 1 and options.test_suite and | 351 if (len(attached_devices) > 1 and options.test_suite and |
356 not options.gtest_filter and not options.performance_test): | 352 not options.gtest_filter and not options.performance_test): |
357 sharder = TestSharder(attached_devices, options.test_suite, | 353 sharder = TestSharder(attached_devices, options.test_suite, |
358 options.gtest_filter, options.test_arguments, | 354 options.gtest_filter, options.test_arguments, |
359 options.timeout, options.rebaseline, | 355 options.timeout, options.rebaseline, |
360 options.performance_test, | 356 options.performance_test, |
361 options.cleanup_test_files, options.tool, | 357 options.cleanup_test_files, options.tool, |
362 options.annotate) | 358 options.log_dump, options.fast_and_loose) |
363 test_results = sharder.RunShardedTests() | 359 test_results = sharder.RunShardedTests() |
364 else: | 360 else: |
365 test_results = RunTests(attached_devices[0], options.test_suite, | 361 test_results = RunTests(options.exe, attached_devices[0], |
| 362 options.test_suite, |
366 options.gtest_filter, options.test_arguments, | 363 options.gtest_filter, options.test_arguments, |
367 options.rebaseline, options.timeout, | 364 options.rebaseline, options.timeout, |
368 options.performance_test, | 365 options.performance_test, |
369 options.cleanup_test_files, options.tool, | 366 options.cleanup_test_files, options.tool, |
370 options.log_dump, | 367 options.log_dump, options.fast_and_loose) |
371 options.apk, | |
372 annotate=options.annotate) | |
373 | 368 |
374 for buildbot_emulator in buildbot_emulators: | 369 for buildbot_emulator in buildbot_emulators: |
375 buildbot_emulator.Shutdown() | 370 buildbot_emulator.Shutdown() |
376 | 371 |
377 # Another chance if we timed out? At this point it is safe(r) to | 372 # Another chance if we timed out? At this point it is safe(r) to |
378 # run fast and loose since we just uploaded all the test data and | 373 # run fast and loose since we just uploaded all the test data and |
379 # binary. | 374 # binary. |
380 if test_results.timed_out and options.repeat: | 375 if test_results.timed_out and options.repeat: |
381 logging.critical('Timed out; repeating in fast_and_loose mode.') | 376 logging.critical('Timed out; repeating in fast_and_loose mode.') |
382 options.fast_and_loose = True | 377 options.fast_and_loose = True |
383 options.repeat = options.repeat - 1 | 378 options.repeat -= 1 |
384 logging.critical('Repeats left: ' + str(options.repeat)) | 379 logging.critical('Repeats left: ' + str(options.repeat)) |
385 return _RunATestSuite(options) | 380 return _RunATestSuite(options) |
386 return len(test_results.failed) | 381 return len(test_results.failed) |
387 | 382 |
388 | 383 |
389 def Dispatch(options): | 384 def Dispatch(options): |
390 """Dispatches the tests, sharding if possible. | 385 """Dispatches the tests, sharding if possible. |
391 | 386 |
392 If options.use_emulator is True, all tests will be run in a new emulator | 387 If options.use_emulator is True, all tests will be run in a new emulator |
393 instance. | 388 instance. |
394 | 389 |
395 Args: | 390 Args: |
396 options: options for running the tests. | 391 options: options for running the tests. |
397 | 392 |
398 Returns: | 393 Returns: |
399 0 if successful, number of failing tests otherwise. | 394 0 if successful, number of failing tests otherwise. |
400 """ | 395 """ |
401 if options.test_suite == 'help': | 396 if options.test_suite == 'help': |
402 ListTestSuites() | 397 ListTestSuites() |
403 return 0 | 398 return 0 |
404 | 399 |
405 if options.use_xvfb: | 400 if options.use_xvfb: |
406 xvfb = Xvfb() | 401 xvfb = Xvfb() |
407 xvfb.Start() | 402 xvfb.Start() |
408 | 403 |
409 if options.test_suite: | 404 if options.test_suite: |
410 all_test_suites = FullyQualifiedTestSuites(options.apk, | 405 all_test_suites = FullyQualifiedTestSuites(options.exe, |
411 [options.test_suite]) | 406 [options.test_suite]) |
412 else: | 407 else: |
413 all_test_suites = FullyQualifiedTestSuites(options.apk, | 408 all_test_suites = FullyQualifiedTestSuites(options.exe, |
414 _TEST_SUITES) | 409 _TEST_SUITES) |
415 failures = 0 | 410 failures = 0 |
416 for suite in all_test_suites: | 411 for suite in all_test_suites: |
417 options.test_suite = suite | 412 options.test_suite = suite |
418 failures += _RunATestSuite(options) | 413 failures += _RunATestSuite(options) |
419 | 414 |
420 if options.use_xvfb: | 415 if options.use_xvfb: |
421 xvfb.Stop() | 416 xvfb.Stop() |
422 return failures | 417 return failures |
423 | 418 |
424 | 419 |
425 def ListTestSuites(): | 420 def ListTestSuites(): |
426 """Display a list of available test suites | 421 """Display a list of available test suites.""" |
427 """ | |
428 print 'Available test suites are:' | 422 print 'Available test suites are:' |
429 for test_suite in _TEST_SUITES: | 423 for test_suite in _TEST_SUITES: |
430 print test_suite | 424 print test_suite |
431 | 425 |
432 | 426 |
433 def main(argv): | 427 def main(argv): |
434 option_parser = test_options_parser.CreateTestRunnerOptionParser(None, | 428 option_parser = test_options_parser.CreateTestRunnerOptionParser( |
435 default_timeout=0) | 429 None, default_timeout=0) |
436 option_parser.add_option('-s', '--suite', dest='test_suite', | 430 option_parser.add_option('-s', '--suite', dest='test_suite', |
437 help='Executable name of the test suite to run ' | 431 help='Executable name of the test suite to run ' |
438 '(use -s help to list them)') | 432 '(use -s help to list them)') |
439 option_parser.add_option('-d', '--device', dest='test_device', | 433 option_parser.add_option('-d', '--device', dest='test_device', |
440 help='Target device on which to run the test suite.') | 434 help='Target device on which to run the test suite.') |
441 option_parser.add_option('-r', dest='rebaseline', | 435 option_parser.add_option('-r', dest='rebaseline', |
442 help='Rebaseline and update *testsuite_disabled', | 436 help='Rebaseline and update *testsuite_disabled', |
443 action='store_true', | 437 action='store_true') |
444 default=False) | |
445 option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter', | 438 option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter', |
446 help='gtest filter') | 439 help='gtest filter') |
447 option_parser.add_option('-a', '--test_arguments', dest='test_arguments', | 440 option_parser.add_option('-a', '--test_arguments', dest='test_arguments', |
448 help='Additional arguments to pass to the test') | 441 help='Additional arguments to pass to the test') |
449 option_parser.add_option('-p', dest='performance_test', | 442 option_parser.add_option('-p', dest='performance_test', |
450 help='Indicates a performance test', | 443 help='Indicates a performance test', |
451 action='store_true', | 444 action='store_true') |
452 default=False) | |
453 option_parser.add_option('-L', dest='log_dump', | 445 option_parser.add_option('-L', dest='log_dump', |
454 help='File name of the log dump, which will be put ' | 446 help='File name of the log dump, which will be put ' |
455 'in the subfolder debug_info_dumps under the same ' | 447 'in the subfolder debug_info_dumps under the same ' |
456 'directory as the test_suite.') | 448 'directory as the test_suite.') |
457 option_parser.add_option('-e', '--emulator', dest='use_emulator', | 449 option_parser.add_option('-e', '--emulator', dest='use_emulator', |
458 help='Run tests in a new instance of emulator', | 450 help='Run tests in a new instance of emulator', |
459 type='int', | 451 type='int', |
460 default=0) | 452 default=0) |
461 option_parser.add_option('-x', '--xvfb', dest='use_xvfb', | 453 option_parser.add_option('-x', '--xvfb', dest='use_xvfb', |
462 action='store_true', default=False, | 454 action='store_true', |
463 help='Use Xvfb around tests (ignored if not Linux)') | 455 help='Use Xvfb around tests (ignored if not Linux)') |
464 option_parser.add_option('--fast', '--fast_and_loose', dest='fast_and_loose', | 456 option_parser.add_option('--fast', '--fast_and_loose', dest='fast_and_loose', |
465 action='store_true', default=False, | 457 action='store_true', |
466 help='Go faster (but be less stable), ' | 458 help='Go faster (but be less stable), ' |
467 'for quick testing. Example: when tracking down ' | 459 'for quick testing. Example: when tracking down ' |
468 'tests that hang to add to the disabled list, ' | 460 'tests that hang to add to the disabled list, ' |
469 'there is no need to redeploy the test binary ' | 461 'there is no need to redeploy the test binary ' |
470 'or data to the device again. ' | 462 'or data to the device again. ' |
471 'Don\'t use on bots by default!') | 463 'Don\'t use on bots by default!') |
472 option_parser.add_option('--repeat', dest='repeat', type='int', | 464 option_parser.add_option('--repeat', dest='repeat', type='int', |
473 default=2, | 465 default=2, |
474 help='Repeat count on test timeout') | 466 help='Repeat count on test timeout') |
475 option_parser.add_option('--annotate', default=True, | 467 option_parser.add_option('--exit_code', action='store_true', |
476 help='Print buildbot-style annotate messages ' | 468 help='If set, the exit code will be total number ' |
477 'for each test suite. Default=True') | 469 'of failures.') |
478 option_parser.add_option('--apk', default=True, | 470 option_parser.add_option('--exe', action='store_true', |
479 help='Use the apk test runner by default') | 471 help='If set, use the exe test runner instead of ' |
| 472 'the APK.') |
480 options, args = option_parser.parse_args(argv) | 473 options, args = option_parser.parse_args(argv) |
481 if len(args) > 1: | 474 if len(args) > 1: |
482 print 'Unknown argument:', args[1:] | 475 print 'Unknown argument:', args[1:] |
483 option_parser.print_usage() | 476 option_parser.print_usage() |
484 sys.exit(1) | 477 sys.exit(1) |
485 run_tests_helper.SetLogLevel(options.verbose_count) | 478 run_tests_helper.SetLogLevel(options.verbose_count) |
486 failed_tests_count = Dispatch(options) | 479 failed_tests_count = Dispatch(options) |
487 | 480 |
488 # If we're printing annotations then failures of individual test suites are | 481 # Failures of individual test suites are communicated by printing a |
489 # communicated by printing a STEP_FAILURE message. | 482 # STEP_FAILURE message. |
490 # Returning a success exit status also prevents the buildbot from incorrectly | 483 # Returning a success exit status also prevents the buildbot from incorrectly |
491 # marking the last suite as failed if there were failures in other suites in | 484 # marking the last suite as failed if there were failures in other suites in |
492 # the batch (this happens because the exit status is a sum of all failures | 485 # the batch (this happens because the exit status is a sum of all failures |
493 # from all suites, but the buildbot associates the exit status only with the | 486 # from all suites, but the buildbot associates the exit status only with the |
494 # most recent step). | 487 # most recent step). |
495 if options.annotate: | 488 if options.exit_code: |
496 return 0 | |
497 else: | |
498 return failed_tests_count | 489 return failed_tests_count |
| 490 return 0 |
499 | 491 |
500 | 492 |
501 if __name__ == '__main__': | 493 if __name__ == '__main__': |
502 sys.exit(main(sys.argv)) | 494 sys.exit(main(sys.argv)) |
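For completeness, a hedged sketch of how a bot step might drive this script with the new flags; the script path and suite name are illustrative:

    import subprocess

    # '-s' picks one suite; '--exe' selects the executable-based runner;
    # '--exit_code' makes the exit status the total failure count (see main()).
    ret = subprocess.call(['build/android/run_tests.py',
                           '-s', 'out/Release/base_unittests',
                           '--exe', '--exit_code'])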