OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Runs all the native unit tests. | 6 """Runs all the native unit tests. |
7 | 7 |
8 1. Copy over test binary to /data/local on device. | 8 1. Copy over test binary to /data/local on device. |
9 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak) | 9 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak) |
10 to be deployed to the device (in /data/local/tmp). | 10 to be deployed to the device (in /data/local/tmp). |
(...skipping 148 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
def Stop(self):
  """Stop Xvfb if it was started by this object. Linux only.

  Best-effort: if the Xvfb process is already gone (or we lack permission
  to signal it), the kill failure is ignored. Clears the DISPLAY
  environment variable and resets the cached pid so a second Stop() is a
  no-op.
  """
  if self._pid:
    try:
      os.kill(self._pid, signal.SIGKILL)
    except OSError:
      # Process already exited or cannot be signalled; nothing to clean up.
      # (Intentionally narrow: a bare `except:` would also swallow
      # KeyboardInterrupt/SystemExit.)
      pass
    del os.environ['DISPLAY']
    self._pid = 0
168 | 168 |
def PrintAnnotationForTestResults(test_results):
  """Prints a buildbot annotation line summarizing test_results.

  A timeout is reported as a warning; any failure, crash, or overall
  failure is reported as a step failure; otherwise a plain success
  message is printed (no annotation needed).

  Args:
    test_results: A TestResults object with timed_out, failed, crashed and
        overall_fail attributes.
  """
  if test_results.timed_out:
    print('@@@STEP_WARNINGS@@@')
  elif (test_results.failed or test_results.crashed or
        test_results.overall_fail):
    # The three former branches all emitted the same annotation; fold them
    # into a single condition.
    print('@@@STEP_FAILURE@@@')
  else:
    print('Step success!')  # No annotation needed
169 | 180 |
170 def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline, | 181 def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline, |
171 timeout, performance_test, cleanup_test_files, tool, | 182 timeout, performance_test, cleanup_test_files, tool, |
172 log_dump_name, apk, annotate=False): | 183 log_dump_name, apk, annotate=False): |
173 """Runs the tests. | 184 """Runs the tests. |
174 | 185 |
175 Args: | 186 Args: |
176 device: Device to run the tests. | 187 device: Device to run the tests. |
177 test_suite: A specific test suite to run, empty to run all. | 188 test_suite: A specific test suite to run, empty to run all. |
178 gtest_filter: A gtest_filter flag. | 189 gtest_filter: A gtest_filter flag. |
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
221 if rebaseline: | 232 if rebaseline: |
222 test.UpdateFilter(test.test_results.failed) | 233 test.UpdateFilter(test.test_results.failed) |
223 test.test_results.LogFull() | 234 test.test_results.LogFull() |
224 # Zip all debug info outputs into a file named by log_dump_name. | 235 # Zip all debug info outputs into a file named by log_dump_name. |
225 debug_info.GTestDebugInfo.ZipAndCleanResults( | 236 debug_info.GTestDebugInfo.ZipAndCleanResults( |
226 os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release', | 237 os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release', |
227 'debug_info_dumps'), | 238 'debug_info_dumps'), |
228 log_dump_name, [d for d in debug_info_list if d]) | 239 log_dump_name, [d for d in debug_info_list if d]) |
229 | 240 |
230 if annotate: | 241 if annotate: |
231 if test.test_results.timed_out: | 242 PrintAnnotationForTestResults(test.test_results) |
232 print '@@@STEP_WARNINGS@@@' | |
233 elif test.test_results.failed: | |
234 print '@@@STEP_FAILURE@@@' | |
235 elif test.test_results.overall_fail: | |
236 print '@@@STEP_FAILURE@@@' | |
237 else: | |
238 print 'Step success!' # No annotation needed | |
239 | 243 |
240 return TestResults.FromTestResults(results) | 244 return TestResults.FromTestResults(results) |
241 | 245 |
242 | 246 |
243 class TestSharder(BaseTestSharder): | 247 class TestSharder(BaseTestSharder): |
244 """Responsible for sharding the tests on the connected devices.""" | 248 """Responsible for sharding the tests on the connected devices.""" |
245 | 249 |
246 def __init__(self, attached_devices, test_suite, gtest_filter, | 250 def __init__(self, attached_devices, test_suite, gtest_filter, |
247 test_arguments, timeout, rebaseline, performance_test, | 251 test_arguments, timeout, rebaseline, performance_test, |
248 cleanup_test_files, tool): | 252 cleanup_test_files, tool, annotate): |
249 BaseTestSharder.__init__(self, attached_devices) | 253 BaseTestSharder.__init__(self, attached_devices) |
250 self.test_suite = test_suite | 254 self.test_suite = test_suite |
251 self.test_suite_basename = os.path.basename(test_suite) | 255 self.test_suite_basename = os.path.basename(test_suite) |
252 self.gtest_filter = gtest_filter | 256 self.gtest_filter = gtest_filter |
253 self.test_arguments = test_arguments | 257 self.test_arguments = test_arguments |
254 self.timeout = timeout | 258 self.timeout = timeout |
255 self.rebaseline = rebaseline | 259 self.rebaseline = rebaseline |
256 self.performance_test = performance_test | 260 self.performance_test = performance_test |
257 self.cleanup_test_files = cleanup_test_files | 261 self.cleanup_test_files = cleanup_test_files |
258 self.tool = tool | 262 self.tool = tool |
| 263 self.annotate = annotate |
259 test = SingleTestRunner(self.attached_devices[0], test_suite, gtest_filter, | 264 test = SingleTestRunner(self.attached_devices[0], test_suite, gtest_filter, |
260 test_arguments, timeout, rebaseline, | 265 test_arguments, timeout, rebaseline, |
261 performance_test, cleanup_test_files, tool, 0) | 266 performance_test, cleanup_test_files, tool, 0) |
262 all_tests = test.test_package.GetAllTests() | 267 all_tests = test.test_package.GetAllTests() |
263 if not rebaseline: | 268 if not rebaseline: |
264 disabled_list = test.GetDisabledTests() | 269 disabled_list = test.GetDisabledTests() |
265 # Only includes tests that do not have any match in the disabled list. | 270 # Only includes tests that do not have any match in the disabled list. |
266 all_tests = filter(lambda t: | 271 all_tests = filter(lambda t: |
267 not any([fnmatch.fnmatch(t, disabled_pattern) | 272 not any([fnmatch.fnmatch(t, disabled_pattern) |
268 for disabled_pattern in disabled_list]), | 273 for disabled_pattern in disabled_list]), |
(...skipping 14 matching lines...) Expand all Loading... |
283 shard_test_list = self.tests[index * shard_size : (index + 1) * shard_size] | 288 shard_test_list = self.tests[index * shard_size : (index + 1) * shard_size] |
284 test_filter = ':'.join(shard_test_list) | 289 test_filter = ':'.join(shard_test_list) |
285 return SingleTestRunner(device, self.test_suite, | 290 return SingleTestRunner(device, self.test_suite, |
286 test_filter, self.test_arguments, self.timeout, | 291 test_filter, self.test_arguments, self.timeout, |
287 self.rebaseline, self.performance_test, | 292 self.rebaseline, self.performance_test, |
288 self.cleanup_test_files, self.tool, index) | 293 self.cleanup_test_files, self.tool, index) |
289 | 294 |
def OnTestsCompleted(self, test_runners, test_results):
  """Hook invoked after every shard has finished running.

  Logs the full result set, optionally emits a buildbot annotation, and,
  when rebaselining, pushes the failed-test list back into the filter of
  the first runner.
  """
  test_results.LogFull()
  if self.annotate:
    PrintAnnotationForTestResults(test_results)
  should_update_filter = self.rebaseline and test_results.failed
  if should_update_filter:
    test_runners[0].UpdateFilter(test_results.failed)
295 | 302 |
296 | 303 |
297 | 304 |
298 def _RunATestSuite(options): | 305 def _RunATestSuite(options): |
299 """Run a single test suite. | 306 """Run a single test suite. |
300 | 307 |
301 Helper for Dispatch() to allow stop/restart of the emulator across | 308 Helper for Dispatch() to allow stop/restart of the emulator across |
302 test bundles. If using the emulator, we start it on entry and stop | 309 test bundles. If using the emulator, we start it on entry and stop |
(...skipping 25 matching lines...) Expand all Loading... |
328 if not attached_devices: | 335 if not attached_devices: |
329 logging.critical('A device must be attached and online.') | 336 logging.critical('A device must be attached and online.') |
330 return 1 | 337 return 1 |
331 | 338 |
332 if (len(attached_devices) > 1 and options.test_suite and | 339 if (len(attached_devices) > 1 and options.test_suite and |
333 not options.gtest_filter and not options.performance_test): | 340 not options.gtest_filter and not options.performance_test): |
334 sharder = TestSharder(attached_devices, options.test_suite, | 341 sharder = TestSharder(attached_devices, options.test_suite, |
335 options.gtest_filter, options.test_arguments, | 342 options.gtest_filter, options.test_arguments, |
336 options.timeout, options.rebaseline, | 343 options.timeout, options.rebaseline, |
337 options.performance_test, | 344 options.performance_test, |
338 options.cleanup_test_files, options.tool) | 345 options.cleanup_test_files, options.tool, |
| 346 options.annotate) |
339 test_results = sharder.RunShardedTests() | 347 test_results = sharder.RunShardedTests() |
340 else: | 348 else: |
341 test_results = RunTests(attached_devices[0], options.test_suite, | 349 test_results = RunTests(attached_devices[0], options.test_suite, |
342 options.gtest_filter, options.test_arguments, | 350 options.gtest_filter, options.test_arguments, |
343 options.rebaseline, options.timeout, | 351 options.rebaseline, options.timeout, |
344 options.performance_test, | 352 options.performance_test, |
345 options.cleanup_test_files, options.tool, | 353 options.cleanup_test_files, options.tool, |
346 options.log_dump, | 354 options.log_dump, |
347 options.apk, | 355 options.apk, |
348 annotate=options.annotate) | 356 annotate=options.annotate) |
(...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
466 # from all suites, but the buildbot associates the exit status only with the | 474 # from all suites, but the buildbot associates the exit status only with the |
467 # most recent step). | 475 # most recent step). |
468 if options.annotate: | 476 if options.annotate: |
469 return 0 | 477 return 0 |
470 else: | 478 else: |
471 return failed_tests_count | 479 return failed_tests_count |
472 | 480 |
473 | 481 |
# Script entry point: forward the command line to main() and propagate its
# return value as the process exit status.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
OLD | NEW |