Chromium Code Reviews

Unified diff: build/android/run_tests.py

Issue 10051021: apk-based test runner work for android (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 8 years, 8 months ago
 #!/usr/bin/env python
 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Runs all the native unit tests.

 1. Copy over test binary to /data/local on device.
 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak)
    to be deployed to the device (in /data/local/tmp).
(...skipping 52 matching lines...)
 import emulator
 import run_tests_helper
 from single_test_runner import SingleTestRunner
 from test_package_executable import TestPackageExecutable
 from test_result import BaseTestResult, TestResults

 _TEST_SUITES = ['base_unittests', 'sql_unittests', 'ipc_tests', 'net_unittests',
                 'sync_unit_tests', 'content_unittests']


-def FullyQualifiedTestSuites():
-  """Return a fully qualified list that represents all known suites."""
+def FullyQualifiedTestSuites(apk):
+  """Return a fully qualified list that represents all known suites.
+
+  Args:
+    apk: if True, use the apk-based test runner"""
   # If not specified, assume the test suites are in out/Release
   test_suite_dir = os.path.abspath(os.path.join(run_tests_helper.CHROME_DIR,
                                                 'out', 'Release'))
-  return [os.path.join(test_suite_dir, t) for t in _TEST_SUITES]
+  if apk:
+    # out/Release/$SUITE_apk/ChromeNativeTests-debug.apk
+    suites = [os.path.join(test_suite_dir,
+                           t + '_apk',
+                           'ChromeNativeTests-debug.apk')
+              for t in _TEST_SUITES]
+  else:
+    suites = [os.path.join(test_suite_dir, t) for t in _TEST_SUITES]
+  return suites


 class TimeProfile(object):
   """Class for simple profiling of action, with logging of cost."""

   def __init__(self, description):
     self._description = description
     self.Start()

   def Start(self):
(...skipping 48 matching lines...)
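The hunk above only changes how suite names are resolved to paths. Below is a minimal illustrative sketch (not part of the patch) of the two layouts, with '/path/to/src' standing in for run_tests_helper.CHROME_DIR:

# Illustrative sketch of the suite paths FullyQualifiedTestSuites(apk) builds.
import os

test_suite_dir = os.path.join('/path/to/src', 'out', 'Release')
suites = ['base_unittests', 'net_unittests']

# apk=False: bare native test binaries, e.g. out/Release/base_unittests
executables = [os.path.join(test_suite_dir, t) for t in suites]

# apk=True: one package per suite, e.g.
# out/Release/base_unittests_apk/ChromeNativeTests-debug.apk
apks = [os.path.join(test_suite_dir, t + '_apk', 'ChromeNativeTests-debug.apk')
        for t in suites]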
     try:
       os.kill(self._pid, signal.SIGKILL)
     except:
       pass
     del os.environ['DISPLAY']
     self._pid = 0


 def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline,
              timeout, performance_test, cleanup_test_files, tool,
-             log_dump_name, annotate=False):
+             log_dump_name, apk, annotate=False):
   """Runs the tests.

   Args:
     device: Device to run the tests.
     test_suite: A specific test suite to run, empty to run all.
     gtest_filter: A gtest_filter flag.
     test_arguments: Additional arguments to pass to the test binary.
     rebaseline: Whether or not to run tests in isolation and update the filter.
     timeout: Timeout for each test.
     performance_test: Whether or not performance test(s).
     cleanup_test_files: Whether or not to cleanup test files on device.
     tool: Name of the Valgrind tool.
     log_dump_name: Name of log dump file.
+    apk: boolean to state if we are using the apk based test runner
     annotate: should we print buildbot-style annotations?

   Returns:
     A TestResults object.
   """
   results = []

   if test_suite:
     global _TEST_SUITES
-    if not os.path.exists(test_suite):
+    if (not os.path.exists(test_suite) and
+        not os.path.splitext(test_suite)[1] == '.apk'):
       logging.critical('Unrecognized test suite %s, supported: %s' %
                        (test_suite, _TEST_SUITES))
       if test_suite in _TEST_SUITES:
         logging.critical('(Remember to include the path: out/Release/%s)',
                          test_suite)
-      return TestResults.FromOkAndFailed([], [BaseTestResult(test_suite, '')])
+      return TestResults.FromOkAndFailed([], [BaseTestResult(test_suite, '')],
+                                         False, False)
     fully_qualified_test_suites = [test_suite]
   else:
-    fully_qualified_test_suites = FullyQualifiedTestSuites()
+    fully_qualified_test_suites = FullyQualifiedTestSuites(apk)
   debug_info_list = []
   print 'Known suites: ' + str(_TEST_SUITES)
   print 'Running these: ' + str(fully_qualified_test_suites)
   for t in fully_qualified_test_suites:
     if annotate:
       print '@@@BUILD_STEP Test suite %s@@@' % os.path.basename(t)
     test = SingleTestRunner(device, t, gtest_filter, test_arguments,
                             timeout, rebaseline, performance_test,
                             cleanup_test_files, tool, 0, not not log_dump_name)
     test.Run()

     results += [test.test_results]
     # Collect debug info.
     debug_info_list += [test.dump_debug_info]
     if rebaseline:
       test.UpdateFilter(test.test_results.failed)
     elif test.test_results.failed:
       test.test_results.LogFull()
   # Zip all debug info outputs into a file named by log_dump_name.
   debug_info.GTestDebugInfo.ZipAndCleanResults(
       os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release',
                    'debug_info_dumps'),
       log_dump_name, [d for d in debug_info_list if d])

   if annotate:
     if test.test_results.timed_out:
       print '@@@STEP_WARNINGS@@@'
     elif test.test_results.failed:
       print '@@@STEP_FAILURE@@@'
+    elif test.test_results.overall_fail:
+      print '@@@STEP_FAILURE@@@'
     else:
       print 'Step success!'  # No annotation needed

   return TestResults.FromTestResults(results)


 class TestSharder(BaseTestSharder):
   """Responsible for sharding the tests on the connected devices."""

   def __init__(self, attached_devices, test_suite, gtest_filter,
(...skipping 90 matching lines...)
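For reference, the annotate branch above prints buildbot annotator lines per suite. A sketch of the output for one failing suite (suite name and gtest output are illustrative); with this patch, @@@STEP_FAILURE@@@ is also emitted when overall_fail is set, not only when individual tests fail, and a timeout still produces @@@STEP_WARNINGS@@@:

@@@BUILD_STEP Test suite base_unittests@@@
  ...gtest output for the suite...
@@@STEP_FAILURE@@@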
                             options.performance_test,
                             options.cleanup_test_files, options.tool)
     test_results = sharder.RunShardedTests()
   else:
     test_results = RunTests(attached_devices[0], options.test_suite,
                             options.gtest_filter, options.test_arguments,
                             options.rebaseline, options.timeout,
                             options.performance_test,
                             options.cleanup_test_files, options.tool,
                             options.log_dump,
+                            options.apk,
                             annotate=options.annotate)

   for buildbot_emulator in buildbot_emulators:
     buildbot_emulator.Shutdown()

   # Another chance if we timed out? At this point It is safe(r) to
   # run fast and loose since we just uploaded all the test data and
   # binary.
   if test_results.timed_out and options.repeat:
     logging.critical('Timed out; repeating in fast_and_loose mode.')
(...skipping 20 matching lines...)
     ListTestSuites()
     return 0

   if options.use_xvfb:
     xvfb = Xvfb()
     xvfb.Start()

   if options.test_suite:
     all_test_suites = [options.test_suite]
   else:
-    all_test_suites = FullyQualifiedTestSuites()
+    all_test_suites = FullyQualifiedTestSuites(options.apk)
   failures = 0
   for suite in all_test_suites:
     options.test_suite = suite
     failures += _RunATestSuite(options)

   if options.use_xvfb:
     xvfb.Stop()
   return failures


(...skipping 41 matching lines...)
                                 'tests that hang to add to the disabled list, '
                                 'there is no need to redeploy the test binary '
                                 'or data to the device again. '
                                 'Don\'t use on bots by default!')
   option_parser.add_option('--repeat', dest='repeat', type='int',
                            default=2,
                            help='Repeat count on test timeout')
   option_parser.add_option('--annotate', default=True,
                            help='Print buildbot-style annotate messages '
                                 'for each test suite. Default=True')
+  option_parser.add_option('--apk', default=False,
+                           help='Use the apk test runner '
+                                '(off by default for now)')
   options, args = option_parser.parse_args(argv)
   if len(args) > 1:
     print 'Unknown argument:', args[1:]
     option_parser.print_usage()
     sys.exit(1)
   run_tests_helper.SetLogLevel(options.verbose_count)
   return Dispatch(options)


 if __name__ == '__main__':
   sys.exit(main(sys.argv))
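A hypothetical invocation using the new option. Note that --apk, like --annotate above, is declared without action='store_true', so on the command line it takes a value, and any non-empty string makes options.apk truthy:

# Run all known suites through the apk-based runner (hypothetical invocation).
build/android/run_tests.py --apk=1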
