Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(82)

Side by Side Diff: build/android/run_tests.py

Issue 10689132: [android] Upstream / sync most of build/android and build/android/pylib. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 8 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « build/android/pylib/valgrind_tools.py ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # 2 #
3 # Copyright (c) 2012 The Chromium Authors. All rights reserved. 3 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
4 # Use of this source code is governed by a BSD-style license that can be 4 # Use of this source code is governed by a BSD-style license that can be
5 # found in the LICENSE file. 5 # found in the LICENSE file.
6 6
7 """Runs all the native unit tests. 7 """Runs all the native unit tests.
8 8
9 1. Copy over test binary to /data/local on device. 9 1. Copy over test binary to /data/local on device.
10 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak) 10 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak)
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
43 ReadOnlyFileUtilTest.ContentsEqual 43 ReadOnlyFileUtilTest.ContentsEqual
44 44
45 This file is generated by the tests running on devices. If running on emulator, 45 This file is generated by the tests running on devices. If running on emulator,
46 additional filter file which lists the tests that only failed in emulator will be 46 additional filter file which lists the tests that only failed in emulator will be
47 loaded. We don't care about the rare testcases which succeeded on emulator, but 47 loaded. We don't care about the rare testcases which succeeded on emulator, but
48 failed on device. 48 failed on device.
49 """ 49 """
50 50
51 import fnmatch 51 import fnmatch
52 import logging 52 import logging
53 import multiprocessing
54 import os 53 import os
55 import re
56 import subprocess 54 import subprocess
57 import sys 55 import sys
58 import time 56 import time
59 57
60 from pylib import android_commands 58 from pylib import android_commands
61 from pylib.base_test_sharder import BaseTestSharder 59 from pylib.base_test_sharder import BaseTestSharder
62 from pylib import cmd_helper 60 from pylib import constants
63 from pylib import debug_info 61 from pylib import debug_info
64 import emulator 62 import emulator
63 from pylib import ports
65 from pylib import run_tests_helper 64 from pylib import run_tests_helper
65 from pylib import test_options_parser
66 from pylib.single_test_runner import SingleTestRunner 66 from pylib.single_test_runner import SingleTestRunner
67 from pylib.test_package_executable import TestPackageExecutable
68 from pylib.test_result import BaseTestResult, TestResults 67 from pylib.test_result import BaseTestResult, TestResults
69 68
# Test suite binaries this script knows how to launch when no explicit
# suite is requested on the command line.
_TEST_SUITES = [
    'base_unittests',
    'content_unittests',
    'gpu_unittests',
    'ipc_tests',
    'net_unittests',
    'sql_unittests',
    'sync_unit_tests',
    'ui_unittests',
]
79 78
def FullyQualifiedTestSuites(apk):
  """Return a fully qualified list that represents all known suites.

  Args:
    apk: if True, use the apk-based test runner
  """
  # When no path is given, the suites are assumed to live in out/Release.
  release_dir = os.path.abspath(
      os.path.join(constants.CHROME_DIR, 'out', 'Release'))
  if apk:
    # Each apk is found at out/Release/$SUITE_apk/$SUITE-debug.apk.
    return [os.path.join(release_dir, suite + '_apk', suite + '-debug.apk')
            for suite in _TEST_SUITES]
  return [os.path.join(release_dir, suite) for suite in _TEST_SUITES]
(...skipping 90 matching lines...) Expand 10 before | Expand all | Expand 10 after
187 cleanup_test_files: Whether or not to cleanup test files on device. 186 cleanup_test_files: Whether or not to cleanup test files on device.
188 tool: Name of the Valgrind tool. 187 tool: Name of the Valgrind tool.
189 log_dump_name: Name of log dump file. 188 log_dump_name: Name of log dump file.
190 apk: boolean to state if we are using the apk based test runner 189 apk: boolean to state if we are using the apk based test runner
191 annotate: should we print buildbot-style annotations? 190 annotate: should we print buildbot-style annotations?
192 191
193 Returns: 192 Returns:
194 A TestResults object. 193 A TestResults object.
195 """ 194 """
196 results = [] 195 results = []
196 global _TEST_SUITES
197 197
198 if test_suite: 198 if test_suite:
199 global _TEST_SUITES 199 global _TEST_SUITES
200 if (not os.path.exists(test_suite) and 200 if (not os.path.exists(test_suite)):
201 not os.path.splitext(test_suite)[1] == '.apk'):
202 logging.critical('Unrecognized test suite %s, supported: %s' % 201 logging.critical('Unrecognized test suite %s, supported: %s' %
203 (test_suite, _TEST_SUITES)) 202 (test_suite, _TEST_SUITES))
204 if test_suite in _TEST_SUITES: 203 if test_suite in _TEST_SUITES:
205 logging.critical('(Remember to include the path: out/Release/%s)', 204 logging.critical('(Remember to include the path: out/Release/%s)',
206 test_suite) 205 test_suite)
207 return TestResults.FromRun(failed=[BaseTestResult(test_suite, '')]) 206 test_suite_basename = os.path.basename(test_suite)
207 if test_suite_basename in _TEST_SUITES:
208 logging.critical('Try "make -j15 %s"' % test_suite_basename)
209 else:
210 logging.critical('Unrecognized test suite, supported: %s' %
211 _TEST_SUITES)
212 return TestResults.FromOkAndFailed([], [BaseTestResult(test_suite, '')],
213 False, False)
208 fully_qualified_test_suites = [test_suite] 214 fully_qualified_test_suites = [test_suite]
209 else: 215 else:
210 fully_qualified_test_suites = FullyQualifiedTestSuites(apk) 216 fully_qualified_test_suites = FullyQualifiedTestSuites(apk)
211 debug_info_list = [] 217 debug_info_list = []
212 print 'Known suites: ' + str(_TEST_SUITES) 218 print 'Known suites: ' + str(_TEST_SUITES)
213 print 'Running these: ' + str(fully_qualified_test_suites) 219 print 'Running these: ' + str(fully_qualified_test_suites)
214 for t in fully_qualified_test_suites: 220 for t in fully_qualified_test_suites:
215 if annotate: 221 if annotate:
216 print '@@@BUILD_STEP Test suite %s@@@' % os.path.basename(t) 222 print '@@@BUILD_STEP Test suite %s@@@' % os.path.basename(t)
217 test = SingleTestRunner(device, t, gtest_filter, test_arguments, 223 test = SingleTestRunner(device, t, gtest_filter, test_arguments,
218 timeout, rebaseline, performance_test, 224 timeout, rebaseline, performance_test,
219 cleanup_test_files, tool, 0, not not log_dump_name) 225 cleanup_test_files, tool, 0, not not log_dump_name)
220 test.Run() 226 test.Run()
221 227
222 results += [test.test_results] 228 results += [test.test_results]
223 # Collect debug info. 229 # Collect debug info.
224 debug_info_list += [test.dump_debug_info] 230 debug_info_list += [test.dump_debug_info]
225 if rebaseline: 231 if rebaseline:
226 test.UpdateFilter(test.test_results.failed) 232 test.UpdateFilter(test.test_results.failed)
227 test.test_results.LogFull() 233 test.test_results.LogFull('Unit test', os.path.basename(t))
228 # Zip all debug info outputs into a file named by log_dump_name. 234 # Zip all debug info outputs into a file named by log_dump_name.
229 debug_info.GTestDebugInfo.ZipAndCleanResults( 235 debug_info.GTestDebugInfo.ZipAndCleanResults(
230 os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release', 236 os.path.join(constants.CHROME_DIR, 'out', 'Release',
231 'debug_info_dumps'), 237 'debug_info_dumps'),
232 log_dump_name, [d for d in debug_info_list if d]) 238 log_dump_name, [d for d in debug_info_list if d])
233 239
234 if annotate: 240 if annotate:
235 PrintAnnotationForTestResults(test.test_results) 241 PrintAnnotationForTestResults(test.test_results)
236 242
237 return TestResults.FromTestResults(results) 243 return TestResults.FromTestResults(results)
238 244
239 245
class TestSharder(BaseTestSharder):
  """Responsible for sharding the tests on the connected devices."""

  def __init__(self, attached_devices, test_suite, gtest_filter,
               test_arguments, timeout, rebaseline, performance_test,
               cleanup_test_files, tool, annotate):
    BaseTestSharder.__init__(self, attached_devices)
    self.test_suite = test_suite
    self.test_suite_basename = os.path.basename(test_suite)
    self.gtest_filter = gtest_filter
    self.test_arguments = test_arguments
    self.timeout = timeout
    self.rebaseline = rebaseline
    self.performance_test = performance_test
    self.cleanup_test_files = cleanup_test_files
    self.tool = tool
    self.annotate = annotate
    # Probe the suite on the first device to discover the full test list.
    probe_runner = SingleTestRunner(self.attached_devices[0], test_suite,
                                    gtest_filter, test_arguments, timeout,
                                    rebaseline, performance_test,
                                    cleanup_test_files, tool, 0)
    # The executable/apk needs to be copied before we can call GetAllTests.
    probe_runner.test_package.StripAndCopyExecutable()
    discovered_tests = probe_runner.test_package.GetAllTests()
    if not rebaseline:
      disabled_patterns = probe_runner.GetDisabledTests()
      # Keep only the tests with no match anywhere in the disabled list.
      discovered_tests = [t for t in discovered_tests
                          if not any(fnmatch.fnmatch(t, pattern)
                                     for pattern in disabled_patterns)]
    self.tests = discovered_tests

  def CreateShardedTestRunner(self, device, index):
    """Creates a suite-specific test runner.

    Args:
      device: Device serial where this shard will run.
      index: Index of this device in the pool.

    Returns:
      A SingleTestRunner object.
    """
    num_devices = len(self.attached_devices)
    # Ceiling division so every test lands in some shard.
    per_shard = (len(self.tests) + num_devices - 1) / num_devices
    shard = self.tests[index * per_shard:(index + 1) * per_shard]
    return SingleTestRunner(device, self.test_suite,
                            ':'.join(shard), self.test_arguments, self.timeout,
                            self.rebaseline, self.performance_test,
                            self.cleanup_test_files, self.tool, index)

  def OnTestsCompleted(self, test_runners, test_results):
    """Notifies that we completed the tests."""
    test_results.LogFull('Unit test', os.path.basename(self.test_suite))
    if self.annotate:
      PrintAnnotationForTestResults(test_results)
    if test_results.failed and self.rebaseline:
      test_runners[0].UpdateFilter(test_results.failed)
295 304
296 305
297 306
298 def _RunATestSuite(options): 307 def _RunATestSuite(options):
299 """Run a single test suite. 308 """Run a single test suite.
300 309
(...skipping 25 matching lines...) Expand all
326 attached_devices = [options.test_device] 335 attached_devices = [options.test_device]
327 else: 336 else:
328 attached_devices = android_commands.GetAttachedDevices() 337 attached_devices = android_commands.GetAttachedDevices()
329 338
330 if not attached_devices: 339 if not attached_devices:
331 logging.critical('A device must be attached and online.') 340 logging.critical('A device must be attached and online.')
332 if options.annotate: 341 if options.annotate:
333 print '@@@STEP_FAILURE@@@' 342 print '@@@STEP_FAILURE@@@'
334 return 1 343 return 1
335 344
345 # Reset the test port allocation. It's important to do it before starting
346 # to dispatch any tests.
347 if not ports.ResetTestServerPortAllocation():
348 raise Exception('Failed to reset test server port.')
349
336 if (len(attached_devices) > 1 and options.test_suite and 350 if (len(attached_devices) > 1 and options.test_suite and
337 not options.gtest_filter and not options.performance_test): 351 not options.gtest_filter and not options.performance_test):
338 sharder = TestSharder(attached_devices, options.test_suite, 352 sharder = TestSharder(attached_devices, options.test_suite,
339 options.gtest_filter, options.test_arguments, 353 options.gtest_filter, options.test_arguments,
340 options.timeout, options.rebaseline, 354 options.timeout, options.rebaseline,
341 options.performance_test, 355 options.performance_test,
342 options.cleanup_test_files, options.tool, 356 options.cleanup_test_files, options.tool,
343 options.annotate) 357 options.annotate)
344 test_results = sharder.RunShardedTests() 358 test_results = sharder.RunShardedTests()
345 else: 359 else:
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after
403 417
404 def ListTestSuites(): 418 def ListTestSuites():
405 """Display a list of available test suites 419 """Display a list of available test suites
406 """ 420 """
407 print 'Available test suites are:' 421 print 'Available test suites are:'
408 for test_suite in _TEST_SUITES: 422 for test_suite in _TEST_SUITES:
409 print test_suite 423 print test_suite
410 424
411 425
412 def main(argv): 426 def main(argv):
413 option_parser = run_tests_helper.CreateTestRunnerOptionParser(None, 427 option_parser = test_options_parser.CreateTestRunnerOptionParser(None,
414 default_timeout=0) 428 default_timeout=0)
415 option_parser.add_option('-s', '--suite', dest='test_suite', 429 option_parser.add_option('-s', '--suite', dest='test_suite',
416 help='Executable name of the test suite to run ' 430 help='Executable name of the test suite to run '
417 '(use -s help to list them)') 431 '(use -s help to list them)')
418 option_parser.add_option('-d', '--device', dest='test_device', 432 option_parser.add_option('-d', '--device', dest='test_device',
419 help='Target device the test suite to run ') 433 help='Target device the test suite to run ')
420 option_parser.add_option('-r', dest='rebaseline', 434 option_parser.add_option('-r', dest='rebaseline',
421 help='Rebaseline and update *testsuite_disabled', 435 help='Rebaseline and update *testsuite_disabled',
422 action='store_true', 436 action='store_true',
423 default=False) 437 default=False)
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after
472 # from all suites, but the buildbot associates the exit status only with the 486 # from all suites, but the buildbot associates the exit status only with the
473 # most recent step). 487 # most recent step).
474 if options.annotate: 488 if options.annotate:
475 return 0 489 return 0
476 else: 490 else:
477 return failed_tests_count 491 return failed_tests_count
478 492
479 493
if __name__ == '__main__':
  # Propagate main()'s return value as the process exit status.
  sys.exit(main(sys.argv))
OLDNEW
« no previous file with comments | « build/android/pylib/valgrind_tools.py ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698