OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # | 2 # |
3 # Copyright 2013 The Chromium Authors. All rights reserved. | 3 # Copyright 2013 The Chromium Authors. All rights reserved. |
4 # Use of this source code is governed by a BSD-style license that can be | 4 # Use of this source code is governed by a BSD-style license that can be |
5 # found in the LICENSE file. | 5 # found in the LICENSE file. |
6 | 6 |
7 """Runs all types of tests from one unified interface. | 7 """Runs all types of tests from one unified interface. |
8 | 8 |
9 TODO(gkanwar): | 9 TODO(gkanwar): |
10 * Add options to run Monkey tests. | 10 * Add options to run Monkey tests. |
11 """ | 11 """ |
12 | 12 |
13 import collections | 13 import collections |
14 import optparse | 14 import optparse |
15 import os | 15 import os |
16 import shutil | 16 import shutil |
17 import sys | 17 import sys |
18 | 18 |
19 from pylib import constants | 19 from pylib import constants |
20 from pylib import ports | 20 from pylib import ports |
21 from pylib.base import base_test_result | 21 from pylib.base import base_test_result |
22 from pylib.base import test_dispatcher | 22 from pylib.base import test_dispatcher |
23 from pylib.gtest import gtest_config | 23 from pylib.gtest import gtest_config |
24 from pylib.gtest import setup as gtest_setup | 24 from pylib.gtest import setup as gtest_setup |
25 from pylib.gtest import test_options as gtest_test_options | 25 from pylib.gtest import test_options as gtest_test_options |
26 from pylib.host_driven import run_python_tests as python_dispatch | 26 from pylib.host_driven import setup as host_driven_setup |
27 from pylib.instrumentation import setup as instrumentation_setup | 27 from pylib.instrumentation import setup as instrumentation_setup |
28 from pylib.instrumentation import test_options as instrumentation_test_options | 28 from pylib.instrumentation import test_options as instrumentation_test_options |
29 from pylib.uiautomator import setup as uiautomator_setup | 29 from pylib.uiautomator import setup as uiautomator_setup |
30 from pylib.uiautomator import test_options as uiautomator_test_options | 30 from pylib.uiautomator import test_options as uiautomator_test_options |
31 from pylib.utils import report_results | 31 from pylib.utils import report_results |
32 from pylib.utils import run_tests_helper | 32 from pylib.utils import run_tests_helper |
33 | 33 |
34 | 34 |
35 _SDK_OUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out') | 35 _SDK_OUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out') |
36 | 36 |
(...skipping 115 matching lines...) |
152 option_parser.add_option( | 152 option_parser.add_option( |
153 '-A', '--annotation', dest='annotation_str', | 153 '-A', '--annotation', dest='annotation_str', |
154 help=('Comma-separated list of annotations. Run only tests with any of ' | 154 help=('Comma-separated list of annotations. Run only tests with any of ' |
155 'the given annotations. An annotation can be either a key or a ' | 155 'the given annotations. An annotation can be either a key or a ' |
156 'key-values pair. A test that has no annotation is considered ' | 156 'key-values pair. A test that has no annotation is considered ' |
157 '"SmallTest".')) | 157 '"SmallTest".')) |
158 option_parser.add_option( | 158 option_parser.add_option( |
159 '-E', '--exclude-annotation', dest='exclude_annotation_str', | 159 '-E', '--exclude-annotation', dest='exclude_annotation_str', |
160 help=('Comma-separated list of annotations. Exclude tests with these ' | 160 help=('Comma-separated list of annotations. Exclude tests with these ' |
161 'annotations.')) | 161 'annotations.')) |
162 option_parser.add_option('-j', '--java_only', action='store_true', | |
163 default=False, help='Run only the Java tests.') | |
164 option_parser.add_option('-p', '--python_only', action='store_true', | |
165 default=False, | |
166 help='Run only the host-driven tests.') | |
167 option_parser.add_option('--screenshot', dest='screenshot_failures', | 162 option_parser.add_option('--screenshot', dest='screenshot_failures', |
168 action='store_true', | 163 action='store_true', |
169 help='Capture screenshots of test failures') | 164 help='Capture screenshots of test failures') |
170 option_parser.add_option('--save-perf-json', action='store_true', | 165 option_parser.add_option('--save-perf-json', action='store_true', |
171 help='Saves the JSON file for each UI Perf test.') | 166 help='Saves the JSON file for each UI Perf test.') |
172 option_parser.add_option('--official-build', help='Run official build tests.') | 167 option_parser.add_option('--official-build', action='store_true', |
173 option_parser.add_option('--python_test_root', | 168 help='Run official build tests.') |
174 help='Root of the host-driven tests.') | |
175 option_parser.add_option('--keep_test_server_ports', | 169 option_parser.add_option('--keep_test_server_ports', |
176 action='store_true', | 170 action='store_true', |
177 help=('Indicates the test server ports must be ' | 171 help=('Indicates the test server ports must be ' |
178 'kept. When this is run via a sharder ' | 172 'kept. When this is run via a sharder ' |
179 'the test server ports should be kept and ' | 173 'the test server ports should be kept and ' |
180 'should not be reset.')) | 174 'should not be reset.')) |
181 # TODO(gkanwar): This option is deprecated. Remove it in the future. | 175 # TODO(gkanwar): This option is deprecated. Remove it in the future. |
182 option_parser.add_option('--disable_assertions', action='store_true', | 176 option_parser.add_option('--disable_assertions', action='store_true', |
183 help=('(DEPRECATED) Run with java assertions ' | 177 help=('(DEPRECATED) Run with java assertions ' |
184 'disabled.')) | 178 'disabled.')) |
185 option_parser.add_option('--test_data', action='append', default=[], | 179 option_parser.add_option('--test_data', action='append', default=[], |
186 help=('Each instance defines a directory of test ' | 180 help=('Each instance defines a directory of test ' |
187 'data that should be copied to the target(s) ' | 181 'data that should be copied to the target(s) ' |
188 'before running the tests. The argument ' | 182 'before running the tests. The argument ' |
189 'should be of the form <target>:<source>, ' | 183 'should be of the form <target>:<source>, ' |
190 '<target> is relative to the device data ' | 184 '<target> is relative to the device data ' |
191 'directory, and <source> is relative to the ' | 185 'directory, and <source> is relative to the ' |
192 'chromium build directory.')) | 186 'chromium build directory.')) |
193 | 187 |
194 | 188 |
195 def ProcessJavaTestOptions(options, error_func): | 189 def ProcessJavaTestOptions(options, error_func): |
196 """Processes options/arguments and populates |options| with defaults.""" | 190 """Processes options/arguments and populates |options| with defaults.""" |
197 | 191 |
198 if options.java_only and options.python_only: | |
199 error_func('Options java_only (-j) and python_only (-p) ' | |
200 'are mutually exclusive.') | |
201 options.run_java_tests = True | |
202 options.run_python_tests = True | |
203 if options.java_only: | |
204 options.run_python_tests = False | |
205 elif options.python_only: | |
206 options.run_java_tests = False | |
207 | |
208 if not options.python_test_root: | |
209 options.run_python_tests = False | |
210 | |
211 if options.annotation_str: | 192 if options.annotation_str: |
212 options.annotations = options.annotation_str.split(',') | 193 options.annotations = options.annotation_str.split(',') |
213 elif options.test_filter: | 194 elif options.test_filter: |
214 options.annotations = [] | 195 options.annotations = [] |
215 else: | 196 else: |
216 options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest', | 197 options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest', |
217 'EnormousTest'] | 198 'EnormousTest'] |
218 | 199 |
219 if options.exclude_annotation_str: | 200 if options.exclude_annotation_str: |
220 options.exclude_annotations = options.exclude_annotation_str.split(',') | 201 options.exclude_annotations = options.exclude_annotation_str.split(',') |
221 else: | 202 else: |
222 options.exclude_annotations = [] | 203 options.exclude_annotations = [] |
223 | 204 |
224 if not options.keep_test_server_ports: | 205 if not options.keep_test_server_ports: |
225 if not ports.ResetTestServerPortAllocation(): | 206 if not ports.ResetTestServerPortAllocation(): |
226 raise Exception('Failed to reset test server port.') | 207 raise Exception('Failed to reset test server port.') |
227 | 208 |
228 | 209 |
229 def AddInstrumentationTestOptions(option_parser): | 210 def AddInstrumentationTestOptions(option_parser): |
230 """Adds Instrumentation test options to |option_parser|.""" | 211 """Adds Instrumentation test options to |option_parser|.""" |
231 | 212 |
232 option_parser.usage = '%prog instrumentation [options]' | 213 option_parser.usage = '%prog instrumentation [options]' |
233 option_parser.command_list = [] | 214 option_parser.command_list = [] |
234 option_parser.example = ('%prog instrumentation ' | 215 option_parser.example = ('%prog instrumentation ' |
235 '--test-apk=ChromiumTestShellTest') | 216 '--test-apk=ChromiumTestShellTest') |
236 | 217 |
237 AddJavaTestOptions(option_parser) | 218 AddJavaTestOptions(option_parser) |
238 AddCommonOptions(option_parser) | 219 AddCommonOptions(option_parser) |
239 | 220 |
| 221 option_parser.add_option('-j', '--java_only', action='store_true', |
| 222 default=False, help='Run only the Java tests.') |
| 223 option_parser.add_option('-p', '--python_only', action='store_true', |
| 224 default=False, |
| 225 help='Run only the host-driven tests.') |
| 226 option_parser.add_option('--python_test_root', |
| 227 help='Root of the host-driven tests.') |
240 option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger', | 228 option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger', |
241 action='store_true', | 229 action='store_true', |
242 help='Wait for debugger.') | 230 help='Wait for debugger.') |
243 #TODO(craigdh): Remove option once -I is no longer passed downstream. | 231 #TODO(craigdh): Remove option once -I is no longer passed downstream. |
244 option_parser.add_option('-I', dest='install_apk', action='store_true', | 232 option_parser.add_option('-I', dest='install_apk', action='store_true', |
245 help='(DEPRECATED) Install the test apk.') | 233 help='(DEPRECATED) Install the test apk.') |
246 option_parser.add_option( | 234 option_parser.add_option( |
247 '--test-apk', dest='test_apk', | 235 '--test-apk', dest='test_apk', |
248 help=('The name of the apk containing the tests ' | 236 help=('The name of the apk containing the tests ' |
249 '(without the .apk extension; e.g. "ContentShellTest"). ' | 237 '(without the .apk extension; e.g. "ContentShellTest"). ' |
250 'Alternatively, this can be a full path to the apk.')) | 238 'Alternatively, this can be a full path to the apk.')) |
251 | 239 |
252 | 240 |
253 def ProcessInstrumentationOptions(options, error_func): | 241 def ProcessInstrumentationOptions(options, error_func): |
254 """Processes options/arguments and populate |options| with defaults. | 242 """Processes options/arguments and populate |options| with defaults. |
255 | 243 |
256 Args: | 244 Args: |
257 options: optparse.Options object. | 245 options: optparse.Options object. |
258 error_func: Function to call with the error message in case of an error. | 246 error_func: Function to call with the error message in case of an error. |
259 | 247 |
260 Returns: | 248 Returns: |
261 An InstrumentationOptions named tuple which contains all options relevant to | 249 An InstrumentationOptions named tuple which contains all options relevant to |
262 instrumentation tests. | 250 instrumentation tests. |
263 """ | 251 """ |
264 | 252 |
265 ProcessJavaTestOptions(options, error_func) | 253 ProcessJavaTestOptions(options, error_func) |
266 | 254 |
| 255 if options.java_only and options.python_only: |
| 256 error_func('Options java_only (-j) and python_only (-p) ' |
| 257 'are mutually exclusive.') |
| 258 options.run_java_tests = True |
| 259 options.run_python_tests = True |
| 260 if options.java_only: |
| 261 options.run_python_tests = False |
| 262 elif options.python_only: |
| 263 options.run_java_tests = False |
| 264 |
| 265 if not options.python_test_root: |
| 266 options.run_python_tests = False |
| 267 |
267 if not options.test_apk: | 268 if not options.test_apk: |
268 error_func('--test-apk must be specified.') | 269 error_func('--test-apk must be specified.') |
269 | 270 |
270 if os.path.exists(options.test_apk): | 271 if os.path.exists(options.test_apk): |
271 # The APK is fully qualified, assume the JAR lives alongside. | 272 # The APK is fully qualified, assume the JAR lives alongside. |
272 options.test_apk_path = options.test_apk | 273 options.test_apk_path = options.test_apk |
273 options.test_apk_jar_path = (os.path.splitext(options.test_apk_path)[0] + | 274 options.test_apk_jar_path = (os.path.splitext(options.test_apk_path)[0] + |
274 '.jar') | 275 '.jar') |
275 else: | 276 else: |
276 options.test_apk_path = os.path.join(_SDK_OUT_DIR, | 277 options.test_apk_path = os.path.join(_SDK_OUT_DIR, |
(...skipping 147 matching lines...) |
424 tests, runner_factory, options.wait_for_debugger, | 425 tests, runner_factory, options.wait_for_debugger, |
425 options.test_device, | 426 options.test_device, |
426 shard=True, | 427 shard=True, |
427 build_type=options.build_type, | 428 build_type=options.build_type, |
428 test_timeout=None, | 429 test_timeout=None, |
429 num_retries=options.num_retries) | 430 num_retries=options.num_retries) |
430 | 431 |
431 results.AddTestRunResults(test_results) | 432 results.AddTestRunResults(test_results) |
432 | 433 |
433 if options.run_python_tests: | 434 if options.run_python_tests: |
434 test_results, test_exit_code = ( | 435 runner_factory, tests = host_driven_setup.InstrumentationSetup( |
435 python_dispatch.DispatchPythonTests(options)) | 436 options.python_test_root, options.official_build, |
| 437 instrumentation_options) |
| 438 |
| 439 test_results, test_exit_code = test_dispatcher.RunTests( |
| 440 tests, runner_factory, False, |
| 441 options.test_device, |
| 442 shard=True, |
| 443 build_type=options.build_type, |
| 444 test_timeout=None, |
| 445 num_retries=options.num_retries) |
436 | 446 |
437 results.AddTestRunResults(test_results) | 447 results.AddTestRunResults(test_results) |
438 | 448 |
439 # Only allow exit code escalation | 449 # Only allow exit code escalation |
440 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: | 450 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: |
441 exit_code = test_exit_code | 451 exit_code = test_exit_code |
442 | 452 |
443 report_results.LogFull( | 453 report_results.LogFull( |
444 results=results, | 454 results=results, |
445 test_type='Instrumentation', | 455 test_type='Instrumentation', |
446 test_package=os.path.basename(options.test_apk), | 456 test_package=os.path.basename(options.test_apk), |
447 annotation=options.annotations, | 457 annotation=options.annotations, |
448 build_type=options.build_type, | 458 build_type=options.build_type, |
449 flakiness_server=options.flakiness_dashboard_server) | 459 flakiness_server=options.flakiness_dashboard_server) |
450 | 460 |
451 return exit_code | 461 return exit_code |
452 | 462 |
453 | 463 |
454 def _RunUIAutomatorTests(options, error_func): | 464 def _RunUIAutomatorTests(options, error_func): |
455 """Subcommand of RunTestsCommands which runs uiautomator tests.""" | 465 """Subcommand of RunTestsCommands which runs uiautomator tests.""" |
456 uiautomator_options = ProcessUIAutomatorOptions(options, error_func) | 466 uiautomator_options = ProcessUIAutomatorOptions(options, error_func) |
457 | 467 |
458 results = base_test_result.TestRunResults() | 468 results = base_test_result.TestRunResults() |
459 exit_code = 0 | 469 exit_code = 0 |
460 | 470 |
461 if options.run_java_tests: | 471 runner_factory, tests = uiautomator_setup.Setup(uiautomator_options) |
462 runner_factory, tests = uiautomator_setup.Setup(uiautomator_options) | |
463 | 472 |
464 test_results, exit_code = test_dispatcher.RunTests( | 473 results, exit_code = test_dispatcher.RunTests( |
465 tests, runner_factory, False, options.test_device, | 474 tests, runner_factory, False, options.test_device, |
466 shard=True, | 475 shard=True, |
467 build_type=options.build_type, | 476 build_type=options.build_type, |
468 test_timeout=None, | 477 test_timeout=None, |
469 num_retries=options.num_retries) | 478 num_retries=options.num_retries) |
470 | |
471 results.AddTestRunResults(test_results) | |
472 | |
473 if options.run_python_tests: | |
474 test_results, test_exit_code = ( | |
475 python_dispatch.DispatchPythonTests(options)) | |
476 | |
477 results.AddTestRunResults(test_results) | |
478 | |
479 # Only allow exit code escalation | |
480 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: | |
481 exit_code = test_exit_code | |
482 | 479 |
483 report_results.LogFull( | 480 report_results.LogFull( |
484 results=results, | 481 results=results, |
485 test_type='UIAutomator', | 482 test_type='UIAutomator', |
486 test_package=os.path.basename(options.test_jar), | 483 test_package=os.path.basename(options.test_jar), |
487 annotation=options.annotations, | 484 annotation=options.annotations, |
488 build_type=options.build_type, | 485 build_type=options.build_type, |
489 flakiness_server=options.flakiness_dashboard_server) | 486 flakiness_server=options.flakiness_dashboard_server) |
490 | 487 |
491 return exit_code | 488 return exit_code |
(...skipping 26 matching lines...) |
518 | 515 |
519 if command == 'gtest': | 516 if command == 'gtest': |
520 return _RunGTests(options, option_parser.error) | 517 return _RunGTests(options, option_parser.error) |
521 elif command == 'instrumentation': | 518 elif command == 'instrumentation': |
522 return _RunInstrumentationTests(options, option_parser.error) | 519 return _RunInstrumentationTests(options, option_parser.error) |
523 elif command == 'uiautomator': | 520 elif command == 'uiautomator': |
524 return _RunUIAutomatorTests(options, option_parser.error) | 521 return _RunUIAutomatorTests(options, option_parser.error) |
525 else: | 522 else: |
526 raise Exception('Unknown test type.') | 523 raise Exception('Unknown test type.') |
527 | 524 |
528 return exit_code | |
529 | |
530 | 525 |
531 def HelpCommand(command, options, args, option_parser): | 526 def HelpCommand(command, options, args, option_parser): |
532 """Display help for a certain command, or overall help. | 527 """Display help for a certain command, or overall help. |
533 | 528 |
534 Args: | 529 Args: |
535 command: String indicating the command that was received to trigger | 530 command: String indicating the command that was received to trigger |
536 this function. | 531 this function. |
537 options: optparse options dictionary. | 532 options: optparse options dictionary. |
538 args: List of extra args from optparse. | 533 args: List of extra args from optparse. |
539 option_parser: optparse.OptionParser object. | 534 option_parser: optparse.OptionParser object. |
(...skipping 81 matching lines...) |
621 return 0 | 616 return 0 |
622 command = argv[1] | 617 command = argv[1] |
623 VALID_COMMANDS[command].add_options_func(option_parser) | 618 VALID_COMMANDS[command].add_options_func(option_parser) |
624 options, args = option_parser.parse_args(argv) | 619 options, args = option_parser.parse_args(argv) |
625 return VALID_COMMANDS[command].run_command_func( | 620 return VALID_COMMANDS[command].run_command_func( |
626 command, options, args, option_parser) | 621 command, options, args, option_parser) |
627 | 622 |
628 | 623 |
629 if __name__ == '__main__': | 624 if __name__ == '__main__': |
630 sys.exit(main(sys.argv)) | 625 sys.exit(main(sys.argv)) |