OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # | 2 # |
3 # Copyright 2013 The Chromium Authors. All rights reserved. | 3 # Copyright 2013 The Chromium Authors. All rights reserved. |
4 # Use of this source code is governed by a BSD-style license that can be | 4 # Use of this source code is governed by a BSD-style license that can be |
5 # found in the LICENSE file. | 5 # found in the LICENSE file. |
6 | 6 |
7 """Runs all types of tests from one unified interface. | 7 """Runs all types of tests from one unified interface. |
8 | 8 |
9 TODO(gkanwar): | 9 TODO(gkanwar): |
10 * Add options to run Monkey tests. | 10 * Add options to run Monkey tests. |
11 """ | 11 """ |
12 | 12 |
13 import collections | 13 import collections |
| 14 import logging |
14 import optparse | 15 import optparse |
15 import os | 16 import os |
16 import shutil | 17 import shutil |
17 import sys | 18 import sys |
18 | 19 |
| 20 from pylib import android_commands |
19 from pylib import constants | 21 from pylib import constants |
20 from pylib import ports | 22 from pylib import ports |
21 from pylib.base import base_test_result | 23 from pylib.base import base_test_result |
22 from pylib.base import test_dispatcher | 24 from pylib.base import test_dispatcher |
23 from pylib.gtest import gtest_config | 25 from pylib.gtest import gtest_config |
24 from pylib.gtest import setup as gtest_setup | 26 from pylib.gtest import setup as gtest_setup |
25 from pylib.gtest import test_options as gtest_test_options | 27 from pylib.gtest import test_options as gtest_test_options |
26 from pylib.host_driven import setup as host_driven_setup | 28 from pylib.host_driven import setup as host_driven_setup |
27 from pylib.instrumentation import setup as instrumentation_setup | 29 from pylib.instrumentation import setup as instrumentation_setup |
28 from pylib.instrumentation import test_options as instrumentation_test_options | 30 from pylib.instrumentation import test_options as instrumentation_test_options |
(...skipping 418 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
447 Returns: | 449 Returns: |
448 A PerfOptions named tuple which contains all options relevant to | 450 A PerfOptions named tuple which contains all options relevant to |
449 perf tests. | 451 perf tests. |
450 """ | 452 """ |
451 if not options.steps and not options.print_step: | 453 if not options.steps and not options.print_step: |
452 error_func('Please specify --steps or --print-step') | 454 error_func('Please specify --steps or --print-step') |
453 return perf_test_options.PerfOptions( | 455 return perf_test_options.PerfOptions( |
454 options.steps, options.flaky_steps, options.print_step) | 456 options.steps, options.flaky_steps, options.print_step) |
455 | 457 |
456 | 458 |
def _RunGTests(options, error_func, devices):
  """Runs one or more gtest suites (subcommand of RunTestsCommands).

  Args:
    options: optparse options for the gtest command.
    error_func: Callable invoked with an error message on bad options
        (unused here; kept for a signature parallel to the other _Run*
        helpers).
    devices: List of attached device serials to dispatch tests onto.

  Returns:
    Integer exit code (0 on success).
  """
  ProcessGTestOptions(options)

  overall_exit_code = 0
  for suite in options.suite_name:
    # TODO(gkanwar): Move this into ProcessGTestOptions once we require -s for
    # the gtest command.
    suite_options = gtest_test_options.GTestOptions(
        options.tool, options.cleanup_test_files, options.push_deps,
        options.test_filter, options.test_arguments, options.timeout, suite)
    runner_factory, tests = gtest_setup.Setup(suite_options, devices)

    results, suite_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=options.num_retries)

    # Escalate to the first non-zero code, but never replace a hard error.
    if suite_exit_code and overall_exit_code != constants.ERROR_EXIT_CODE:
      overall_exit_code = suite_exit_code

    report_results.LogFull(
        results=results,
        test_type='Unit test',
        test_package=suite,
        flakiness_server=options.flakiness_dashboard_server)

  # Remove the isolate dependency dir if setup created one.
  if os.path.isdir(constants.ISOLATE_DEPS_DIR):
    shutil.rmtree(constants.ISOLATE_DEPS_DIR)

  return overall_exit_code
495 | 494 |
496 | 495 |
def _RunInstrumentationTests(options, error_func, devices):
  """Runs Java and host-driven instrumentation tests.

  Subcommand of RunTestsCommands.

  Args:
    options: optparse options for the instrumentation command.
    error_func: Callable invoked with an error message on bad options.
    devices: List of attached device serials to dispatch tests onto.

  Returns:
    Integer exit code (0 on success).
  """
  instrumentation_options = ProcessInstrumentationOptions(options, error_func)

  # A debugger can only attach to one device, so fall back to a single
  # device when --wait-for-debugger is combined with multiple devices.
  if options.wait_for_debugger and len(devices) > 1:
    logging.warning('Debugger can not be sharded, using first available device')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if options.run_java_tests:
    runner_factory, java_tests = instrumentation_setup.Setup(
        instrumentation_options)
    java_results, exit_code = test_dispatcher.RunTests(
        java_tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=options.num_retries)
    results.AddTestRunResults(java_results)

  if options.run_python_tests:
    runner_factory, python_tests = host_driven_setup.InstrumentationSetup(
        options.host_driven_root, options.official_build,
        instrumentation_options)
    if python_tests:
      python_results, python_exit_code = test_dispatcher.RunTests(
          python_tests, runner_factory, devices, shard=True,
          test_timeout=None, num_retries=options.num_retries)
      results.AddTestRunResults(python_results)
      # Only allow exit code escalation.
      if python_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = python_exit_code

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(options.test_apk),
      annotation=options.annotations,
      flakiness_server=options.flakiness_dashboard_server)

  return exit_code
543 | 540 |
544 | 541 |
def _RunUIAutomatorTests(options, error_func, devices):
  """Runs uiautomator-based tests (subcommand of RunTestsCommands).

  Args:
    options: optparse options for the uiautomator command.
    error_func: Callable invoked with an error message on bad options.
    devices: List of attached device serials to dispatch tests onto.

  Returns:
    Integer exit code (0 on success).
  """
  ui_options = ProcessUIAutomatorOptions(options, error_func)
  factory, tests = uiautomator_setup.Setup(ui_options)

  run_results, exit_code = test_dispatcher.RunTests(
      tests, factory, devices, shard=True, test_timeout=None,
      num_retries=options.num_retries)

  # Log full results (and report to the flakiness dashboard if configured).
  report_results.LogFull(
      results=run_results,
      test_type='UIAutomator',
      test_package=os.path.basename(options.test_jar),
      annotation=options.annotations,
      flakiness_server=options.flakiness_dashboard_server)

  return exit_code
565 | 560 |
566 | 561 |
def _RunMonkeyTests(options, error_func, devices):
  """Runs monkey tests (subcommand of RunTestsCommands).

  Args:
    options: optparse options for the monkey command.
    error_func: Callable invoked with an error message on bad options.
    devices: List of attached device serials to dispatch tests onto.

  Returns:
    Integer exit code (0 on success).
  """
  monkey_options = ProcessMonkeyTestOptions(options, error_func)
  factory, tests = monkey_setup.Setup(monkey_options)

  # Note: dispatched with shard=False, unlike the other test types.
  run_results, exit_code = test_dispatcher.RunTests(
      tests, factory, devices, shard=False, test_timeout=None)

  report_results.LogFull(
      results=run_results,
      test_type='Monkey',
      test_package='Monkey')

  return exit_code
582 | 577 |
583 | 578 |
def _RunPerfTests(options, error_func, devices):
  """Runs perf tests (subcommand of RunTestsCommands).

  Args:
    options: optparse options for the perf command.
    error_func: Callable invoked with an error message on bad options.
    devices: List of attached device serials to dispatch tests onto.

  Returns:
    Integer exit code. Always 0 for the sharding stage; the individual
    test's exit code is surfaced by the --print-step stage.
  """
  perf_options = ProcessPerfTestOptions(options, error_func)

  if perf_options.print_step:
    # Just print the results from a single previously executed step.
    return perf_test_runner.PrintTestOutput(perf_options.print_step)

  factory, tests = perf_setup.Setup(perf_options)
  run_results, _ = test_dispatcher.RunTests(
      tests, factory, devices, shard=True, test_timeout=None)

  report_results.LogFull(
      results=run_results,
      test_type='Perf',
      test_package='Perf')

  # Always return 0 on the sharding stage. Individual tests exit_code
  # will be returned on the print_step stage.
  return 0
603 | 598 |
604 | 599 |
def _GetAttachedDevices(test_device=None):
  """Get all attached devices.

  Args:
    test_device: Name of a specific device to use; when given, it must be
        among the attached devices and is returned alone.

  Returns:
    A sorted list of attached device serials.

  Raises:
    AssertionError: If test_device is not attached, or no devices are
        attached at all.
  """
  # Fix: removed a dead `attached_devices = []` that was immediately
  # overwritten by the call below.
  attached_devices = android_commands.GetAttachedDevices()
  if test_device:
    # NOTE(review): asserts are stripped under `python -O`; consider raising
    # an explicit exception if this script may run optimized.
    assert test_device in attached_devices, (
        'Did not find device %s among attached devices. Attached devices: %s'
        % (test_device, ', '.join(attached_devices)))
    attached_devices = [test_device]

  assert attached_devices, 'No devices attached.'

  return sorted(attached_devices)
| 622 |
def RunTestsCommand(command, options, args, option_parser):
  """Checks test type and dispatches to the appropriate function.

  Args:
    command: String indicating the command that was received to trigger
        this function.
    options: optparse options dictionary.
    args: List of extra args from optparse.
    option_parser: optparse.OptionParser object.

  Returns:
    Integer indicated exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
        individual test runner.
  """
  # Check for extra arguments.
  if len(args) > 2:
    option_parser.error('Unrecognized arguments: %s' % (' '.join(args[2:])))
    return constants.ERROR_EXIT_CODE

  ProcessCommonOptions(options)

  devices = _GetAttachedDevices(options.test_device)

  # Dispatch table: command name -> test-runner subcommand. All runners
  # share the (options, error_func, devices) signature.
  runners = {
      'gtest': _RunGTests,
      'instrumentation': _RunInstrumentationTests,
      'uiautomator': _RunUIAutomatorTests,
      'monkey': _RunMonkeyTests,
      'perf': _RunPerfTests,
  }
  if command not in runners:
    raise Exception('Unknown test type.')
  return runners[command](options, option_parser.error, devices)
642 | 662 |
643 | 663 |
644 def HelpCommand(command, options, args, option_parser): | 664 def HelpCommand(command, options, args, option_parser): |
645 """Display help for a certain command, or overall help. | 665 """Display help for a certain command, or overall help. |
646 | 666 |
647 Args: | 667 Args: |
648 command: String indicating the command that was received to trigger | 668 command: String indicating the command that was received to trigger |
649 this function. | 669 this function. |
(...skipping 89 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
739 option_parser.error('Invalid command.') | 759 option_parser.error('Invalid command.') |
740 command = argv[1] | 760 command = argv[1] |
741 VALID_COMMANDS[command].add_options_func(option_parser) | 761 VALID_COMMANDS[command].add_options_func(option_parser) |
742 options, args = option_parser.parse_args(argv) | 762 options, args = option_parser.parse_args(argv) |
743 return VALID_COMMANDS[command].run_command_func( | 763 return VALID_COMMANDS[command].run_command_func( |
744 command, options, args, option_parser) | 764 command, options, args, option_parser) |
745 | 765 |
746 | 766 |
# Script entry point: exit with the code returned by main().
if __name__ == '__main__':
  sys.exit(main(sys.argv))
OLD | NEW |