OLD | NEW |
1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 | 5 |
6 import logging | 6 import logging |
7 import re | 7 import re |
8 import os | 8 import os |
9 import pexpect | 9 import pexpect |
10 | 10 |
11 from perf_tests_helper import PrintPerfResult | 11 from perf_tests_helper import PrintPerfResult |
12 from test_result import BaseTestResult, TestResults | 12 from test_result import BaseTestResult, TestResults |
13 from valgrind_tools import CreateTool | |
14 | 13 |
15 | 14 |
16 # TODO(bulach): TestPackage, TestPackageExecutable and | 15 # TODO(bulach): TestPackage, TestPackageExecutable and |
17 # TestPackageApk are a work in progress related to making the native tests | 16 # TestPackageApk are a work in progress related to making the native tests |
18 # run as a NDK-app from an APK rather than a stand-alone executable. | 17 # run as a NDK-app from an APK rather than a stand-alone executable. |
19 class TestPackage(object): | 18 class TestPackage(object): |
20 """A helper base class for both APK and stand-alone executables. | 19 """A helper base class for both APK and stand-alone executables. |
21 | 20 |
22 Args: | 21 Args: |
23 adb: ADB interface the tests are using. | 22 adb: ADB interface the tests are using. |
(...skipping 12 matching lines...) |
36 self.adb = adb | 35 self.adb = adb |
37 self.device = device | 36 self.device = device |
38 self.test_suite_full = test_suite | 37 self.test_suite_full = test_suite |
39 self.test_suite = os.path.splitext(test_suite)[0] | 38 self.test_suite = os.path.splitext(test_suite)[0] |
40 self.test_suite_basename = self._GetTestSuiteBaseName() | 39 self.test_suite_basename = self._GetTestSuiteBaseName() |
41 self.test_suite_dirname = os.path.dirname( | 40 self.test_suite_dirname = os.path.dirname( |
42 self.test_suite.split(self.test_suite_basename)[0]) | 41 self.test_suite.split(self.test_suite_basename)[0]) |
43 self.rebaseline = rebaseline | 42 self.rebaseline = rebaseline |
44 self.performance_test = performance_test | 43 self.performance_test = performance_test |
45 self.cleanup_test_files = cleanup_test_files | 44 self.cleanup_test_files = cleanup_test_files |
46 self.tool = CreateTool(tool, self.adb) | 45 self.tool = tool |
47 if timeout == 0: | 46 if timeout == 0: |
48 if self.test_suite_basename == 'page_cycler_tests': | 47 timeout = 60 |
49 timeout = 900 | |
50 else: | |
51 timeout = 60 | |
52 # On a VM (e.g. chromium buildbots), this timeout is way too small. | 48 # On a VM (e.g. chromium buildbots), this timeout is way too small. |
53 if os.environ.get('BUILDBOT_SLAVENAME'): | 49 if os.environ.get('BUILDBOT_SLAVENAME'): |
54 timeout = timeout * 2 | 50 timeout = timeout * 2 |
55 self.timeout = timeout * self.tool.GetTimeoutScale() | 51 self.timeout = timeout * self.tool.GetTimeoutScale() |
56 self.dump_debug_info = dump_debug_info | 52 self.dump_debug_info = dump_debug_info |
57 | 53 |
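The simplified timeout logic above drops the old page_cycler special case (900s) in favor of a single 60-second default, and the constructor now receives an already-created tool object instead of building one via CreateTool. A minimal sketch of the resulting timeout computation; compute_timeout and tool_scale are illustrative stand-ins, not names from this change:

```python
import os

def compute_timeout(timeout, tool_scale):
    # 0 means "use the default"; the 900s page_cycler case is gone.
    if timeout == 0:
        timeout = 60
    # On a VM (e.g. chromium buildbots) the default is way too small.
    if os.environ.get('BUILDBOT_SLAVENAME'):
        timeout *= 2
    # Tools such as Valgrind stretch the budget further, mirroring
    # the tool.GetTimeoutScale() multiplier in the constructor.
    return timeout * tool_scale
```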
58 def _BeginGetIOStats(self): | 54 def _BeginGetIOStats(self): |
59 """Gets I/O statistics before running test. | 55 """Gets I/O statistics before running test. |
60 | 56 |
61 Returns: | 57 Returns: |
62 Tuple of (I/O stats object, flag of ready to continue). When encountering | 58 I/O stats object. The I/O stats object may be None if the test is not |
63 error, ready-to-continue flag is False, True otherwise. The I/O stats | 59 a performance test. |
64 object may be None if the test is not performance test. | |
65 """ | 60 """ |
66 initial_io_stats = None | 61 initial_io_stats = None |
67 # Try to get the disk I/O statistics for all performance tests. | 62 # Try to get the disk I/O statistics for all performance tests. |
68 if self.performance_test and not self.rebaseline: | 63 if self.performance_test and not self.rebaseline: |
69 initial_io_stats = self.adb.GetIoStats() | 64 initial_io_stats = self.adb.GetIoStats() |
70 # Get rid of the noise introduced by launching Chrome for page cycler. | 65 return initial_io_stats |
71 if self.test_suite_basename == 'page_cycler_tests': | |
72 try: | |
73 chrome_launch_done_re = re.compile( | |
74 re.escape('Finish waiting for browser launch!')) | |
75 self.adb.WaitForLogMatch(chrome_launch_done_re) | |
76 initial_io_stats = self.adb.GetIoStats() | |
77 except pexpect.TIMEOUT: | |
78 logging.error('Test terminated because Chrome launcher has no' | |
79 'response after 120 second.') | |
80 return (None, False) | |
81 finally: | |
82 if self.dump_debug_info: | |
83 self.dump_debug_info.TakeScreenshot('_Launch_Chrome_') | |
84 return (initial_io_stats, True) | |
85 | 66 |
86 def _EndGetIOStats(self, initial_io_stats): | 67 def _EndGetIOStats(self, initial_io_stats): |
87 """Gets I/O statistics after running test and calcuate the I/O delta. | 68 """Gets I/O statistics after running test and calcuate the I/O delta. |
88 | 69 |
89 Args: | 70 Args: |
90 initial_io_stats: I/O stats object obtained from _BeginGetIOStats. | 71 initial_io_stats: I/O stats object obtained from _BeginGetIOStats. |
91 | 72 |
92 Returns: | 73 Returns: |
93 String of formatted disk I/O statistics. | 74 String of formatted disk I/O statistics. |
94 """ | 75 """ |
95 disk_io = '' | 76 disk_io = '' |
96 if self.performance_test and initial_io_stats: | 77 if self.performance_test and initial_io_stats: |
97 final_io_stats = self.adb.GetIoStats() | 78 final_io_stats = self.adb.GetIoStats() |
98 for stat in final_io_stats: | 79 for stat in final_io_stats: |
99 disk_io += '\n' + PrintPerfResult(stat, stat, | 80 disk_io += '\n' + PrintPerfResult(stat, stat, |
100 [final_io_stats[stat] - | 81 [final_io_stats[stat] - |
101 initial_io_stats[stat]], | 82 initial_io_stats[stat]], |
102 stat.split('_')[1], True, False) | 83 stat.split('_')[1], |
| 84 print_to_stdout=False) |
103 logging.info(disk_io) | 85 logging.info(disk_io) |
104 return disk_io | 86 return disk_io |
105 | 87 |
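The begin/end pair brackets the test run with two counter snapshots and reports the difference. A hedged sketch of that delta computation, assuming GetIoStats() returns a flat dict of counters; the sample keys and values below are made up for illustration:

```python
# Snapshot taken by _BeginGetIOStats before the run...
initial = {'num_reads': 100, 'num_writes': 50}
# ...and the one _EndGetIOStats takes afterwards.
final = {'num_reads': 180, 'num_writes': 90}

for stat in final:
    delta = final[stat] - initial[stat]
    unit = stat.split('_')[1]  # e.g. 'reads' out of 'num_reads'
    print('%s: %d %s' % (stat, delta, unit))
```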
106 def GetDisabledPrefixes(self): | 88 def GetDisabledPrefixes(self): |
107 return ['DISABLED_', 'FLAKY_', 'FAILS_'] | 89 return ['DISABLED_', 'FLAKY_', 'FAILS_'] |
108 | 90 |
109 def _ParseGTestListTests(self, all_tests): | 91 def _ParseGTestListTests(self, all_tests): |
110 ret = [] | 92 ret = [] |
111 current = '' | 93 current = '' |
112 disabled_prefixes = self.GetDisabledPrefixes() | 94 disabled_prefixes = self.GetDisabledPrefixes() |
113 for test in all_tests: | 95 for test in all_tests: |
114 if not test: | 96 if not test: |
115 continue | 97 continue |
116 if test[0] != ' ': | 98 if test[0] != ' ' and test.endswith('.'): |
117 current = test | 99 current = test |
118 continue | 100 continue |
119 if 'YOU HAVE' in test: | 101 if 'YOU HAVE' in test: |
120 break | 102 break |
121 test_name = test[2:] | 103 test_name = test[2:] |
122 if not any([test_name.startswith(x) for x in disabled_prefixes]): | 104 if not any([test_name.startswith(x) for x in disabled_prefixes]): |
123 ret += [current + test_name] | 105 ret += [current + test_name] |
124 return ret | 106 return ret |
125 | 107 |
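With the new endswith('.') guard, only a non-indented line of gtest's 'CaseName.' form opens a new test case. A hypothetical --gtest_list_tests listing and what the parser keeps from it:

```python
sample = [
    'FooTest.',                  # case header: no indent, trailing '.'
    '  Bar',                     # kept as 'FooTest.Bar'
    '  DISABLED_Baz',            # dropped via GetDisabledPrefixes()
    'YOU HAVE 1 DISABLED TEST',  # gtest footer: parsing stops here
]
# _ParseGTestListTests(sample) -> ['FooTest.Bar']
```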
126 def PushDataAndPakFiles(self): | 108 def PushDataAndPakFiles(self): |
(...skipping 15 matching lines...) |
142 overall_fail = False | 124 overall_fail = False |
143 re_run = re.compile('\[ RUN      \] ?(.*)\r\n') | 125 re_run = re.compile('\[ RUN      \] ?(.*)\r\n') |
144 # APK tests rely on the END tag. | 126 # APK tests rely on the END tag. |
145 re_end = re.compile('\[ END      \] ?(.*)\r\n') | 127 re_end = re.compile('\[ END      \] ?(.*)\r\n') |
146 # Signal handlers are installed before starting tests | 128 # Signal handlers are installed before starting tests |
147 # to output the CRASHED marker when a crash happens. | 129 # to output the CRASHED marker when a crash happens. |
148 re_crash = re.compile('\[ CRASHED      \](.*)\r\n') | 130 re_crash = re.compile('\[ CRASHED      \](.*)\r\n') |
149 re_fail = re.compile('\[  FAILED  \] ?(.*)\r\n') | 131 re_fail = re.compile('\[  FAILED  \] ?(.*)\r\n') |
150 re_runner_fail = re.compile('\[ RUNNER_FAILED \] ?(.*)\r\n') | 132 re_runner_fail = re.compile('\[ RUNNER_FAILED \] ?(.*)\r\n') |
151 re_ok = re.compile('\[       OK \] ?(.*)\r\n') | 133 re_ok = re.compile('\[       OK \] ?(.*)\r\n') |
152 (io_stats_before, ready_to_continue) = self._BeginGetIOStats() | 134 io_stats_before = self._BeginGetIOStats() |
153 while ready_to_continue: | 135 while True: |
154 found = p.expect([re_run, pexpect.EOF, re_end, re_runner_fail], | 136 found = p.expect([re_run, pexpect.EOF, re_end, re_runner_fail], |
155 timeout=self.timeout) | 137 timeout=self.timeout) |
156 if found == 1: # matched pexpect.EOF | 138 if found == 1: # matched pexpect.EOF |
157 break | 139 break |
158 if found == 2: # matched END. | 140 if found == 2: # matched END. |
159 break | 141 break |
160 if found == 3: # RUNNER_FAILED | 142 if found == 3: # RUNNER_FAILED |
161 logging.error('RUNNER_FAILED') | 143 logging.error('RUNNER_FAILED') |
162 overall_fail = True | 144 overall_fail = True |
163 break | 145 break |
(...skipping 15 matching lines...) |
179 failed_tests += [BaseTestResult(full_test_name.replace('\r', ''), | 161 failed_tests += [BaseTestResult(full_test_name.replace('\r', ''), |
180 p.before)] | 162 p.before)] |
181 if found >= 3: | 163 if found >= 3: |
182 # The test bailed out (i.e., didn't print OK or FAIL). | 164 # The test bailed out (i.e., didn't print OK or FAIL). |
183 if found == 4: # pexpect.TIMEOUT | 165 if found == 4: # pexpect.TIMEOUT |
184 logging.error('Test terminated after %d second timeout.', | 166 logging.error('Test terminated after %d second timeout.', |
185 self.timeout) | 167 self.timeout) |
186 timed_out = True | 168 timed_out = True |
187 break | 169 break |
188 p.close() | 170 p.close() |
189 if not self.rebaseline and ready_to_continue: | 171 if not self.rebaseline: |
190 ok_tests += self._EndGetIOStats(io_stats_before) | 172 ok_tests += self._EndGetIOStats(io_stats_before) |
191 ret_code = self._GetGTestReturnCode() | 173 ret_code = self._GetGTestReturnCode() |
192 if ret_code: | 174 if ret_code: |
193 failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code, | 175 failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code, |
194 'pexpect.before: %s' | 176 'pexpect.before: %s' |
195 '\npexpect.after: %s' | 177 '\npexpect.after: %s' |
196 % (p.before, | 178 % (p.before, |
197 p.after))] | 179 p.after))] |
198 # Create TestResults and return | 180 # Create TestResults and return |
199 return TestResults.FromRun(ok=ok_tests, failed=failed_tests, | 181 return TestResults.FromRun(ok=ok_tests, failed=failed_tests, |
200 crashed=crashed_tests, timed_out=timed_out, | 182 crashed=crashed_tests, timed_out=timed_out, |
201 overall_fail=overall_fail) | 183 overall_fail=overall_fail) |
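The loop above multiplexes pexpect matches over those markers to sort tests into ok/failed/crashed buckets. A stripped-down sketch of the same classification, run over a canned transcript instead of a live device session; classify_output and the transcript are illustrative, and gtest-standard spacing inside the brackets is assumed:

```python
import re

re_run = re.compile(r'\[ RUN      \] ?(.*)')
re_ok = re.compile(r'\[       OK \] ?(.*)')
re_fail = re.compile(r'\[  FAILED  \] ?(.*)')

def classify_output(lines):
    ok, failed = [], []
    for line in lines:
        m = re_ok.match(line)
        if m:
            ok.append(m.group(1))
            continue
        m = re_fail.match(line)
        if m:
            failed.append(m.group(1))
    return ok, failed

transcript = [
    '[ RUN      ] FooTest.Bar',
    '[       OK ] FooTest.Bar',
    '[ RUN      ] FooTest.Baz',
    '[  FAILED  ] FooTest.Baz',
]
print(classify_output(transcript))
# -> (['FooTest.Bar'], ['FooTest.Baz'])
```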