OLD | NEW |
---|---|
1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """Class for running instrumentation tests on a single device.""" | 5 """Class for running instrumentation tests on a single device.""" |
6 | 6 |
7 import logging | 7 import logging |
8 import os | 8 import os |
9 import re | 9 import re |
10 import time | 10 import time |
11 | 11 |
| 12 |
| 13 sys.path.append(os.path.join(sys.path[0], |
frankf 2013/09/20 02:07:39: import sys
bulach 2013/09/20 08:23:46: Done.
| 14     os.pardir, os.pardir, 'build', 'util', 'lib', |
| 15     'common')) |
| 16 import perf_tests_results_helper |
| 17 |
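Context on the thread above: the added block calls sys.path.append before sys is ever imported, which is what frankf flagged; bulach's "Done." indicates a later patch set adds the import. A minimal sketch of the presumably corrected module header, assuming the only change is adding import sys alongside the existing imports:

```python
import logging
import os
import re
import sys  # reviewer's fix: sys is used below but was not imported here
import time

# Make build/util/lib/common importable so the shared helper resolves.
sys.path.append(os.path.join(sys.path[0],
                             os.pardir, os.pardir, 'build', 'util', 'lib',
                             'common'))
import perf_tests_results_helper
```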
12 from pylib import android_commands | 18 from pylib import android_commands |
13 from pylib import constants | 19 from pylib import constants |
14 from pylib import flag_changer | 20 from pylib import flag_changer |
15 from pylib import perf_tests_helper | |
16 from pylib import valgrind_tools | 21 from pylib import valgrind_tools |
17 from pylib.base import base_test_result | 22 from pylib.base import base_test_result |
18 from pylib.base import base_test_runner | 23 from pylib.base import base_test_runner |
19 from pylib.instrumentation import json_perf_parser | 24 from pylib.instrumentation import json_perf_parser |
20 | 25 |
21 import test_result | 26 import test_result |
22 | 27 |
23 | 28 |
24 _PERF_TEST_ANNOTATION = 'PerfTest' | 29 _PERF_TEST_ANNOTATION = 'PerfTest' |
25 | 30 |
(...skipping 250 matching lines...)
276 for raw_perf_set in raw_perf_data: | 281 for raw_perf_set in raw_perf_data: |
277 if raw_perf_set: | 282 if raw_perf_set: |
278 perf_set = raw_perf_set.split(',') | 283 perf_set = raw_perf_set.split(',') |
279 if len(perf_set) != 3: | 284 if len(perf_set) != 3: |
280 raise Exception('Unexpected number of tokens in perf annotation ' | 285 raise Exception('Unexpected number of tokens in perf annotation ' |
281 'string: ' + raw_perf_set) | 286 'string: ' + raw_perf_set) |
282 | 287 |
283 # Process the performance data | 288 # Process the performance data |
284 result = json_perf_parser.GetAverageRunInfoFromJSONString(json_string, | 289 result = json_perf_parser.GetAverageRunInfoFromJSONString(json_string, |
285 perf_set[0]) | 290 perf_set[0]) |
286 perf_tests_helper.PrintPerfResult(perf_set[1], perf_set[2], | 291 perf_tests_results_helper.PrintPerfResult(perf_set[1], perf_set[2], |
287 [result['average']], | 292 [result['average']], |
288 result['units']) | 293 result['units']) |
289 | 294 |
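For context on the three-token check above: each perf set appears to be a comma-separated triple, where the first token names the run inside the logged JSON blob (passed to GetAverageRunInfoFromJSONString) and the last two name the chart and series handed to PrintPerfResult. A minimal sketch of that parsing contract; the annotation value here is hypothetical:

```python
# Hypothetical payload; real values come from the test's perf annotation.
raw_perf_set = 'ContentViewRenderTime,render_time,content_view'

perf_set = raw_perf_set.split(',')
if len(perf_set) != 3:
  raise Exception('Unexpected number of tokens in perf annotation '
                  'string: ' + raw_perf_set)

# json_run_name selects the run in the JSON; graph_name/trace_name
# label the reported result.
json_run_name, graph_name, trace_name = perf_set
```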
290 def _SetupIndividualTestTimeoutScale(self, test): | 295 def _SetupIndividualTestTimeoutScale(self, test): |
291 timeout_scale = self._GetIndividualTestTimeoutScale(test) | 296 timeout_scale = self._GetIndividualTestTimeoutScale(test) |
292 valgrind_tools.SetChromeTimeoutScale(self.adb, timeout_scale) | 297 valgrind_tools.SetChromeTimeoutScale(self.adb, timeout_scale) |
293 | 298 |
294 def _GetIndividualTestTimeoutScale(self, test): | 299 def _GetIndividualTestTimeoutScale(self, test): |
295 """Returns the timeout scale for the given |test|.""" | 300 """Returns the timeout scale for the given |test|.""" |
296 annotations = self.test_pkg.GetTestAnnotations(test) | 301 annotations = self.test_pkg.GetTestAnnotations(test) |
297 timeout_scale = 1 | 302 timeout_scale = 1 |
298 if 'TimeoutScale' in annotations: | 303 if 'TimeoutScale' in annotations: |
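The diff is truncated at this point. A sketch of the presumed continuation, assuming the TimeoutScale annotation carries an integer multiplier as a string (the actual parsing is not visible in this hunk):

```python
annotations = {'TimeoutScale': '2'}  # hypothetical annotation dict

timeout_scale = 1
if 'TimeoutScale' in annotations:
  # Assumed: scale the per-test timeout by the annotated multiplier.
  timeout_scale = int(annotations['TimeoutScale'])
```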
(...skipping 67 matching lines...)
366 duration_ms = 0 | 371 duration_ms = 0 |
367 message = str(e) | 372 message = str(e) |
368 if not message: | 373 if not message: |
369 message = 'No information.' | 374 message = 'No information.' |
370 results.AddResult(test_result.InstrumentationTestResult( | 375 results.AddResult(test_result.InstrumentationTestResult( |
371 test, base_test_result.ResultType.CRASH, start_date_ms, duration_ms, | 376 test, base_test_result.ResultType.CRASH, start_date_ms, duration_ms, |
372 log=message)) | 377 log=message)) |
373 raw_result = None | 378 raw_result = None |
374 self.TestTeardown(test, raw_result) | 379 self.TestTeardown(test, raw_result) |
375 return (results, None if results.DidRunPass() else test) | 380 return (results, None if results.DidRunPass() else test) |
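The return value pairs the results with the test name whenever the run did not pass, which lets the harness retry that test. A hypothetical caller built on this contract; RunWithRetry and runner are illustrative names, not part of the patch:

```python
def RunWithRetry(runner, test):
  # RunTest returns (results, retry): retry is None on success,
  # otherwise the failed test's name so it can be re-run once.
  results, retry = runner.RunTest(test)
  if retry is not None:
    results, retry = runner.RunTest(retry)
  return results
```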