#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Unit tests for annotated log parsers (aka log processors) used by runtest.py.

The classes tested here reside in process_log_utils.py.

The script runtest.py has the option to parse test output locally and send
results to the master via annotator steps. This file tests those parsers.
"""

import json
import os
import unittest

import test_env  # pylint: disable=W0403,W0611

from slave import process_log_utils

# These should be the same as the constants used in process_log_utils.
# See: http://docs.buildbot.net/current/developer/results.html
SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY = range(6)

# Custom percentiles to use in the tests below; each one produces a
# 'hist1_<percentile>-summary.dat' output file in the histogram tests.
TEST_PERCENTILES = [.05, .3, .8]


class LogProcessorTest(unittest.TestCase):
  """Base class for log processor unit tests. Contains common operations."""

  def setUp(self):
| 34 """Set up for all test method of each test method below.""" | |
    super(LogProcessorTest, self).setUp()
    self._revision = 12345
    self._webkit_revision = 67890

  def _ConstructDefaultProcessor(
      self, log_processor_class, factory_properties=None,
      perf_expectations_path=None):
    """Creates a log processor instance.

    Args:
      log_processor_class: A sub-class of PerformanceLogProcessor.
      factory_properties: A dictionary of properties (optional).
      perf_expectations_path: Expectations file path (optional).

    Returns:
      An instance of the given log processor class.
    """
    factory_properties = factory_properties or {}
    factory_properties['perf_filename'] = perf_expectations_path
    factory_properties['perf_name'] = 'test-system'
    factory_properties['test_name'] = 'test-name'
    processor = log_processor_class(
        revision=self._revision, build_properties={},
        factory_properties=factory_properties,
        webkit_revision=self._webkit_revision)

    # Set custom percentiles. These are used by GraphingLogProcessor, which
    # stores them in a private attribute called _percentiles.
    if hasattr(processor, '_percentiles'):
      processor._percentiles = TEST_PERCENTILES

    return processor

  def _ProcessLog(self, log_processor, logfile):  # pylint: disable=R0201
    """Reads in an input log file and processes it.

    This changes the state of the log processor object; the output is stored
    in the object and can be retrieved via the PerformanceLogs() method.

    Args:
      log_processor: A PerformanceLogProcessor instance.
      logfile: File name of an input performance results log file.
    """
    with open(os.path.join(test_env.DATA_PATH, logfile)) as f:
      for line in f:
        log_processor.ProcessLine(line)

  def _CheckFileExistsWithData(self, logs, targetfile):
    """Asserts that |targetfile| exists in the |logs| dict and is non-empty."""
    self.assertTrue(targetfile in logs, 'File %s was not output.' % targetfile)
    self.assertTrue(logs[targetfile],
                    'File %s did not contain data.' % targetfile)

  def _ConstructParseAndCheckLogfiles(
      self, inputfiles, logfiles, log_processor_class, *args, **kwargs):
    """Processes the given input files and checks the output files exist.

    Any extra arguments are passed through to _ConstructDefaultProcessor.

    Args:
      inputfiles: A list of input performance results log file names.
      logfiles: A list of expected output ".dat" file names.
      log_processor_class: The log processor class to use.

    Returns:
      A dictionary mapping output file name to output file lines.
    """
    parser = self._ConstructDefaultProcessor(
        log_processor_class, *args, **kwargs)
    for inputfile in inputfiles:
      self._ProcessLog(parser, inputfile)

    logs = parser.PerformanceLogs()
    for logfile in logfiles:
      self._CheckFileExistsWithData(logs, logfile)

    return logs

  def _ConstructParseAndCheckJSON(
      self, inputfiles, logfiles, subdir, log_processor_class, *args, **kwargs):
    """Processes input with a log processor and checks against expectations.

    Any extra arguments are passed through to _ConstructDefaultProcessor.

    Args:
      inputfiles: A list of input performance result log file names.
      logfiles: A list of expected output ".dat" file names.
      subdir: Subdirectory containing expected output files, or None.
      log_processor_class: A log processor class.
    """
    logs = self._ConstructParseAndCheckLogfiles(
        inputfiles, logfiles, log_processor_class, *args, **kwargs)
    for filename in logfiles:
      actual = json.loads('\n'.join(logs[filename]))
      if subdir:
        path = os.path.join(test_env.DATA_PATH, subdir, filename)
      else:
        path = os.path.join(test_env.DATA_PATH, filename)
      with open(path) as f:
        expected = json.load(f)
      self.assertEqual(
          expected, actual,
          'JSON data in %s did not match expectations.' % filename)


class GraphingLogProcessorTest(LogProcessorTest):
  """Test case for basic functionality of GraphingLogProcessor class."""

  def testSummary(self):
    """Tests the output of "summary" files, which contain per-graph data."""
    input_files = ['graphing_processor.log']
    output_files = ['%s-summary.dat' % graph for graph in (
        'commit_charge', 'ws_final_total', 'vm_final_browser',
        'vm_final_total', 'ws_final_browser', 'processes',
        'artificial_graph')]

    self._ConstructParseAndCheckJSON(input_files, output_files, None,
                                     process_log_utils.GraphingLogProcessor)

  def testGraphList(self):
    """Tests the "graphs.dat" output file, which contains a list of graphs."""
    input_files = ['graphing_processor.log']
    graphfile = 'graphs.dat'
    output_files = [graphfile]

    logs = self._ConstructParseAndCheckLogfiles(
        input_files, output_files, process_log_utils.GraphingLogProcessor)

    actual = json.loads('\n'.join(logs[graphfile]))
    with open(os.path.join(test_env.DATA_PATH,
                           'graphing_processor-graphs.dat')) as f:
      expected = json.load(f)

    self.assertEqual(len(actual), len(expected))

    for graph in expected:
      self.assertTrue(graph['name'] in actual)
      for element in graph:
        self.assertEqual(actual[graph['name']][element], graph[element])

  def testHistogramGeometricMeanAndStandardDeviation(self):
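    """Tests geometric mean and standard deviation output for a histogram."""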
    input_files = ['graphing_processor.log']
    summary_file = 'hist1-summary.dat'
    output_files = [summary_file]

    logs = self._ConstructParseAndCheckLogfiles(
        input_files, output_files, process_log_utils.GraphingLogProcessor)

    actual = json.loads('\n'.join(logs[summary_file]))
    with open(os.path.join(test_env.DATA_PATH, summary_file)) as f:
      expected = json.load(f)

    self.assertEqual(
        actual, expected,
        'Filename %s did not contain expected data.' % summary_file)

  def testHistogramPercentiles(self):
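    """Tests per-percentile summary output for each of TEST_PERCENTILES."""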
    input_files = ['graphing_processor.log']
    summary_files = ['hist1_%s-summary.dat' % str(p) for p in TEST_PERCENTILES]
    output_files = summary_files

    logs = self._ConstructParseAndCheckLogfiles(
        input_files, output_files, process_log_utils.GraphingLogProcessor)

    for filename in output_files:
      actual = json.loads('\n'.join(logs[filename]))
      with open(os.path.join(test_env.DATA_PATH, filename)) as f:
        expected = json.load(f)
      self.assertEqual(
          actual, expected,
          'Filename %s did not contain expected data.' % filename)


class GraphingLogProcessorPerfTest(LogProcessorTest):
  """Another test case for the GraphingLogProcessor class.

  These tests compare results against the contents of a perf expectations
  file.
  """

  def _TestPerfExpectations(self, perf_expectations_file):
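    """Processes the sample log against the given perf expectations file.

    Args:
      perf_expectations_file: Name of a perf expectations file under
          test_env.DATA_PATH.

    Returns:
      The GraphingLogProcessor instance, so callers can make further
      assertions about its summary and command evaluation results.
    """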
    perf_expectations_path = os.path.join(
        test_env.DATA_PATH, perf_expectations_file)

    input_file = 'graphing_processor.log'
    graph_file = 'graphs.dat'

    parser = self._ConstructDefaultProcessor(
        process_log_utils.GraphingLogProcessor,
        factory_properties={'expectations': True, 'perf_id': 'tester'},
        perf_expectations_path=perf_expectations_path)

    self._ProcessLog(parser, input_file)

    actual = json.loads('\n'.join(parser.PerformanceLogs()[graph_file]))
    with open(os.path.join(test_env.DATA_PATH,
                           'graphing_processor-graphs.dat')) as f:
      expected = json.load(f)

    self.assertEqual(len(actual), len(expected))

    for graph in expected:
      self.assertTrue(graph['name'] in actual)
      for element in graph:
        self.assertEqual(actual[graph['name']][element], graph[element])
    return parser

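  # Each test below runs _TestPerfExpectations with a different expectations
  # file, then checks the first line of the performance summary and the
  # buildbot result code: PERF_IMPROVE maps to WARNINGS, PERF_REGRESS to
  # FAILURE, and no change to SUCCESS.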
  def testPerfExpectationsImproveRelative(self):
    step = self._TestPerfExpectations('perf_improve_relative.json')
    expected = 'PERF_IMPROVE: vm_final_browser/1t_vm_b (25.00%)'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(WARNINGS, step.evaluateCommand('mycommand'))

  def testPerfExpectationsRegressRelative(self):
    step = self._TestPerfExpectations('perf_regress_relative.json')
    expected = 'PERF_REGRESS: vm_final_browser/1t_vm_b (50.00%)'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(FAILURE, step.evaluateCommand('mycommand'))

  def testPerfExpectationsImproveRelativeFloat(self):
    step = self._TestPerfExpectations('perf_improve_relative_float.json')
    expected = 'PERF_IMPROVE: vm_final_browser/1t_vm_b (25.10%)'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(WARNINGS, step.evaluateCommand('mycommand'))

  def testPerfExpectationsImproveRelativeFloatNonSci(self):
    step = self._TestPerfExpectations(
        'perf_improve_relative_float_nonscientific.json')
    expected = 'PERF_IMPROVE: vm_final_browser/1t_vm_b (25.10%)'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(WARNINGS, step.evaluateCommand('mycommand'))

  def testPerfExpectationsRegressRelativeFloat(self):
    step = self._TestPerfExpectations('perf_regress_relative_float.json')
    expected = 'PERF_REGRESS: vm_final_browser/1t_vm_b (49.96%)'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(FAILURE, step.evaluateCommand('mycommand'))

  def testPerfExpectationsRegressAbsolute(self):
    step = self._TestPerfExpectations('perf_regress_absolute.json')
    expected = 'PERF_REGRESS: vm_final_browser/1t_vm_b (2.49%)'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(FAILURE, step.evaluateCommand('mycommand'))

  def testPerfExpectationsImproveAbsolute(self):
    step = self._TestPerfExpectations('perf_improve_absolute.json')
    expected = 'PERF_IMPROVE: vm_final_browser/1t_vm_b (3.20%)'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(WARNINGS, step.evaluateCommand('mycommand'))

  def testPerfExpectationsRegressAbsoluteFloat(self):
    step = self._TestPerfExpectations('perf_regress_absolute_float.json')
    expected = 'PERF_REGRESS: vm_final_browser/1t_vm_b (2.55%)'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(FAILURE, step.evaluateCommand('mycommand'))

  def testPerfExpectationsRegressAbsoluteFloatNonSci(self):
    step = self._TestPerfExpectations(
        'perf_regress_absolute_float_nonscientific.json')
    expected = 'PERF_REGRESS: vm_final_browser/1t_vm_b (2.55%)'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(FAILURE, step.evaluateCommand('mycommand'))

  def testPerfExpectationsImproveAbsoluteFloat(self):
    step = self._TestPerfExpectations('perf_improve_absolute_float.json')
    expected = 'PERF_IMPROVE: vm_final_browser/1t_vm_b (3.21%)'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(WARNINGS, step.evaluateCommand('mycommand'))

  def testPerfExpectationsNochangeRelative(self):
    step = self._TestPerfExpectations('perf_nochange_relative.json')
    expected = '12t_cc: 50.2k'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(SUCCESS, step.evaluateCommand('mycommand'))

  def testPerfExpectationsNochangeAbsolute(self):
    step = self._TestPerfExpectations('perf_nochange_absolute.json')
    expected = '12t_cc: 50.2k'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(SUCCESS, step.evaluateCommand('mycommand'))

  def testPerfExpectationsNochangeRelativeFloat(self):
    step = self._TestPerfExpectations('perf_nochange_relative_float.json')
    expected = '12t_cc: 50.2k'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(SUCCESS, step.evaluateCommand('mycommand'))

  def testPerfExpectationsNochangeAbsoluteFloat(self):
    step = self._TestPerfExpectations('perf_nochange_absolute_float.json')
    expected = '12t_cc: 50.2k'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(SUCCESS, step.evaluateCommand('mycommand'))

  def testPerfExpectationsBetterLowerSuccess(self):
    step = self._TestPerfExpectations('perf_test_better_lower_success.json')
    expected = '12t_cc: 50.2k'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(SUCCESS, step.evaluateCommand('mycommand'))

  def testPerfExpectationsBetterLowerImprove(self):
    step = self._TestPerfExpectations('perf_test_better_lower_improve.json')
    expected = 'PERF_IMPROVE: vm_final_browser/1t_vm_b (0.01%)'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(WARNINGS, step.evaluateCommand('mycommand'))

  def testPerfExpectationsBetterLowerRegress(self):
    step = self._TestPerfExpectations('perf_test_better_lower_regress.json')
    expected = 'PERF_REGRESS: vm_final_browser/1t_vm_b (0.01%)'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(FAILURE, step.evaluateCommand('mycommand'))

  def testPerfExpectationsBetterHigherSuccess(self):
    step = self._TestPerfExpectations('perf_test_better_higher_success.json')
    expected = '12t_cc: 50.2k'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(SUCCESS, step.evaluateCommand('mycommand'))

  def testPerfExpectationsBetterHigherImprove(self):
    step = self._TestPerfExpectations('perf_test_better_higher_improve.json')
    expected = 'PERF_IMPROVE: vm_final_browser/1t_vm_b (0.01%)'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(WARNINGS, step.evaluateCommand('mycommand'))

  def testPerfExpectationsBetterHigherRegress(self):
    step = self._TestPerfExpectations('perf_test_better_higher_regress.json')
    expected = 'PERF_REGRESS: vm_final_browser/1t_vm_b (0.01%)'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(FAILURE, step.evaluateCommand('mycommand'))

  def testPerfExpectationsRegressZero(self):
    step = self._TestPerfExpectations(
        'perf_test_better_lower_regress_zero.json')
    expected = 'PERF_REGRESS: vm_final_browser/1t_vm_b (inf%)'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(FAILURE, step.evaluateCommand('mycommand'))

  def testPerfExpectationsImproveZero(self):
    step = self._TestPerfExpectations(
        'perf_test_better_higher_improve_zero.json')
    expected = 'PERF_IMPROVE: vm_final_browser/1t_vm_b (inf%)'
    self.assertEqual(expected, step.PerformanceSummary()[0])
    self.assertEqual(WARNINGS, step.evaluateCommand('mycommand'))


class GraphingPageCyclerLogProcessorPerfTest(LogProcessorTest):
  """Unit tests for the GraphingPageCyclerLogProcessor class."""

  def testPageCycler(self):
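    """Tests the performance summary produced from a page cycler log."""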
    parser = self._ConstructDefaultProcessor(
        process_log_utils.GraphingPageCyclerLogProcessor)
    self._ProcessLog(parser, 'page_cycler.log')

    expected = 't: 2.32k'
    self.assertEqual(expected, parser.PerformanceSummary()[0])


if __name__ == '__main__':
  unittest.main()