OLD | NEW |
1 # Copyright (C) 2010 Google Inc. All rights reserved. | 1 # Copyright (C) 2010 Google Inc. All rights reserved. |
2 # | 2 # |
3 # Redistribution and use in source and binary forms, with or without | 3 # Redistribution and use in source and binary forms, with or without |
4 # modification, are permitted provided that the following conditions are | 4 # modification, are permitted provided that the following conditions are |
5 # met: | 5 # met: |
6 # | 6 # |
7 # * Redistributions of source code must retain the above copyright | 7 # * Redistributions of source code must retain the above copyright |
8 # notice, this list of conditions and the following disclaimer. | 8 # notice, this list of conditions and the following disclaimer. |
9 # * Redistributions in binary form must reproduce the above | 9 # * Redistributions in binary form must reproduce the above |
10 # copyright notice, this list of conditions and the following disclaimer | 10 # copyright notice, this list of conditions and the following disclaimer |
11 # in the documentation and/or other materials provided with the | 11 # in the documentation and/or other materials provided with the |
12 # distribution. | 12 # distribution. |
13 # * Neither the name of Google Inc. nor the names of its | 13 # * Neither the name of Google Inc. nor the names of its |
14 # contributors may be used to endorse or promote products derived from | 14 # contributors may be used to endorse or promote products derived from |
15 # this software without specific prior written permission. | 15 # this software without specific prior written permission. |
16 # | 16 # |
17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
18 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 18 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
19 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 19 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
20 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 20 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
21 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 21 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
22 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 22 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
23 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 23 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
24 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 24 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
25 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 25 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
26 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 26 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
28 | 28 |
29 import json | 29 import json |
30 import logging | |
31 import subprocess | |
32 import sys | |
33 import time | |
34 import urllib2 | |
35 import xml.dom.minidom | |
36 | 30 |
37 from webkitpy.common.checkout.scm.detection import SCMDetector | 31 # FIXME: This is the left-overs from when we used to generate JSON here. |
38 from webkitpy.common.net.file_uploader import FileUploader | 32 # Now it's a group of functions used by a hodge-podge of different classes. |
39 | 33 # This file should just go away entirely. |
40 # A JSON results generator for generic tests. | |
41 # FIXME: move this code out of the layout_package directory. | |
42 | |
43 _log = logging.getLogger(__name__) | |
44 | 34 |
45 _JSON_PREFIX = "ADD_RESULTS(" | 35 _JSON_PREFIX = "ADD_RESULTS(" |
46 _JSON_SUFFIX = ");" | 36 _JSON_SUFFIX = ");" |
47 | 37 |
48 | 38 |
49 def has_json_wrapper(string): | 39 def has_json_wrapper(string): |
50 return string.startswith(_JSON_PREFIX) and string.endswith(_JSON_SUFFIX) | 40 return string.startswith(_JSON_PREFIX) and string.endswith(_JSON_SUFFIX) |
51 | 41 |
52 | 42 |
53 def strip_json_wrapper(json_content): | 43 def strip_json_wrapper(json_content): |
(...skipping 10 matching lines...) |
64 | 54 |
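The ADD_RESULTS(...) prefix and suffix form a JSONP-style wrapper around the results payload, presumably so the dashboard can load it as a script. A self-contained sketch of the round trip; strip_json_wrapper's body is collapsed in the diff above, so the slicing shown here is an assumption based on the prefix/suffix constants:

    import json

    _JSON_PREFIX = "ADD_RESULTS("
    _JSON_SUFFIX = ");"

    def has_json_wrapper(string):
        return string.startswith(_JSON_PREFIX) and string.endswith(_JSON_SUFFIX)

    # Wrap a payload the way the dashboard expects, then strip the wrapper back off.
    wrapped = _JSON_PREFIX + json.dumps({"version": 4}) + _JSON_SUFFIX
    assert has_json_wrapper(wrapped)
    payload = wrapped[len(_JSON_PREFIX):-len(_JSON_SUFFIX)]
    assert json.loads(payload) == {"version": 4}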
65 | 55 |
66 def write_json(filesystem, json_object, file_path, callback=None): | 56 def write_json(filesystem, json_object, file_path, callback=None): |
67 # Specify separators in order to get compact encoding. | 57 # Specify separators in order to get compact encoding. |
68 json_string = json.dumps(json_object, separators=(',', ':')) | 58 json_string = json.dumps(json_object, separators=(',', ':')) |
69 if callback: | 59 if callback: |
70 json_string = callback + "(" + json_string + ");" | 60 json_string = callback + "(" + json_string + ");" |
71 filesystem.write_text_file(file_path, json_string) | 61 filesystem.write_text_file(file_path, json_string) |
72 | 62 |
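A usage sketch for write_json above. InMemoryFileSystem is a hypothetical stand-in for the webkitpy filesystem object (only write_text_file is needed), and the output path is made up:

    class InMemoryFileSystem(object):
        """Hypothetical stand-in; the real callers pass port._filesystem."""
        def __init__(self):
            self.files = {}

        def write_text_file(self, path, contents):
            self.files[path] = contents

    fs = InMemoryFileSystem()
    write_json(fs, {"tests": {}}, "/tmp/results.json", callback="ADD_RESULTS")
    # Compact separators plus the optional callback produce:
    assert fs.files["/tmp/results.json"] == 'ADD_RESULTS({"tests":{}});'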
73 | 63 |
74 def convert_trie_to_flat_paths(trie, prefix=None): | |
75 """Converts the directory structure in the given trie to flat paths, prepend
ing a prefix to each.""" | |
76 result = {} | |
77 for name, data in trie.iteritems(): | |
78 if prefix: | |
79 name = prefix + "/" + name | |
80 | |
81 if len(data) and not "results" in data: | |
82 result.update(convert_trie_to_flat_paths(data, name)) | |
83 else: | |
84 result[name] = data | |
85 | |
86 return result | |
87 | |
88 | |
89 def add_path_to_trie(path, value, trie): | 64 def add_path_to_trie(path, value, trie): |
90 """Inserts a single flat directory path and associated value into a director
y trie structure.""" | 65 """Inserts a single flat directory path and associated value into a director
y trie structure.""" |
91 if not "/" in path: | 66 if not "/" in path: |
92 trie[path] = value | 67 trie[path] = value |
93 return | 68 return |
94 | 69 |
95 directory, slash, rest = path.partition("/") | 70 directory, slash, rest = path.partition("/") |
96 if not directory in trie: | 71 if not directory in trie: |
97 trie[directory] = {} | 72 trie[directory] = {} |
98 add_path_to_trie(rest, value, trie[directory]) | 73 add_path_to_trie(rest, value, trie[directory]) |
99 | 74 |
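The two trie helpers are roughly inverse operations: add_path_to_trie (kept in the new file) nests a flat "dir/file" path into per-directory dictionaries, while convert_trie_to_flat_paths (old file only) rebuilds the flat keys, treating any dictionary that contains a "results" entry as a leaf. A round-trip sketch, assuming both helpers are importable and running under Python 2 (convert_trie_to_flat_paths uses iteritems):

    trie = {}
    add_path_to_trie("foo/bar/baz.html", {"results": [[1, "P"]]}, trie)
    add_path_to_trie("foo/qux.html", {"results": [[1, "F"]]}, trie)
    # trie == {"foo": {"bar": {"baz.html": {"results": [[1, "P"]]}},
    #                  "qux.html": {"results": [[1, "F"]]}}}

    flat = convert_trie_to_flat_paths(trie)
    assert flat == {"foo/bar/baz.html": {"results": [[1, "P"]]},
                    "foo/qux.html": {"results": [[1, "F"]]}}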
| 75 |
100 def test_timings_trie(port, individual_test_timings): | 76 def test_timings_trie(port, individual_test_timings): |
101 """Breaks a test name into chunks by directory and puts the test time as a v
alue in the lowest part, e.g. | 77 """Breaks a test name into chunks by directory and puts the test time as a v
alue in the lowest part, e.g. |
102 foo/bar/baz.html: 1ms | 78 foo/bar/baz.html: 1ms |
103 foo/bar/baz1.html: 3ms | 79 foo/bar/baz1.html: 3ms |
104 | 80 |
105 becomes | 81 becomes |
106 foo: { | 82 foo: { |
107 bar: { | 83 bar: { |
108 baz.html: 1, | 84 baz.html: 1, |
109 baz1.html: 3 | 85 baz1.html: 3 |
110 } | 86 } |
111 } | 87 } |
112 """ | 88 """ |
113 trie = {} | 89 trie = {} |
114 for test_result in individual_test_timings: | 90 for test_result in individual_test_timings: |
115 test = test_result.test_name | 91 test = test_result.test_name |
116 | 92 |
117 add_path_to_trie(test, int(1000 * test_result.test_run_time), trie) | 93 add_path_to_trie(test, int(1000 * test_result.test_run_time), trie) |
118 | 94 |
119 return trie | 95 return trie |
120 | |
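A sketch of test_timings_trie matching the docstring's example. Timing is a hypothetical stand-in for the result objects (only .test_name and .test_run_time are read, and the port argument is unused by the function body):

    import collections

    Timing = collections.namedtuple('Timing', ['test_name', 'test_run_time'])

    timings = [Timing('foo/bar/baz.html', 0.001),   # 1 ms
               Timing('foo/bar/baz1.html', 0.003)]  # 3 ms
    trie = test_timings_trie(None, timings)
    # Times are stored as integer milliseconds at the leaves:
    # trie == {'foo': {'bar': {'baz.html': 1, 'baz1.html': 3}}}
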
121 # FIXME: We already have a TestResult class in test_results.py | |
122 class TestResult(object): | |
123 """A simple class that represents a single test result.""" | |
124 | |
125 # Test modifier constants. | |
126 (NONE, FAILS, FLAKY, DISABLED) = range(4) | |
127 | |
128 def __init__(self, test, failed=False, elapsed_time=0): | |
129 self.test_name = test | |
130 self.failed = failed | |
131 self.test_run_time = elapsed_time | |
132 | |
133 test_name = test | |
134 try: | |
135 test_name = test.split('.')[1] | |
136 except IndexError: | |
137 _log.warn("Invalid test name: %s.", test) | |
138 pass | |
139 | |
140 if test_name.startswith('FAILS_'): | |
141 self.modifier = self.FAILS | |
142 elif test_name.startswith('FLAKY_'): | |
143 self.modifier = self.FLAKY | |
144 elif test_name.startswith('DISABLED_'): | |
145 self.modifier = self.DISABLED | |
146 else: | |
147 self.modifier = self.NONE | |
148 | |
149 def fixable(self): | |
150 return self.failed or self.modifier == self.DISABLED | |
151 | |
152 | |
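TestResult derives its modifier from gtest-style names: the second dot-separated chunk is checked for a FAILS_/FLAKY_/DISABLED_ prefix (names without a dot are logged and used as-is), and fixable() reports tests that failed or are disabled. A short sketch with hypothetical test names, assuming the class above:

    flaky = TestResult("MyTestSuite.FLAKY_testSomething", failed=False, elapsed_time=0.25)
    # flaky.modifier == TestResult.FLAKY; flaky.fixable() == False

    disabled = TestResult("MyTestSuite.DISABLED_testOther")
    # disabled.modifier == TestResult.DISABLED; disabled.fixable() == True

    failed = TestResult("MyTestSuite.testSomethingElse", failed=True, elapsed_time=1.5)
    # failed.modifier == TestResult.NONE; failed.fixable() == True because it failed
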
153 class JSONResultsGeneratorBase(object): | |
154 """A JSON results generator for generic tests.""" | |
155 | |
156 MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750 | |
157 # Min time (seconds) that will be added to the JSON. | |
158 MIN_TIME = 1 | |
159 | |
160 # Note that in non-chromium tests those chars are used to indicate | |
161 # test modifiers (FAILS, FLAKY, etc) but not actual test results. | |
162 PASS_RESULT = "P" | |
163 SKIP_RESULT = "X" | |
164 FAIL_RESULT = "F" | |
165 FLAKY_RESULT = "L" | |
166 NO_DATA_RESULT = "N" | |
167 | |
168 MODIFIER_TO_CHAR = {TestResult.NONE: PASS_RESULT, | |
169 TestResult.DISABLED: SKIP_RESULT, | |
170 TestResult.FAILS: FAIL_RESULT, | |
171 TestResult.FLAKY: FLAKY_RESULT} | |
172 | |
173 VERSION = 4 | |
174 VERSION_KEY = "version" | |
175 RESULTS = "results" | |
176 TIMES = "times" | |
177 BUILD_NUMBERS = "buildNumbers" | |
178 TIME = "secondsSinceEpoch" | |
179 TESTS = "tests" | |
180 | |
181 FIXABLE_COUNT = "fixableCount" | |
182 FIXABLE = "fixableCounts" | |
183 ALL_FIXABLE_COUNT = "allFixableCount" | |
184 | |
185 RESULTS_FILENAME = "results.json" | |
186 TIMES_MS_FILENAME = "times_ms.json" | |
187 INCREMENTAL_RESULTS_FILENAME = "incremental_results.json" | |
188 | |
 189 URL_FOR_TEST_LIST_JSON = "http://%s/testfile?builder=%s&name=%s&testlistjson=1&testtype=%s&master=%s" |
190 | |
191 # FIXME: Remove generate_incremental_results once the reference to it in | |
 192 # http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/slave/gtest_slave_utils.py |
193 # has been removed. | |
194 def __init__(self, port, builder_name, build_name, build_number, | |
195 results_file_base_path, builder_base_url, | |
196 test_results_map, svn_repositories=None, | |
197 test_results_server=None, | |
198 test_type="", | |
199 master_name="", | |
200 generate_incremental_results=None): | |
201 """Modifies the results.json file. Grabs it off the archive directory | |
202 if it is not found locally. | |
203 | |
204 Args | |
205 port: port-specific wrapper | |
206 builder_name: the builder name (e.g. Webkit). | |
207 build_name: the build name (e.g. webkit-rel). | |
208 build_number: the build number. | |
209 results_file_base_path: Absolute path to the directory containing the | |
210 results json file. | |
211 builder_base_url: the URL where we have the archived test results. | |
212 If this is None no archived results will be retrieved. | |
213 test_results_map: A dictionary that maps test_name to TestResult. | |
214 svn_repositories: A (json_field_name, svn_path) pair for SVN | |
215 repositories that tests rely on. The SVN revision will be | |
216 included in the JSON with the given json_field_name. | |
217 test_results_server: server that hosts test results json. | |
218 test_type: test type string (e.g. 'layout-tests'). | |
219 master_name: the name of the buildbot master. | |
220 """ | |
221 self._port = port | |
222 self._filesystem = port._filesystem | |
223 self._executive = port._executive | |
224 self._builder_name = builder_name | |
225 self._build_name = build_name | |
226 self._build_number = build_number | |
227 self._builder_base_url = builder_base_url | |
228 self._results_directory = results_file_base_path | |
229 | |
230 self._test_results_map = test_results_map | |
231 self._test_results = test_results_map.values() | |
232 | |
233 self._svn_repositories = svn_repositories | |
234 if not self._svn_repositories: | |
235 self._svn_repositories = {} | |
236 | |
237 self._test_results_server = test_results_server | |
238 self._test_type = test_type | |
239 self._master_name = master_name | |
240 | |
241 self._archived_results = None | |
242 | |
243 def generate_json_output(self): | |
244 json_object = self.get_json() | |
245 if json_object: | |
 246 file_path = self._filesystem.join(self._results_directory, self.INCREMENTAL_RESULTS_FILENAME) |
247 write_json(self._filesystem, json_object, file_path) | |
248 | |
249 def generate_times_ms_file(self): | |
 250 # FIXME: rename to generate_times_ms_file. This needs to be coordinated with |
251 # changing the calls to this on the chromium build slaves. | |
252 times = test_timings_trie(self._port, self._test_results_map.values()) | |
 253 file_path = self._filesystem.join(self._results_directory, self.TIMES_MS_FILENAME) |
254 write_json(self._filesystem, times, file_path) | |
255 | |
256 def get_json(self): | |
257 """Gets the results for the results.json file.""" | |
258 results_json = {} | |
259 | |
260 if not results_json: | |
261 results_json, error = self._get_archived_json_results() | |
262 if error: | |
263 # If there was an error don't write a results.json | |
264 # file at all as it would lose all the information on the | |
265 # bot. | |
266 _log.error("Archive directory is inaccessible. Not " | |
267 "modifying or clobbering the results.json " | |
268 "file: " + str(error)) | |
269 return None | |
270 | |
271 builder_name = self._builder_name | |
272 if results_json and builder_name not in results_json: | |
273 _log.debug("Builder name (%s) is not in the results.json file." | |
274 % builder_name) | |
275 | |
276 self._convert_json_to_current_version(results_json) | |
277 | |
278 if builder_name not in results_json: | |
279 results_json[builder_name] = ( | |
280 self._create_results_for_builder_json()) | |
281 | |
282 results_for_builder = results_json[builder_name] | |
283 | |
284 if builder_name: | |
285 self._insert_generic_metadata(results_for_builder) | |
286 | |
287 self._insert_failure_summaries(results_for_builder) | |
288 | |
 289 # Update all the failing tests with result type and time. |
290 tests = results_for_builder[self.TESTS] | |
291 all_failing_tests = self._get_failed_test_names() | |
292 all_failing_tests.update(convert_trie_to_flat_paths(tests)) | |
293 | |
294 for test in all_failing_tests: | |
295 self._insert_test_time_and_result(test, tests) | |
296 | |
297 return results_json | |
298 | |
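For orientation, the aggregated results.json that get_json maintains has roughly this shape (version 4). One builder and one test are shown with made-up values; the key names come from the class constants above and from _insert_generic_metadata below:

    results_json = {
        "version": 4,
        "Webkit": {                              # one entry per builder name
            "buildNumbers": ["1234"],            # newest build first in every list
            "secondsSinceEpoch": [1300000000],
            "chromeRevision": ["78000"],         # one <name>Revision list per SVN repository
            "fixableCount": [1],
            "fixableCounts": [{"F": 1, "P": 99}],
            "allFixableCount": [100],
            "tests": {                           # trie keyed by path segments
                "foo": {
                    "bar.html": {
                        "results": [[1, "F"]],   # run-length encoded result characters
                        "times": [[1, 2]],       # run-length encoded times in seconds
                    },
                },
            },
        },
    }
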
299 def set_archived_results(self, archived_results): | |
300 self._archived_results = archived_results | |
301 | |
302 def upload_json_files(self, json_files): | |
303 """Uploads the given json_files to the test_results_server (if the | |
304 test_results_server is given).""" | |
305 if not self._test_results_server: | |
306 return | |
307 | |
308 if not self._master_name: | |
 309 _log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.") |
310 return | |
311 | |
312 _log.info("Uploading JSON files for builder: %s", self._builder_name) | |
313 attrs = [("builder", self._builder_name), | |
314 ("testtype", self._test_type), | |
315 ("master", self._master_name)] | |
316 | |
317 files = [(file, self._filesystem.join(self._results_directory, file)) | |
318 for file in json_files] | |
319 | |
320 url = "http://%s/testfile/upload" % self._test_results_server | |
321 # Set uploading timeout in case appengine server is having problems. | |
322 # 120 seconds are more than enough to upload test results. | |
323 uploader = FileUploader(url, 120) | |
324 try: | |
 325 response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs) |
326 if response: | |
327 if response.code == 200: | |
328 _log.info("JSON uploaded.") | |
329 else: | |
 330 _log.debug("JSON upload failed, %d: '%s'" % (response.code, response.read())) |
331 else: | |
332 _log.error("JSON upload failed; no response returned") | |
333 except Exception, err: | |
334 _log.error("Upload failed: %s" % err) | |
335 return | |
336 | |
337 | |
338 def _get_test_timing(self, test_name): | |
339 """Returns test timing data (elapsed time) in second | |
340 for the given test_name.""" | |
341 if test_name in self._test_results_map: | |
342 # Floor for now to get time in seconds. | |
343 return int(self._test_results_map[test_name].test_run_time) | |
344 return 0 | |
345 | |
346 def _get_failed_test_names(self): | |
347 """Returns a set of failed test names.""" | |
348 return set([r.test_name for r in self._test_results if r.failed]) | |
349 | |
350 def _get_modifier_char(self, test_name): | |
351 """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT, | |
352 PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test modifier | |
353 for the given test_name. | |
354 """ | |
355 if test_name not in self._test_results_map: | |
356 return self.__class__.NO_DATA_RESULT | |
357 | |
358 test_result = self._test_results_map[test_name] | |
359 if test_result.modifier in self.MODIFIER_TO_CHAR.keys(): | |
360 return self.MODIFIER_TO_CHAR[test_result.modifier] | |
361 | |
362 return self.__class__.PASS_RESULT | |
363 | |
364 def _get_result_char(self, test_name): | |
365 """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT, | |
366 PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test result | |
367 for the given test_name. | |
368 """ | |
369 if test_name not in self._test_results_map: | |
370 return self.__class__.NO_DATA_RESULT | |
371 | |
372 test_result = self._test_results_map[test_name] | |
373 if test_result.modifier == TestResult.DISABLED: | |
374 return self.__class__.SKIP_RESULT | |
375 | |
376 if test_result.failed: | |
377 return self.__class__.FAIL_RESULT | |
378 | |
379 return self.__class__.PASS_RESULT | |
380 | |
381 def _get_svn_revision(self, in_directory): | |
382 """Returns the svn revision for the given directory. | |
383 | |
384 Args: | |
385 in_directory: The directory where svn is to be run. | |
386 """ | |
387 | |
 388 # FIXME: We initialize this here in order to engage the stupid windows hacks :). |
 389 # We can't reuse an existing scm object because the specific directories may |
390 # be part of other checkouts. | |
391 self._port.host.initialize_scm() | |
 392 scm = SCMDetector(self._filesystem, self._executive).detect_scm_system(in_directory) |
393 if scm: | |
394 return scm.svn_revision(in_directory) | |
395 return "" | |
396 | |
397 def _get_archived_json_results(self): | |
398 """Download JSON file that only contains test | |
399 name list from test-results server. This is for generating incremental | |
400 JSON so the file generated has info for tests that failed before but | |
401 pass or are skipped from current run. | |
402 | |
403 Returns (archived_results, error) tuple where error is None if results | |
404 were successfully read. | |
405 """ | |
406 results_json = {} | |
407 old_results = None | |
408 error = None | |
409 | |
410 if not self._test_results_server: | |
411 return {}, None | |
412 | |
413 results_file_url = (self.URL_FOR_TEST_LIST_JSON % | |
414 (urllib2.quote(self._test_results_server), | |
415 urllib2.quote(self._builder_name), | |
416 self.RESULTS_FILENAME, | |
417 urllib2.quote(self._test_type), | |
418 urllib2.quote(self._master_name))) | |
419 | |
420 try: | |
421 # FIXME: We should talk to the network via a Host object. | |
422 results_file = urllib2.urlopen(results_file_url) | |
423 info = results_file.info() | |
424 old_results = results_file.read() | |
425 except urllib2.HTTPError, http_error: | |
426 # A non-4xx status code means the bot is hosed for some reason | |
427 # and we can't grab the results.json file off of it. | |
 428 if (http_error.code < 400 or http_error.code >= 500): |
429 error = http_error | |
430 except urllib2.URLError, url_error: | |
431 error = url_error | |
432 | |
433 if old_results: | |
434 # Strip the prefix and suffix so we can get the actual JSON object. | |
435 old_results = strip_json_wrapper(old_results) | |
436 | |
437 try: | |
438 results_json = json.loads(old_results) | |
439 except: | |
440 _log.debug("results.json was not valid JSON. Clobbering.") | |
441 # The JSON file is not valid JSON. Just clobber the results. | |
442 results_json = {} | |
443 else: | |
444 _log.debug('Old JSON results do not exist. Starting fresh.') | |
445 results_json = {} | |
446 | |
447 return results_json, error | |
448 | |
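_get_archived_json_results fetches the existing per-builder test list from the results server before the current run is merged in. A hypothetical example of the URL it builds from URL_FOR_TEST_LIST_JSON (the server, builder, test type, and master name are made-up values; urllib2 keeps this Python 2, like the module itself):

    import urllib2

    url = JSONResultsGeneratorBase.URL_FOR_TEST_LIST_JSON % (
        urllib2.quote("test-results.appspot.com"),
        urllib2.quote("Webkit Linux"),
        JSONResultsGeneratorBase.RESULTS_FILENAME,
        urllib2.quote("layout-tests"),
        urllib2.quote("chromium.webkit"))
    # url == "http://test-results.appspot.com/testfile?builder=Webkit%20Linux"
    #        "&name=results.json&testlistjson=1&testtype=layout-tests&master=chromium.webkit"
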
449 def _insert_failure_summaries(self, results_for_builder): | |
450 """Inserts aggregate pass/failure statistics into the JSON. | |
451 This method reads self._test_results and generates | |
452 FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT entries. | |
453 | |
454 Args: | |
455 results_for_builder: Dictionary containing the test results for a | |
456 single builder. | |
457 """ | |
458 # Insert the number of tests that failed or skipped. | |
459 fixable_count = len([r for r in self._test_results if r.fixable()]) | |
460 self._insert_item_into_raw_list(results_for_builder, | |
461 fixable_count, self.FIXABLE_COUNT) | |
462 | |
463 # Create a test modifiers (FAILS, FLAKY etc) summary dictionary. | |
464 entry = {} | |
465 for test_name in self._test_results_map.iterkeys(): | |
466 result_char = self._get_modifier_char(test_name) | |
467 entry[result_char] = entry.get(result_char, 0) + 1 | |
468 | |
469 # Insert the pass/skip/failure summary dictionary. | |
470 self._insert_item_into_raw_list(results_for_builder, entry, | |
471 self.FIXABLE) | |
472 | |
473 # Insert the number of all the tests that are supposed to pass. | |
474 all_test_count = len(self._test_results) | |
475 self._insert_item_into_raw_list(results_for_builder, | |
476 all_test_count, self.ALL_FIXABLE_COUNT) | |
477 | |
478 def _insert_item_into_raw_list(self, results_for_builder, item, key): | |
479 """Inserts the item into the list with the given key in the results for | |
480 this builder. Creates the list if no such list exists. | |
481 | |
482 Args: | |
483 results_for_builder: Dictionary containing the test results for a | |
484 single builder. | |
485 item: Number or string to insert into the list. | |
486 key: Key in results_for_builder for the list to insert into. | |
487 """ | |
488 if key in results_for_builder: | |
489 raw_list = results_for_builder[key] | |
490 else: | |
491 raw_list = [] | |
492 | |
493 raw_list.insert(0, item) | |
494 raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG] | |
495 results_for_builder[key] = raw_list | |
496 | |
497 def _insert_item_run_length_encoded(self, item, encoded_results): | |
498 """Inserts the item into the run-length encoded results. | |
499 | |
500 Args: | |
501 item: String or number to insert. | |
502 encoded_results: run-length encoded results. An array of arrays, e.g. | |
503 [[3,'A'],[1,'Q']] encodes AAAQ. | |
504 """ | |
505 if len(encoded_results) and item == encoded_results[0][1]: | |
506 num_results = encoded_results[0][0] | |
507 if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG: | |
508 encoded_results[0][0] = num_results + 1 | |
509 else: | |
510 # Use a list instead of a class for the run-length encoding since | |
511 # we want the serialized form to be concise. | |
512 encoded_results.insert(0, [1, item]) | |
513 | |
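The per-test "results" and "times" arrays are run-length encoded with the newest run at index 0. A standalone restatement of the insertion rule for illustration (rle_insert mirrors _insert_item_run_length_encoded above, with the build cap passed in explicitly):

    def rle_insert(item, encoded_results, max_builds=750):
        # Extend the newest run if the item matches it, otherwise start a new run.
        if len(encoded_results) and item == encoded_results[0][1]:
            if encoded_results[0][0] <= max_builds:
                encoded_results[0][0] += 1
        else:
            encoded_results.insert(0, [1, item])

    encoded = []
    for result in ['Q', 'A', 'A', 'A']:   # oldest result inserted first
        rle_insert(result, encoded)
    assert encoded == [[3, 'A'], [1, 'Q']]   # encodes 'AAAQ', newest first
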
514 def _insert_generic_metadata(self, results_for_builder): | |
515 """ Inserts generic metadata (such as version number, current time etc) | |
516 into the JSON. | |
517 | |
518 Args: | |
519 results_for_builder: Dictionary containing the test results for | |
520 a single builder. | |
521 """ | |
522 self._insert_item_into_raw_list(results_for_builder, | |
523 self._build_number, self.BUILD_NUMBERS) | |
524 | |
525 # Include SVN revisions for the given repositories. | |
526 for (name, path) in self._svn_repositories: | |
 527 # Note: for JSON file's backward-compatibility we use 'chrome' rather |
528 # than 'chromium' here. | |
529 lowercase_name = name.lower() | |
530 if lowercase_name == 'chromium': | |
531 lowercase_name = 'chrome' | |
532 self._insert_item_into_raw_list(results_for_builder, | |
533 self._get_svn_revision(path), | |
534 lowercase_name + 'Revision') | |
535 | |
536 self._insert_item_into_raw_list(results_for_builder, | |
537 int(time.time()), | |
538 self.TIME) | |
539 | |
540 def _insert_test_time_and_result(self, test_name, tests): | |
541 """ Insert a test item with its results to the given tests dictionary. | |
542 | |
543 Args: | |
544 tests: Dictionary containing test result entries. | |
545 """ | |
546 | |
547 result = self._get_result_char(test_name) | |
548 time = self._get_test_timing(test_name) | |
549 | |
550 this_test = tests | |
551 for segment in test_name.split("/"): | |
552 if segment not in this_test: | |
553 this_test[segment] = {} | |
554 this_test = this_test[segment] | |
555 | |
556 if not len(this_test): | |
557 self._populate_results_and_times_json(this_test) | |
558 | |
559 if self.RESULTS in this_test: | |
 560 self._insert_item_run_length_encoded(result, this_test[self.RESULTS]) |
561 else: | |
562 this_test[self.RESULTS] = [[1, result]] | |
563 | |
564 if self.TIMES in this_test: | |
565 self._insert_item_run_length_encoded(time, this_test[self.TIMES]) | |
566 else: | |
567 this_test[self.TIMES] = [[1, time]] | |
568 | |
569 def _convert_json_to_current_version(self, results_json): | |
570 """If the JSON does not match the current version, converts it to the | |
571 current version and adds in the new version number. | |
572 """ | |
573 if self.VERSION_KEY in results_json: | |
574 archive_version = results_json[self.VERSION_KEY] | |
575 if archive_version == self.VERSION: | |
576 return | |
577 else: | |
578 archive_version = 3 | |
579 | |
580 # version 3->4 | |
581 if archive_version == 3: | |
582 num_results = len(results_json.values()) | |
583 for builder, results in results_json.iteritems(): | |
584 self._convert_tests_to_trie(results) | |
585 | |
586 results_json[self.VERSION_KEY] = self.VERSION | |
587 | |
588 def _convert_tests_to_trie(self, results): | |
589 if not self.TESTS in results: | |
590 return | |
591 | |
592 test_results = results[self.TESTS] | |
593 test_results_trie = {} | |
594 for test in test_results.iterkeys(): | |
595 single_test_result = test_results[test] | |
596 add_path_to_trie(test, single_test_result, test_results_trie) | |
597 | |
598 results[self.TESTS] = test_results_trie | |
599 | |
600 def _populate_results_and_times_json(self, results_and_times): | |
601 results_and_times[self.RESULTS] = [] | |
602 results_and_times[self.TIMES] = [] | |
603 return results_and_times | |
604 | |
605 def _create_results_for_builder_json(self): | |
606 results_for_builder = {} | |
607 results_for_builder[self.TESTS] = {} | |
608 return results_for_builder | |
609 | |
610 def _remove_items_over_max_number_of_builds(self, encoded_list): | |
611 """Removes items from the run-length encoded list after the final | |
612 item that exceeds the max number of builds to track. | |
613 | |
614 Args: | |
615 encoded_results: run-length encoded results. An array of arrays, e.g. | |
616 [[3,'A'],[1,'Q']] encodes AAAQ. | |
617 """ | |
618 num_builds = 0 | |
619 index = 0 | |
620 for result in encoded_list: | |
621 num_builds = num_builds + result[0] | |
622 index = index + 1 | |
623 if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG: | |
624 return encoded_list[:index] | |
625 return encoded_list | |
626 | |
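A standalone restatement of the truncation rule in _remove_items_over_max_number_of_builds: whole runs are kept until the cumulative run length first exceeds the build cap, and everything older is dropped.

    def truncate_runs(encoded_list, max_builds=750):
        num_builds = 0
        for index, (count, _value) in enumerate(encoded_list, 1):
            num_builds += count
            if num_builds > max_builds:
                # Keep the run that crossed the cap, drop the older runs.
                return encoded_list[:index]
        return encoded_list

    assert truncate_runs([[400, 'P'], [400, 'F'], [5, 'N']]) == [[400, 'P'], [400, 'F']]
    assert truncate_runs([[10, 'P']]) == [[10, 'P']]
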
627 def _normalize_results_json(self, test, test_name, tests): | |
628 """ Prune tests where all runs pass or tests that no longer exist and | |
629 truncate all results to maxNumberOfBuilds. | |
630 | |
631 Args: | |
632 test: ResultsAndTimes object for this test. | |
633 test_name: Name of the test. | |
634 tests: The JSON object with all the test results for this builder. | |
635 """ | |
636 test[self.RESULTS] = self._remove_items_over_max_number_of_builds( | |
637 test[self.RESULTS]) | |
638 test[self.TIMES] = self._remove_items_over_max_number_of_builds( | |
639 test[self.TIMES]) | |
640 | |
641 is_all_pass = self._is_results_all_of_type(test[self.RESULTS], | |
642 self.PASS_RESULT) | |
643 is_all_no_data = self._is_results_all_of_type(test[self.RESULTS], | |
644 self.NO_DATA_RESULT) | |
645 max_time = max([time[1] for time in test[self.TIMES]]) | |
646 | |
647 # Remove all passes/no-data from the results to reduce noise and | |
648 # filesize. If a test passes every run, but takes > MIN_TIME to run, | |
649 # don't throw away the data. | |
650 if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME): | |
651 del tests[test_name] | |
652 | |
653 def _is_results_all_of_type(self, results, type): | |
654 """Returns whether all the results are of the given type | |
655 (e.g. all passes).""" | |
656 return len(results) == 1 and results[0][1] == type | |
657 | |
658 | |
659 # Left here not to break anything. | |
660 class JSONResultsGenerator(JSONResultsGeneratorBase): | |
661 pass | |