Chromium Code Reviews

Unified Diff: scripts/slave/results_dashboard.py

Issue 217053012: Make results_dashboard send just one request per test run. (Closed) Base URL: https://chromium.googlesource.com/chromium/tools/build.git@master
Patch Set: Created 6 years, 8 months ago
 #!/usr/bin/env python
 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

-"""Functions for adding results to perf dashboard."""
+"""Functions for adding results to the Performance Dashboard."""

 import calendar
 import datetime
 import httplib
 import json
 import os
 import urllib
 import urllib2

 from slave import slave_utils

 # The paths in the results dashboard URLs for sending and viewing results.
 SEND_RESULTS_PATH = '/add_point'
 RESULTS_LINK_PATH = '/report?masters=%s&bots=%s&tests=%s&rev=%s'
+
 # CACHE_DIR/CACHE_FILENAME will be created in options.build_dir to cache
 # results which need to be retried.
 CACHE_DIR = 'results_dashboard'
 CACHE_FILENAME = 'results_to_retry'


 #TODO(xusydoc): set fail_hard to True when bots stabilize. See crbug.com/222607.
-def SendResults(logname, lines, system, test_name, url, masterid,
-                buildername, buildnumber, build_dir, supplemental_columns,
+def SendResults(logs_dict, perf_id, test, url, mastername, buildername,
+                buildnumber, build_dir, supplemental_columns,
                 fail_hard=False):
-  """Sends results to the Chrome Performance Dashboard.
+  """Takes data in the old log format and sends it to the dashboard.

-  Try to send any data from the cache file (which contains any data that wasn't
-  successfully sent in a previous run), as well as the data from the arguments
-  provided in this run.
+  This function tries to send any data from the cache file (which contains any
+  data that wasn't successfully sent in a previous run), as well as the data
+  from the arguments provided in this run.

   Args:
-    logname: Summary log file name. Contains the chart name.
-    lines: List of log-file lines. Each line should be valid JSON, and should
-        include the properties 'traces' and 'rev'.
-    system: A string such as 'linux-release', which comes from perf_id. This
-        is used to identify the bot in the Chrome Performance Dashboard.
-    test_name: Test name, which will be used as the first part of the
-        slash-separated test path on the Dashboard. (Note: if there are no
-        slashes in this name, then this is the test suite name. If you want
-        to have nested tests under one test suite, you could use a slash.)
-    url: Performance Dashboard URL (including schema).
-    masterid: ID of buildbot master, e.g. 'chromium.perf'.
-    buildername: Builder name, e.g. 'Linux QA Perf (1)'.
-    buildnumber: Build number (a string containing the number).
+    logs_dict: Map of log file name (which contains the chart name) to a list
+        of log file lines. Each of these lines should be valid JSON and should
+        include the properties 'traces' and 'rev'.
+    perf_id: A string such as 'linux-release'. This is the bot name used on
+        the dashboard.
+    test: Test suite name. (Note: you can also provide nested subtests under
+        the top-level test by separating names with a slash.)
+    url: Performance Dashboard URL.
+    mastername: Buildbot master name, e.g. 'chromium.perf'. Note that this is
+        *not* necessarily the same as the "master name" used on the dashboard.
+        This was previously incorrectly called the "master id".
+    buildername: Builder name.
+    buildnumber: Build number as a string.
     build_dir: Directory name, where the cache dir shall be.
     supplemental_columns: Dict of supplemental data to upload.
     fail_hard: Whether a fatal error will cause this step of the buildbot
         run to be annotated with "@@@STEP_EXCEPTION@@@".
-
-  Returns: None
   """
-  if not logname.endswith('-summary.dat'):
-    return
-
-  new_results_line = _GetResultsJson(logname, lines, system, test_name, url,
-                                     masterid, buildername, buildnumber,
+  new_results_line = _GetResultsJson(logs_dict, perf_id, test, url,
+                                     mastername, buildername, buildnumber,
                                      supplemental_columns)
   # Write the new request line to the cache, in case of errors.
   cache_filename = _GetCacheFileName(build_dir)
   cache = open(cache_filename, 'ab')
   cache.write('\n' + new_results_line)
   cache.close()

   # Send all the results from this run and the previous cache to the dashboard.
   cache = open(cache_filename, 'rb')
   cache_lines = cache.readlines()

(... 42 unchanged lines skipped ...)
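
Note (editorial illustration, not part of the patch): with the new signature, a
call might look like the sketch below. The log file name, JSON payload, builder
values, and dashboard URL are invented; only files whose names end in
'-summary.dat' contribute results, since the filter now lives in
_GetResultsJson.

  # Hypothetical call, for illustration only. Each log line is a JSON dict
  # with 'traces' (trace name -> [value, error]) and 'rev'.
  logs_dict = {
      'sunspider-summary.dat': [
          '{"traces": {"Total": [950.0, 2.5]}, "rev": 12345}',
      ],
      'stdio-log.txt': [],  # skipped: name does not end with '-summary.dat'
  }
  SendResults(logs_dict, 'linux-release', 'sunspider',
              'https://chromeperf.appspot.com', 'chromium.perf',
              'Linux QA Perf (1)', '1234', '/path/to/build',
              supplemental_columns={}, fail_hard=False)
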
   cache_dir = os.path.join(os.path.abspath(build_dir), CACHE_DIR)
   if not os.path.exists(cache_dir):
     os.makedirs(cache_dir)
   cache_filename = os.path.join(cache_dir, CACHE_FILENAME)
   if not os.path.exists(cache_filename):
     # Create the file.
     open(cache_filename, 'wb').close()
   return cache_filename

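Note (editorial illustration, not part of the patch): the retry cache location
follows from the CACHE_DIR and CACHE_FILENAME constants above; with a
hypothetical build_dir it resolves as sketched here.

  # build_dir = '/b/build/slave/linux/build'   (hypothetical)
  # _GetCacheFileName(build_dir) creates (if needed) and returns:
  #   /b/build/slave/linux/build/results_dashboard/results_to_retry
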
-def _GetResultsJson(logname, lines, system, test_name, url, masterid,
-                    buildername, buildnumber, supplemental_columns):
+def _GetResultsJson(logs_dict, perf_id, test_name, url, mastername,
+                    buildername, buildnumber, supplemental_columns):
   """Prepare JSON to send from the data in the given arguments.

   Args:
-    logname: Summary log file name.
-    lines: List of log-file lines. Each line is valid JSON which, when
-        deserialized, is a dict containing the keys 'traces' and 'rev'.
-    system: A string such as 'linux-release', which comes from perf_id.
+    logs_dict: A dictionary mapping summary log file names to lists of
+        log-file lines. Each line is valid JSON which, when parsed, is a
+        dictionary that has the keys 'traces' and 'rev'.
+    perf_id: A string such as 'linux-release'.
     test_name: Test name.
     url: Chrome Performance Dashboard URL.
-    masterid: Buildbot master ID.
+    mastername: Buildbot master name (this is lowercase with dots, and is not
+        necessarily the same as the "master" sent to the dashboard).
     buildername: Builder name.
     buildnumber: Build number.
     supplemental_columns: Dict of supplemental data to add.

   Returns:
     JSON that shall be sent to the Chrome Performance Dashboard.
   """
   results_to_add = []
+  # Note that this master string is not the same as "mastername"!
   master = slave_utils.GetActiveMaster()
-  bot = system
-  chart_name = logname.replace('-summary.dat', '')
-  for line in lines:
-    data = json.loads(line)
-    revision, revision_columns = _RevisionNumberColumns(data, master)
-
-    for (trace_name, trace_values) in data['traces'].iteritems():
-      is_important = trace_name in data.get('important', [])
-      test_path = _TestPath(test_name, chart_name, trace_name)
-      result = {
-          'master': master,
-          'bot': system,
-          'test': test_path,
-          'revision': revision,
-          'masterid': masterid,
-          'buildername': buildername,
-          'buildnumber': buildnumber,
-          'supplemental_columns': {}
-      }
-      # Add the supplemental_columns values that were passed in after the
-      # calculated revision column values so that these can be overwritten.
-      result['supplemental_columns'].update(revision_columns)
-      result['supplemental_columns'].update(supplemental_columns)
-      # Test whether we have x/y data.
-      have_multi_value_data = False
-      for value in trace_values:
-        if isinstance(value, list):
-          have_multi_value_data = True
-      if have_multi_value_data:
-        result['data'] = trace_values
-      else:
-        result['value'] = trace_values[0]
-        result['error'] = trace_values[1]
-
-      if data.get('units'):
-        result['units'] = data['units']
-      if data.get('units_x'):
-        result['units_x'] = data['units_x']
-      if data.get('stack'):
-        result['stack'] = data['stack']
-      if is_important:
-        result['important'] = True
-      results_to_add.append(result)
-  _PrintLinkStep(url, master, bot, test_name, revision)
+  for logname, log in logs_dict.iteritems():

       ghost stip (do not use) 2014/03/31 18:10:48:
           we should have some kind of limit here, otherwise ...

+    if not logname.endswith('-summary.dat'):
+      continue
+    lines = [str(l).rstrip() for l in log]
+    chart_name = logname.replace('-summary.dat', '')
+
+    for line in lines:
+      data = json.loads(line)
+      revision, revision_columns = _RevisionNumberColumns(data, master)
+
+      for (trace_name, trace_values) in data['traces'].iteritems():
+        is_important = trace_name in data.get('important', [])
+        test_path = _TestPath(test_name, chart_name, trace_name)
+        result = {
+            'master': master,
+            'bot': perf_id,
+            'test': test_path,
+            'revision': revision,
+            'masterid': mastername,
+            'buildername': buildername,
+            'buildnumber': buildnumber,
+            'supplemental_columns': {}
+        }
+        # Add the supplemental_columns values that were passed in after the
+        # calculated revision column values so that these can be overwritten.
+        result['supplemental_columns'].update(revision_columns)
+        result['supplemental_columns'].update(supplemental_columns)
+        # Test whether we have x/y data.
+        have_multi_value_data = False
+        for value in trace_values:
+          if isinstance(value, list):
+            have_multi_value_data = True
+        if have_multi_value_data:
+          result['data'] = trace_values
+        else:
+          result['value'] = trace_values[0]
+          result['error'] = trace_values[1]
+
+        if data.get('units'):
+          result['units'] = data['units']
+        if data.get('units_x'):
+          result['units_x'] = data['units_x']
+        if is_important:
+          result['important'] = True
+        results_to_add.append(result)
+
+  _PrintLinkStep(url, master, perf_id, test_name, revision)
   return json.dumps(results_to_add)

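Note (editorial illustration, not part of the patch): the "Test whether we have
x/y data" branch above distinguishes two trace-value shapes. A small sketch
with invented numbers:

  import json

  # Scalar trace: a [value, error] pair. No element is itself a list, so
  # have_multi_value_data stays False and the point is sent with separate
  # 'value' and 'error' fields.
  data = json.loads('{"traces": {"Total": [950.0, 2.5]}, "rev": 12345}')
  trace_values = data['traces']['Total']
  assert trace_values == [950.0, 2.5]

  # Multi-value trace: any element that is itself a list (e.g. [x, y] pairs)
  # trips the isinstance(value, list) check, so the whole series is sent
  # under the 'data' key instead.
  xy_trace = [[1, 10.0], [2, 12.0]]
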
 def _RevisionNumberColumns(data, master):
   """Get the revision number and revision-related columns from the given data.

   Args:
     data: A dict of information from one line of the log file.
     master: The name of the buildbot master.

(... 106 unchanged lines skipped ...)
     system: A string such as 'linux-release', which comes from perf_id.
     test_path: Slash-separated test path, e.g. "moz/times"
     revision: Revision number.
   """
   results_link = url + RESULTS_LINK_PATH % (
       urllib.quote(master),
       urllib.quote(system),
       urllib.quote(test_path),
       revision)
   print '@@@STEP_LINK@%s@%s@@@' % ('Results Dashboard', results_link)
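
Note (editorial illustration, not part of the patch): with hypothetical values,
the RESULTS_LINK_PATH template defined at the top of the file expands as
sketched here, and _PrintLinkStep prints the corresponding buildbot annotation.

  # Hypothetical values: url='https://chromeperf.appspot.com',
  # master='ChromiumPerf', system='linux-release', test_path='sunspider',
  # revision=12345. The printed step annotation is then:
  #   @@@STEP_LINK@Results Dashboard@https://chromeperf.appspot.com/report?masters=ChromiumPerf&bots=linux-release&tests=sunspider&rev=12345@@@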