Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(734)

Side by Side Diff: scripts/slave/recipe_modules/chromium/api.py

Issue 745463003: Display telemetry chartjson output and result data on waterfall. (Closed) Base URL: https://chromium.googlesource.com/chromium/tools/build.git@master
Patch Set: Added coverage. Created 6 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 # Copyright 2013 The Chromium Authors. All rights reserved. 1 # Copyright 2013 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 4
5 import re 5 import re
6 6
7 from slave import recipe_api 7 from slave import recipe_api
8 from slave import recipe_util 8 from slave import recipe_util
9 9
10 from . import builders 10 from . import builders
(...skipping 161 matching lines...) Expand 10 before | Expand all | Expand 10 after
172 @recipe_util.returns_placeholder 172 @recipe_util.returns_placeholder
173 def test_launcher_filter(self, tests): 173 def test_launcher_filter(self, tests):
174 return TestLauncherFilterFileInputPlaceholder(self, tests) 174 return TestLauncherFilterFileInputPlaceholder(self, tests)
175 175
176 def runtest(self, test, args=None, xvfb=False, name=None, annotate=None, 176 def runtest(self, test, args=None, xvfb=False, name=None, annotate=None,
177 results_url=None, perf_dashboard_id=None, test_type=None, 177 results_url=None, perf_dashboard_id=None, test_type=None,
178 generate_json_file=False, results_directory=None, 178 generate_json_file=False, results_directory=None,
179 python_mode=False, spawn_dbus=True, parallel=False, 179 python_mode=False, spawn_dbus=True, parallel=False,
180 revision=None, webkit_revision=None, master_class_name=None, 180 revision=None, webkit_revision=None, master_class_name=None,
181 test_launcher_summary_output=None, flakiness_dash=None, 181 test_launcher_summary_output=None, flakiness_dash=None,
182 perf_id=None, perf_config=None, **kwargs): 182 perf_id=None, perf_config=None, chartjson_file=False, **kwargs):
183 """Return a runtest.py invocation.""" 183 """Return a runtest.py invocation."""
184 args = args or [] 184 args = args or []
185 assert isinstance(args, list) 185 assert isinstance(args, list)
186 186
187 t_name, ext = self.m.path.splitext(self.m.path.basename(test)) 187 t_name, ext = self.m.path.splitext(self.m.path.basename(test))
188 if not python_mode and self.m.platform.is_win and ext == '': 188 if not python_mode and self.m.platform.is_win and ext == '':
189 test += '.exe' 189 test += '.exe'
190 190
191 full_args = ['--target', self.c.build_config_fs] 191 full_args = ['--target', self.c.build_config_fs]
192 if self.c.TARGET_PLATFORM == 'ios': 192 if self.c.TARGET_PLATFORM == 'ios':
(...skipping 15 matching lines...) Expand all
208 full_args.append('--perf-dashboard-id=%s' % perf_dashboard_id) 208 full_args.append('--perf-dashboard-id=%s' % perf_dashboard_id)
209 if perf_id: 209 if perf_id:
210 full_args.append('--perf-id=%s' % perf_id) 210 full_args.append('--perf-id=%s' % perf_id)
211 if perf_config: 211 if perf_config:
212 full_args.extend(['--perf-config', perf_config]) 212 full_args.extend(['--perf-config', perf_config])
213 # This replaces the step_name that used to be sent via factory_properties. 213 # This replaces the step_name that used to be sent via factory_properties.
214 if test_type: 214 if test_type:
215 full_args.append('--test-type=%s' % test_type) 215 full_args.append('--test-type=%s' % test_type)
216 if generate_json_file: 216 if generate_json_file:
217 full_args.append('--generate-json-file') 217 full_args.append('--generate-json-file')
218 if chartjson_file:
219 full_args.append('--chartjson-file')
220 full_args.append(self.m.json.output())
221 kwargs['step_test_data'] = lambda: self.m.json.test_api.output([])
218 if results_directory: 222 if results_directory:
219 full_args.append('--results-directory=%s' % results_directory) 223 full_args.append('--results-directory=%s' % results_directory)
220 if test_launcher_summary_output: 224 if test_launcher_summary_output:
221 full_args.extend([ 225 full_args.extend([
222 '--test-launcher-summary-output', 226 '--test-launcher-summary-output',
223 test_launcher_summary_output 227 test_launcher_summary_output
224 ]) 228 ])
225 if flakiness_dash: 229 if flakiness_dash:
226 full_args.extend([ 230 full_args.extend([
227 '--generate-json-file', 231 '--generate-json-file',
(...skipping 443 matching lines...) Expand 10 before | Expand all | Expand 10 after
def get_compile_targets_for_scripts(self):
  """Run testing/scripts/get_compile_targets.py and return the step result.

  Invokes the checkout's get_compile_targets.py helper via the python
  recipe module, forwarding the common script arguments and collecting
  the script's output through a JSON placeholder.
  """
  # Path to the helper script inside the checked-out source tree.
  helper_script = self.m.path['checkout'].join(
      'testing', 'scripts', 'get_compile_targets.py')
  # '--output' receives a JSON placeholder the script writes results to;
  # everything after '--' is passed through to the script unchanged.
  script_args = ['--output', self.m.json.output(), '--']
  script_args += self.get_common_args_for_scripts()
  return self.m.python(
      name='get compile targets for scripts',
      script=helper_script,
      args=script_args,
      # Default mock output for recipe simulation tests: an empty dict.
      step_test_data=lambda: self.m.json.test_api.output({}))
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698