Chromium Code Reviews

Diff: scripts/slave/recipe_modules/chromium/api.py

Issue 745463003: Display telemetry chartjson output and result data on waterfall. (Closed)
Base URL: https://chromium.googlesource.com/chromium/tools/build.git@master
Patch Set: Created 6 years, 1 month ago
 # Copyright 2013 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 import re

 from slave import recipe_api
 from slave import recipe_util

 from . import builders

(...skipping 161 matching lines...)

   @recipe_util.returns_placeholder
   def test_launcher_filter(self, tests):
     return TestLauncherFilterFileInputPlaceholder(self, tests)

   def runtest(self, test, args=None, xvfb=False, name=None, annotate=None,
               results_url=None, perf_dashboard_id=None, test_type=None,
               generate_json_file=False, results_directory=None,
               python_mode=False, spawn_dbus=True, parallel=False,
               revision=None, webkit_revision=None, master_class_name=None,
               test_launcher_summary_output=None, flakiness_dash=None,
-              perf_id=None, perf_config=None, **kwargs):
+              perf_id=None, perf_config=None, chartjson_file=False, **kwargs):
     """Return a runtest.py invocation."""
     args = args or []
     assert isinstance(args, list)

     t_name, ext = self.m.path.splitext(self.m.path.basename(test))
     if not python_mode and self.m.platform.is_win and ext == '':
       test += '.exe'

     full_args = ['--target', self.c.build_config_fs]
     if self.c.TARGET_PLATFORM == 'ios':

(...skipping 15 matching lines...)

       full_args.append('--perf-dashboard-id=%s' % perf_dashboard_id)
     if perf_id:
       full_args.append('--perf-id=%s' % perf_id)
     if perf_config:
       full_args.extend(['--perf-config', perf_config])
     # This replaces the step_name that used to be sent via factory_properties.
     if test_type:
       full_args.append('--test-type=%s' % test_type)
     if generate_json_file:
       full_args.append('--generate-json-file')
+    if chartjson_file:
+      full_args.append('--chartjson-file')

    ghost stip (do not use) 2014/11/21 21:17:54:
      you'll have to add a test case somewhere that call …

+      full_args.append(self.m.json.output())
     if results_directory:
       full_args.append('--results-directory=%s' % results_directory)
     if test_launcher_summary_output:
       full_args.extend([
           '--test-launcher-summary-output',
           test_launcher_summary_output
       ])
     if flakiness_dash:
       full_args.extend([
           '--generate-json-file',

(...skipping 441 matching lines...)

   def get_compile_targets_for_scripts(self):
     return self.m.python(
         name='get compile targets for scripts',
         script=self.m.path['checkout'].join(
             'testing', 'scripts', 'get_compile_targets.py'),
         args=[
             '--output', self.m.json.output(),
             '--',
         ] + self.get_common_args_for_scripts(),
         step_test_data=lambda: self.m.json.test_api.output({}))
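
For readers unfamiliar with recipe placeholders: passing chartjson_file=True appends --chartjson-file followed by a json.output() placeholder, so runtest.py writes its chart JSON to an engine-managed path that the recipe engine parses back onto the step result. A minimal, hypothetical caller sketch follows; the test binary, perf ids, and result handling are illustrative assumptions, not part of this patch:

# Hypothetical caller sketch (not part of this CL). Assumes the standard
# recipe DEPS for the chromium and json modules.
DEPS = ['chromium', 'json']

def GenSteps(api):
  step_result = api.chromium.runtest(
      'performance_ui_tests',            # assumed perf test binary
      annotate='graphing',
      results_url='https://chromeperf.appspot.com',
      perf_id='linux-release',           # assumed perf dashboard ids
      perf_dashboard_id='page_cycler',
      test_type='page_cycler',
      chartjson_file=True)               # the new flag from this patch
  # The json.output() placeholder means the parsed chart JSON is exposed
  # on the step result once the step has run.
  charts = step_result.json.output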
Other files in this patch: scripts/slave/recipe_modules/chromium/steps.py, scripts/slave/telemetry_utils.py
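
stip's inline comment above asks for simulation coverage of the new placeholder. A rough sketch of what such a case might look like in the recipe test framework of the time — the recipe, step name, and property values are all assumptions for illustration:

# Hypothetical GenTests case (file placement and names are assumptions):
# exercises runtest with chartjson_file=True so the json.output placeholder
# gets deterministic data in the recipe simulation.
def GenTests(api):
  yield (
      api.test('chartjson') +
      api.properties(mastername='chromium.perf',   # illustrative properties
                     buildername='linux-perf',
                     slavename='fake-slave') +
      # Feed fake chart JSON to the step that runs with chartjson_file=True.
      api.override_step_data('page_cycler',        # assumed step name
                             api.json.output({'charts': {}}))
  )

Alternatively, runtest itself could supply default simulation data via step_test_data, the same pattern get_compile_targets_for_scripts already uses at the bottom of this diff.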
