Index: tools/perf/perf_tools/multipage_benchmark_runner.py |
diff --git a/tools/perf/perf_tools/multipage_benchmark_runner.py b/tools/perf/perf_tools/multipage_benchmark_runner.py |
new file mode 100755 |
index 0000000000000000000000000000000000000000..2fcacdd430f548f913ded336e5edb2e96a637be7 |
--- /dev/null |
+++ b/tools/perf/perf_tools/multipage_benchmark_runner.py |
@@ -0,0 +1,76 @@ |
+#!/usr/bin/env python |
+# Copyright (c) 2012 The Chromium Authors. All rights reserved. |
+# Use of this source code is governed by a BSD-style license that can be |
+# found in the LICENSE file. |
+import csv |
+import logging |
+import os |
+import sys |
+ |
+from chrome_remote_control import browser_finder |
+from chrome_remote_control import browser_options |
+from chrome_remote_control import multi_page_benchmark |
+from chrome_remote_control import page_runner |
+from chrome_remote_control import page_set |
+ |
+import perf_tools.first_paint_time_benchmark |
+import perf_tools.scrolling_benchmark |
+import perf_tools.skpicture_printer |
+import perf_tools.texture_upload_benchmark |
+ |
+# TODO(tonyg/nduca): Discover benchmarks automagically. |
# TODO(tonyg/nduca): Discover benchmarks automagically.
# Maps the benchmark name accepted on the command line to the
# MultiPageBenchmark subclass that implements it.
_BENCHMARKS = {
    'first_paint_time_benchmark':
        perf_tools.first_paint_time_benchmark.FirstPaintTimeBenchmark,
    'scrolling_benchmark':
        perf_tools.scrolling_benchmark.ScrollingBenchmark,
    'skpicture_printer':
        perf_tools.skpicture_printer.SkPicturePrinter,
    'texture_upload_benchmark':
        perf_tools.texture_upload_benchmark.TextureUploadBenchmark,
}
+ |
+ |
def Main():
  """Turns a MultiPageBenchmark into a command-line program.

  Parses sys.argv for a benchmark name and a page_set file, runs the
  benchmark over the page set, and prints CSV results to stdout.

  Returns:
    An int suitable for use as a process exit code: 0 on success,
    otherwise the number of failed pages (capped at 255).
  """
  options = browser_options.BrowserOptions()
  parser = options.CreateParser('%prog [options] <benchmark> <page_set>')
  _, args = parser.parse_args()

  if len(args) != 2 or args[0] not in _BENCHMARKS:
    parser.print_usage()
    import page_sets  # pylint: disable=F0401
    sys.stderr.write('Available benchmarks:\n%s\n\n' % ',\n'.join(
        _BENCHMARKS.keys()))
    sys.stderr.write('Available page_sets:\n%s\n\n' % ',\n'.join(
        [os.path.relpath(f) for f in page_sets.GetAllPageSetFilenames()]))
    sys.exit(1)

  benchmark = _BENCHMARKS[args[0]]()
  ps = page_set.PageSet.FromFile(args[1])

  # Re-parse after the benchmark has registered its own options so that
  # benchmark-specific flags are recognized.
  benchmark.AddOptions(parser)
  _, args = parser.parse_args()

  benchmark.CustomizeBrowserOptions(options)
  possible_browser = browser_finder.FindBrowser(options)
  if not possible_browser:
    sys.stderr.write(
        'No browser found.\n' +
        'Use --browser=list to figure out which are available.\n')
    sys.exit(1)

  results = multi_page_benchmark.CsvBenchmarkResults(csv.writer(sys.stdout))
  with page_runner.PageRunner(ps) as runner:
    runner.Run(options, possible_browser, benchmark, results)
  # When using an exact executable, assume it is a reference build for the
  # purpose of outputting the perf results.
  results.PrintSummary('_ref' if options.browser_executable else '')

  if results.page_failures:
    logging.warning('Failed pages: %s', '\n'.join(
        [failure['page'].url for failure in results.page_failures]))
  # Cap at 255: process exit codes are one byte, and max() here would
  # wrongly report 255 even for a single failure (the original bug).
  return min(255, len(results.page_failures))