Chromium Code Reviews

Side by Side Diff: tools/telemetry/telemetry/multi_page_benchmark_runner.py

Issue 12278015: [Telemetry] Reorganize everything. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Re-add shebangs. Created 7 years, 10 months ago
(The old side of the diff is empty; this patch adds the file.)
1 #!/usr/bin/env python
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
5 import csv
6 import logging
7 import os
8 import sys
9
10 from telemetry import all_page_actions # pylint: disable=W0611
11 from telemetry import block_page_benchmark_results
12 from telemetry import browser_finder
13 from telemetry import browser_options
14 from telemetry import csv_page_benchmark_results
15 from telemetry import discover
16 from telemetry import multi_page_benchmark
17 from telemetry import page_runner
18 from telemetry import page_set
19
20 def Main(benchmark_dir):
21 """Turns a MultiPageBenchmark into a command-line program.
22
23 Args:
24 benchmark_dir: Path to directory containing MultiPageBenchmarks.
25 """
26 benchmarks = discover.Discover(benchmark_dir, '',
27 multi_page_benchmark.MultiPageBenchmark)
28
29 # Naively find the benchmark. If we use the browser options parser, we run
30 # the risk of failing to parse if we use a benchmark-specific parameter.
31 benchmark_name = None
32 for arg in sys.argv:
33 if arg in benchmarks:
34 benchmark_name = arg
35
36 options = browser_options.BrowserOptions()
37 parser = options.CreateParser('%prog [options] <benchmark> <page_set>')
38
39 page_runner.PageRunner.AddCommandLineOptions(parser)
40 parser.add_option('--output-format',
41 dest='output_format',
42 default='csv',
43 help='Output format. Can be "csv" or "block". '
44 'Defaults to "%default".')
45 parser.add_option('-o', '--output',
46 dest='output_file',
47 help='Redirects output to a file. Defaults to stdout.')
48
49 benchmark = None
50 if benchmark_name is not None:
51 benchmark = benchmarks[benchmark_name]()
52 benchmark.AddCommandLineOptions(parser)
53
54 _, args = parser.parse_args()
55
56 if benchmark is None or len(args) != 2:
57 parser.print_usage()
58 import page_sets # pylint: disable=F0401
59 print >> sys.stderr, 'Available benchmarks:\n%s\n' % ',\n'.join(
60 sorted(benchmarks.keys()))
61 print >> sys.stderr, 'Available page_sets:\n%s\n' % ',\n'.join(
62 sorted([os.path.relpath(f)
63 for f in page_sets.GetAllPageSetFilenames()]))
64 sys.exit(1)
65
66 ps = page_set.PageSet.FromFile(args[1])
67
68 benchmark.CustomizeBrowserOptions(options)
69 possible_browser = browser_finder.FindBrowser(options)
70 if not possible_browser:
71 print >> sys.stderr, """No browser found.\n
72 Use --browser=list to figure out which are available.\n"""
73 sys.exit(1)
74
75 if not options.output_file:
76 output_file = sys.stdout
77 elif options.output_file == '-':
78 output_file = sys.stdout
79 else:
80 output_file = open(os.path.expanduser(options.output_file), 'w')
81
82 if options.output_format == 'csv':
83 results = csv_page_benchmark_results.CsvPageBenchmarkResults(
84 csv.writer(output_file),
85 benchmark.results_are_the_same_on_every_page)
86 elif options.output_format in ('block', 'terminal-block'):
87 results = block_page_benchmark_results.BlockPageBenchmarkResults(
88 output_file)
89 else:
90 raise Exception('Invalid --output-format value: "%s". Valid values are '
91 '"csv" and "block".'
92 % options.output_format)
93
94 with page_runner.PageRunner(ps) as runner:
95 runner.Run(options, possible_browser, benchmark, results)
96 # When using an exact executable, assume it is a reference build for the
97 # purpose of outputting the perf results.
98 results.PrintSummary(options.browser_executable and '_ref' or '')
99
100 if len(results.page_failures):
101 logging.warning('Failed pages: %s', '\n'.join(
102 [failure['page'].url for failure in results.page_failures]))
103
104 if len(results.skipped_pages):
105 logging.warning('Skipped pages: %s', '\n'.join(
106 [skipped['page'].url for skipped in results.skipped_pages]))
107 return min(255, len(results.page_failures))
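
For orientation, a minimal sketch of how this runner is meant to be driven, assuming the MultiPageBenchmark / MeasurePage / results.Add API that the rest of this CL defines. The directory layout, the file names, the SimpleBenchmark class, and the wrapper script below are hypothetical, not part of this patch.

# Hypothetical layout: both files live in the benchmark directory that is
# handed to Main() below. Names here are illustrative and not part of this CL.

# --- perf_tools/simple_benchmark.py (hypothetical benchmark module) ---
from telemetry import multi_page_benchmark

class SimpleBenchmark(multi_page_benchmark.MultiPageBenchmark):
  """Records a placeholder value for every page in the page set."""

  def MeasurePage(self, page, tab, results):
    # A real benchmark would query `tab` here; a constant keeps this
    # sketch independent of the tab API.
    results.Add('placeholder', 'count', 1)

# --- run_benchmarks (hypothetical wrapper script) ---
#!/usr/bin/env python
import os
import sys

# Assumes tools/telemetry is already on sys.path.
from telemetry import multi_page_benchmark_runner

if __name__ == '__main__':
  # Main() discovers MultiPageBenchmark subclasses in the given directory,
  # parses browser and benchmark options, runs the page set, prints a
  # summary, and returns min(255, number of failed pages) as the exit status.
  sys.exit(multi_page_benchmark_runner.Main(os.path.dirname(__file__)))

Invocation would then mirror the parser's usage string above: run_benchmarks [options] <benchmark> <page_set>, with --output-format set to "csv" or "block" and -o to redirect output to a file instead of stdout.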
