#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Script to parse perf data from Chrome Endure test executions, to be graphed.

This script connects via HTTP to a buildbot master in order to scrape and parse
perf data from Chrome Endure tests that have been run. The perf data is then
stored in local text files to be graphed by the Chrome Endure graphing code.

It is assumed that any Chrome Endure tests that show up on the waterfall have
names that are of the following form:

"endure_<webapp_name>_test <test_name>" (non-Web Page Replay tests)

or

"endure_<webapp_name>_wpr_test <test_name>" (Web Page Replay tests)

For example: "endure_gmail_wpr_test testGmailComposeDiscard"
"""

import getpass
import logging
import optparse
import os
import re
import simplejson
import socket
import sys
import time
import urllib
import urllib2


CHROME_ENDURE_SLAVE_NAMES = [
  'Linux (perf0)',
  'Linux (perf1)',
  'Linux (perf2)',
  'Linux (perf3)',
  'Linux (perf4)',
]

BUILDER_URL_BASE = 'http://build.chromium.org/p/chromium.pyauto/builders/'
LAST_BUILD_NUM_PROCESSED_FILE = os.path.join(os.path.dirname(__file__),
                                             '_parser_last_processed.txt')
LOCAL_GRAPH_DIR = '/home/%s/www/chrome_endure_clean' % getpass.getuser()
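# Layout of the graph data directory, as created by the code below:
#   LOCAL_GRAPH_DIR/<webapp_name>/<test_name>/config.js
#   LOCAL_GRAPH_DIR/<webapp_name>/<test_name>/graphs.dat
#   LOCAL_GRAPH_DIR/<webapp_name>/<test_name>/<graph_name>-summary.dat
# plus symlinks to the shared plotting files (index.html, endure_plotter.js,
# and js/), which live two directory levels up.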


def SetupBaseGraphDirIfNeeded(webapp_name, test_name, dest_dir):
  """Sets up the directory containing results for a particular test, if needed.

  Args:
    webapp_name: The string name of the webapp associated with the given test.
    test_name: The string name of the test.
    dest_dir: The name of the destination directory that needs to be set up.
  """
  if not os.path.exists(dest_dir):
    os.mkdir(dest_dir)  # Test name directory.
    os.chmod(dest_dir, 0755)

  # Create config file.
  config_file = os.path.join(dest_dir, 'config.js')
  if not os.path.exists(config_file):
    with open(config_file, 'w') as f:
      f.write('var Config = {\n')
      f.write('buildslave: "Chrome Endure Bots",\n')
      f.write('title: "Chrome Endure %s Test: %s",\n' % (webapp_name.upper(),
                                                         test_name))
      f.write('};\n')
    os.chmod(config_file, 0755)

  # Set up symbolic links to the real graphing files.
  link_file = os.path.join(dest_dir, 'index.html')
  if not os.path.exists(link_file):
    os.symlink('../../endure_plotter.html', link_file)
  link_file = os.path.join(dest_dir, 'endure_plotter.js')
  if not os.path.exists(link_file):
    os.symlink('../../endure_plotter.js', link_file)
  link_file = os.path.join(dest_dir, 'js')
  if not os.path.exists(link_file):
    os.symlink('../../js', link_file)


def WriteToDataFile(new_line, existing_lines, revision, data_file):
  """Writes a new entry to an existing perf data file to be graphed.

  If there's an existing line with the same revision number, overwrite its data
  with the new line. Else, prepend the info for the new revision.

  Args:
    new_line: A dictionary representing perf information for the new entry.
    existing_lines: A list of string lines from the existing perf data file.
    revision: The string revision number associated with the new perf entry.
    data_file: The string name of the perf data file to which to write.
  """
  overwritten = False
  for i, line in enumerate(existing_lines):
    line_dict = simplejson.loads(line)
    if line_dict['rev'] == revision:
      existing_lines[i] = simplejson.dumps(new_line)
      overwritten = True
      break
    elif int(line_dict['rev']) < int(revision):
      break
  if not overwritten:
    existing_lines.insert(0, simplejson.dumps(new_line))

  with open(data_file, 'w') as f:
    f.write('\n'.join(existing_lines))
  os.chmod(data_file, 0755)


def OutputPerfData(revision, graph_name, description, value, units, units_x,
                   dest_dir):
  """Outputs perf data to a local text file to be graphed.

  Args:
    revision: The string revision number associated with the perf data.
    graph_name: The string name of the graph on which to plot the data.
    description: A string description of the perf value to be graphed.
    value: Either a single data value to be graphed, or a list of 2-tuples
        representing (x, y) points to be graphed for long-running tests.
    units: The string description for the y-axis units on the graph.
    units_x: The string description for the x-axis units on the graph. Should
        be set to None if the results are not for long-running graphs.
    dest_dir: The name of the destination directory to which to write.
  """
  # Update graphs.dat, which contains metadata associated with each graph.
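  # graphs.dat is a JSON list of per-graph metadata dictionaries, e.g.
  # (graph name and units are hypothetical):
  #   [{"name": "total_vm", "units": "kb", "important": false}]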
  existing_graphs = []
  graphs_file = os.path.join(dest_dir, 'graphs.dat')
  if os.path.exists(graphs_file):
    with open(graphs_file, 'r') as f:
      existing_graphs = simplejson.loads(f.read())
  is_new_graph = True
  for graph in existing_graphs:
    if graph['name'] == graph_name:
      is_new_graph = False
      break
  if is_new_graph:
    new_graph = {
      'name': graph_name,
      'units': units,
      'important': False,
    }
    if units_x:
      new_graph['units_x'] = units_x
    existing_graphs.append(new_graph)
    existing_graphs = sorted(existing_graphs, key=lambda x: x['name'])
    with open(graphs_file, 'w') as f:
      f.write(simplejson.dumps(existing_graphs, indent=2))
    os.chmod(graphs_file, 0755)

  # Update summary data file, containing the actual data to be graphed.
  data_file_name = graph_name + '-summary.dat'
  existing_lines = []
  data_file = os.path.join(dest_dir, data_file_name)
  if os.path.exists(data_file):
    with open(data_file, 'r') as f:
      existing_lines = f.readlines()
    existing_lines = map(lambda x: x.strip(), existing_lines)
  if units_x:
    points = []
    for point in value:
      points.append([str(point[0]), str(point[1])])
    new_traces = {
      description: points
    }
  else:
    new_traces = {
      description: [str(value), str(0.0)]
    }
  new_line = {
    'traces': new_traces,
    'rev': revision
  }

  WriteToDataFile(new_line, existing_lines, revision, data_file)


def OutputEventData(revision, description, event_list, dest_dir):
  """Outputs event data to a local text file to be graphed.

  Args:
    revision: The string revision number associated with the event data.
    description: A string description of the event values to be graphed.
    event_list: An array of tuples representing event data to be graphed.
    dest_dir: The name of the destination directory to which to write.
  """
  data_file_name = '_EVENT_-summary.dat'
  existing_lines = []
  data_file = os.path.join(dest_dir, data_file_name)
  if os.path.exists(data_file):
    with open(data_file, 'r') as f:
      existing_lines = f.readlines()
    existing_lines = map(lambda x: x.strip(), existing_lines)

  value_list = []
  for event_time, event_data in event_list:
    value_list.append([str(event_time), event_data])
  new_events = {
    description: value_list
  }

  new_line = {
    'rev': revision,
    'events': new_events
  }

  WriteToDataFile(new_line, existing_lines, revision, data_file)


def UpdatePerfDataForSlaveAndBuild(slave_info, build_num):
  """Processes updated perf data for a particular slave and build number.

  Args:
    slave_info: A dictionary containing information about the slave to process.
    build_num: The particular build number on the slave to process.

  Returns:
    True if the perf data for the given slave/build is updated properly, or
    False if any critical error occurred.
  """
  logging.debug(' %s, build %d.', slave_info['slave_name'], build_num)
  build_url = (BUILDER_URL_BASE + urllib.quote(slave_info['slave_name']) +
               '/builds/' + str(build_num))

  url_contents = ''
  fp = None
  try:
    fp = urllib2.urlopen(build_url, timeout=60)
    url_contents = fp.read()
  except urllib2.URLError, e:
    logging.exception('Error reading build URL "%s": %s', build_url, str(e))
    return False
  finally:
    if fp:
      fp.close()

  # Extract the revision number for this build.
  revision = re.findall(
      r'<td class="left">got_revision</td>\s+<td>(\d+)</td>\s+<td>Source</td>',
      url_contents)
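  # The regex above assumes the build page shows the "got_revision" build
  # property as a table row, roughly (revision value illustrative):
  #   <td class="left">got_revision</td> <td>152100</td> <td>Source</td>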
  if not revision:
    logging.warning('Could not get revision number. Assuming build is too new '
                    'or was cancelled.')
    return True  # Do not fail the script in this case; continue with next one.
  revision = revision[0]

  # Extract any Chrome Endure stdio links for this build.
  stdio_urls = []
  links = re.findall(r'(/steps/endure[^/]+/logs/stdio)', url_contents)
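  # Using the example test name from the module docstring, a matching link
  # would look like (with the space in the step name presumably URL-quoted):
  #   /steps/endure_gmail_wpr_test%20testGmailComposeDiscard/logs/stdio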
  for link in links:
    link_unquoted = urllib.unquote(link)
    found_wpr_result = False
    match = re.findall(r'endure_([^_]+)_test ([^/]+)/', link_unquoted)
    if not match:
      match = re.findall(r'endure_([^_]+)_wpr_test ([^/]+)/', link_unquoted)
      if match:
        found_wpr_result = True
      else:
        logging.error('Test name not in expected format in link: ' +
                      link_unquoted)
        return False
    match = match[0]
    webapp_name = match[0] + '_wpr' if found_wpr_result else match[0]
    test_name = match[1]
    stdio_urls.append({
      'link': build_url + link + '/text',
      'webapp_name': webapp_name,
      'test_name': test_name,
    })

  # For each test stdio link, parse it and look for new perf data to be
  # graphed.
  for stdio_url_data in stdio_urls:
    stdio_url = stdio_url_data['link']
    url_contents = ''
    fp = None
    try:
      fp = urllib2.urlopen(stdio_url, timeout=60)
      # Since in-progress test output is sent chunked, there's no EOF. We need
      # to specially handle this case so we don't hang here waiting for the
      # test to complete.
      start_time = time.time()
      while True:
        data = fp.read(1024)
        if not data:
          break
        url_contents += data
        if time.time() - start_time >= 30:  # Read for at most 30 seconds.
          break
    except (urllib2.URLError, socket.error), e:
      # Issue warning but continue to the next stdio link.
      logging.warning('Error reading test stdio URL "%s": %s', stdio_url,
                      str(e))
    finally:
      if fp:
        fp.close()

    perf_data_raw = []

    def AppendRawPerfData(graph_name, description, value, units, units_x,
                          webapp_name, test_name):
      perf_data_raw.append({
        'graph_name': graph_name,
        'description': description,
        'value': value,
        'units': units,
        'units_x': units_x,
        'webapp_name': webapp_name,
        'test_name': test_name,
      })
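
    # The scans below match the standard "RESULT <graph>: <trace>= ..." log
    # lines emitted by the tests. Illustrative examples of the three forms, in
    # the order scanned (graph/trace names and values are hypothetical):
    #   RESULT vm_final_browser: vm_final_browser= 77357.0 kb
    #   RESULT vm_browser: vm_browser= [(0.0, 77000.0), (60.5, 77357.0)] kb seconds
    #   RESULT _EVENT_: TestEvent= [(10.2, 'info'), (55.7, 'info')]
    # Note that values are parsed with eval(), so bot output is trusted here.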

    # First scan for short-running perf test results.
    for match in re.findall(
        r'RESULT ([^:]+): ([^=]+)= ([-\d\.]+) (\S+)', url_contents):
      AppendRawPerfData(match[0], match[1], eval(match[2]), match[3], None,
                        stdio_url_data['webapp_name'],
                        stdio_url_data['test_name'])

    # Next scan for long-running perf test results.
    for match in re.findall(
        r'RESULT ([^:]+): ([^=]+)= (\[[^\]]+\]) (\S+) (\S+)', url_contents):
      AppendRawPerfData(match[0], match[1], eval(match[2]), match[3], match[4],
                        stdio_url_data['webapp_name'],
                        stdio_url_data['test_name'])

    # Next scan for events in the test results.
    for match in re.findall(
        r'RESULT _EVENT_: ([^=]+)= (\[[^\]]+\])', url_contents):
      AppendRawPerfData('_EVENT_', match[0], eval(match[1]), None, None,
                        stdio_url_data['webapp_name'],
                        stdio_url_data['test_name'])

    # For each graph_name/description pair that refers to a long-running test
    # result or an event, concatenate all the results together (assume results
    # in the input file are in the correct order). For short-running test
    # results, keep just one if more than one is specified.
    perf_data = {}  # Maps a graph-line key to a perf data dictionary.
    for data in perf_data_raw:
      key = data['graph_name'] + '|' + data['description']
      if data['graph_name'] != '_EVENT_' and not data['units_x']:
        # Short-running test result.
        perf_data[key] = data
      else:
        # Long-running test result or event.
        if key in perf_data:
          perf_data[key]['value'] += data['value']
        else:
          perf_data[key] = data

    # Finally, for each graph-line in |perf_data|, update the associated local
    # graph data files if necessary.
    for perf_data_key in perf_data:
      perf_data_dict = perf_data[perf_data_key]

      dest_dir = os.path.join(LOCAL_GRAPH_DIR, perf_data_dict['webapp_name'])
      if not os.path.exists(dest_dir):
        os.mkdir(dest_dir)  # Webapp name directory.
        os.chmod(dest_dir, 0755)
      dest_dir = os.path.join(dest_dir, perf_data_dict['test_name'])

      SetupBaseGraphDirIfNeeded(perf_data_dict['webapp_name'],
                                perf_data_dict['test_name'], dest_dir)
      if perf_data_dict['graph_name'] == '_EVENT_':
        OutputEventData(revision, perf_data_dict['description'],
                        perf_data_dict['value'], dest_dir)
      else:
        OutputPerfData(revision, perf_data_dict['graph_name'],
                       perf_data_dict['description'], perf_data_dict['value'],
                       perf_data_dict['units'], perf_data_dict['units_x'],
                       dest_dir)

  return True


def UpdatePerfDataFiles():
  """Updates the Chrome Endure graph data files with the latest test results.

  For each known Chrome Endure slave, we scan its latest test results looking
  for any new test data. Any new data that is found is then appended to the
  data files used to display the Chrome Endure graphs.

  Returns:
    True if all graph data files are updated properly, or
    False if any error occurred.
  """
  slave_list = []
  for slave_name in CHROME_ENDURE_SLAVE_NAMES:
    slave_info = {}
    slave_info['slave_name'] = slave_name
    slave_info['most_recent_build_num'] = None
    slave_info['last_processed_build_num'] = None
    slave_list.append(slave_info)

  # Identify the most recent build number for each slave.
  logging.debug('Searching for latest build numbers for each slave...')
  for slave in slave_list:
    slave_name = slave['slave_name']
    slave_url = BUILDER_URL_BASE + urllib.quote(slave_name)

    url_contents = ''
    fp = None
    try:
      fp = urllib2.urlopen(slave_url, timeout=60)
      url_contents = fp.read()
    except urllib2.URLError, e:
      logging.exception('Error reading builder URL: %s', str(e))
      return False
    finally:
      if fp:
        fp.close()

    matches = re.findall(r'/(\d+)/stop', url_contents)
    if matches:
      slave['most_recent_build_num'] = int(matches[0])
    else:
      matches = re.findall(r'#(\d+)</a></td>', url_contents)
      if matches:
        slave['most_recent_build_num'] = sorted(map(int, matches),
                                                reverse=True)[0]
      else:
        logging.error('Could not identify latest build number for slave %s.',
                      slave_name)
        return False

    logging.debug('%s most recent build number: %s', slave_name,
                  slave['most_recent_build_num'])

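  # The last-processed build numbers are persisted across runs in
  # LAST_BUILD_NUM_PROCESSED_FILE, one "<slave name>:<build num>" entry per
  # line (written at the bottom of this function).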
  # Identify the last-processed build number for each slave.
  logging.debug('Identifying last processed build numbers...')
  if not os.path.exists(LAST_BUILD_NUM_PROCESSED_FILE):
    for slave_info in slave_list:
      slave_info['last_processed_build_num'] = 0
  else:
    with open(LAST_BUILD_NUM_PROCESSED_FILE, 'r') as fp:
      file_contents = fp.read()
      for match in re.findall(r'([^:]+):(\d+)', file_contents):
        slave_name = match[0].strip()
        last_processed_build_num = match[1].strip()
        for slave_info in slave_list:
          if slave_info['slave_name'] == slave_name:
            slave_info['last_processed_build_num'] = int(
                last_processed_build_num)
    for slave_info in slave_list:
      if not slave_info['last_processed_build_num']:
        slave_info['last_processed_build_num'] = 0
  logging.debug('Done identifying last processed build numbers.')

  # For each Chrome Endure slave, process each build in-between the last
  # processed build num and the most recent build num, inclusive. To process
  # each one, first get the revision number for that build, then scan the test
  # result stdio for any performance data, and add any new performance data to
  # local files to be graphed.
  for slave_info in slave_list:
    logging.debug('Processing %s, builds %d-%d...',
                  slave_info['slave_name'],
                  slave_info['last_processed_build_num'],
                  slave_info['most_recent_build_num'])
    curr_build_num = slave_info['last_processed_build_num']
    while curr_build_num <= slave_info['most_recent_build_num']:
      if not UpdatePerfDataForSlaveAndBuild(slave_info, curr_build_num):
        return False
      curr_build_num += 1

  # Log the newly-processed build numbers.
  logging.debug('Logging the newly-processed build numbers...')
  with open(LAST_BUILD_NUM_PROCESSED_FILE, 'w') as f:
    for slave_info in slave_list:
      f.write('%s:%s\n' % (slave_info['slave_name'],
                           slave_info['most_recent_build_num']))

  return True


def GenerateIndexPage():
  """Generates a summary (landing) page for the Chrome Endure graphs."""
  logging.debug('Generating new index.html page...')

  # Page header.
  page = """
<html>

<head>
<title>Chrome Endure Overview</title>
<script language="javascript">
function DisplayGraph(name, graph) {
  document.write(
      '<td><iframe scrolling="no" height="438" width="700" src="');
  document.write(name);
  document.write('"></iframe></td>');
}
</script>
</head>

<body>
<center>

<h1>
Chrome Endure
</h1>
"""
  # Print current time.
  page += '<p>Updated: %s</p>\n' % (
      time.strftime('%A, %B %d, %Y at %I:%M:%S %p %Z'))

  # Links for each webapp.
  webapp_names = [x for x in os.listdir(LOCAL_GRAPH_DIR) if
                  x not in ['js', 'old_data'] and
                  os.path.isdir(os.path.join(LOCAL_GRAPH_DIR, x))]
  webapp_names = sorted(webapp_names)

  page += '<p> ['
  for i, name in enumerate(webapp_names):
    page += '<a href="#%s">%s</a>' % (name.upper(), name.upper())
    if i < len(webapp_names) - 1:
      page += ' | '
  page += '] </p>\n'

  # Print out the data for each webapp.
  for webapp_name in webapp_names:
    page += '\n<h1 id="%s">%s</h1>\n' % (webapp_name.upper(),
                                         webapp_name.upper())

    # Links for each test for this webapp.
    test_names = [x for x in
                  os.listdir(os.path.join(LOCAL_GRAPH_DIR, webapp_name))]
    test_names = sorted(test_names)

    page += '<p> ['
    for i, name in enumerate(test_names):
      page += '<a href="#%s">%s</a>' % (name, name)
      if i < len(test_names) - 1:
        page += ' | '
    page += '] </p>\n'

    # Print out the data for each test for this webapp.
    for test_name in test_names:
      # Get the set of graph names for this test.
      graph_names = [x[:x.find('-summary.dat')] for x in
                     os.listdir(os.path.join(LOCAL_GRAPH_DIR,
                                             webapp_name, test_name))
                     if '-summary.dat' in x and '_EVENT_' not in x]
      graph_names = sorted(graph_names)

      page += '<h2 id="%s">%s</h2>\n' % (test_name, test_name)
      page += '<table>\n'

      for i, graph_name in enumerate(graph_names):
        if i % 2 == 0:
          page += ' <tr>\n'
        page += (' <script>DisplayGraph("%s/%s?graph=%s&lookout=1");'
                 '</script>\n' % (webapp_name, test_name, graph_name))
        if i % 2 == 1:
          page += ' </tr>\n'
      if len(graph_names) % 2 == 1:
        page += ' </tr>\n'
      page += '</table>\n'

  # Page footer.
  page += """
</center>
</body>

</html>
"""

  index_file = os.path.join(LOCAL_GRAPH_DIR, 'index.html')
  with open(index_file, 'w') as f:
    f.write(page)
  os.chmod(index_file, 0755)


def main():
  parser = optparse.OptionParser()
  parser.add_option(
      '-v', '--verbose', action='store_true', default=False,
      help='Use verbose logging.')
  options, _ = parser.parse_args(sys.argv)

  logging_level = logging.DEBUG if options.verbose else logging.INFO
  logging.basicConfig(level=logging_level,
                      format='[%(asctime)s] %(levelname)s: %(message)s')

  success = UpdatePerfDataFiles()
  if not success:
    logging.error('Failed to update perf data files.')
    sys.exit(1)

  GenerateIndexPage()
  logging.debug('All done!')


if __name__ == '__main__':
  main()