OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright 2016 The Chromium Authors. All rights reserved. | 2 # Copyright 2016 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in | 6 """Script to generate chromium.perf.json and chromium.perf.fyi.json in |
7 the src/testing/buildbot directory. Maintaining these files by hand is | 7 the src/testing/buildbot directory. Maintaining these files by hand is |
8 too unwieldy. | 8 too unwieldy. |
9 """ | 9 """ |
10 | 10 |
(...skipping 231 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
242 { | 242 { |
243 'gpu': '1002:9874', | 243 'gpu': '1002:9874', |
244 'os': 'Windows-10-10586', | 244 'os': 'Windows-10-10586', |
245 'device_ids': ['build171-b4', 'build186-b4'] | 245 'device_ids': ['build171-b4', 'build186-b4'] |
246 } | 246 } |
247 ]) | 247 ]) |
248 return waterfall | 248 return waterfall |
249 | 249 |
250 def get_waterfall_config(): | 250 def get_waterfall_config(): |
251 waterfall = {'builders':[], 'testers': {}} | 251 waterfall = {'builders':[], 'testers': {}} |
| 252 |
252 # These configurations are taken from chromium_perf.py in | 253 # These configurations are taken from chromium_perf.py in |
253 # build/scripts/slave/recipe_modules/chromium_tests and must be kept in sync | 254 # build/scripts/slave/recipe_modules/chromium_tests and must be kept in sync |
254 # to generate the correct json for each tester | 255 # to generate the correct json for each tester |
255 waterfall = add_tester( | 256 waterfall = add_tester( |
256 waterfall, 'Android Galaxy S5 Perf', | 257 waterfall, 'Android Galaxy S5 Perf', |
257 'android-galaxy-s5', 'android', target_bits=32, | 258 'android-galaxy-s5', 'android', target_bits=32, |
258 num_device_shards=7, num_host_shards=3) | 259 num_device_shards=7, num_host_shards=3) |
259 waterfall = add_tester( | 260 waterfall = add_tester( |
260 waterfall, 'Android Nexus5 Perf', 'android-nexus5', | 261 waterfall, 'Android Nexus5 Perf', 'android-nexus5', |
261 'android', target_bits=32, num_device_shards=7, num_host_shards=3) | 262 'android', target_bits=32, num_device_shards=7, num_host_shards=3) |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
301 waterfall, 'Mac 10.11 Perf', 'chromium-rel-mac11', | 302 waterfall, 'Mac 10.11 Perf', 'chromium-rel-mac11', |
302 'mac', num_host_shards=5) | 303 'mac', num_host_shards=5) |
303 waterfall = add_tester( | 304 waterfall = add_tester( |
304 waterfall, 'Mac 10.10 Perf', 'chromium-rel-mac10', | 305 waterfall, 'Mac 10.10 Perf', 'chromium-rel-mac10', |
305 'mac', num_host_shards=5) | 306 'mac', num_host_shards=5) |
306 waterfall = add_tester( | 307 waterfall = add_tester( |
307 waterfall, 'Mac Retina Perf', | 308 waterfall, 'Mac Retina Perf', |
308 'chromium-rel-mac-retina', 'mac', num_host_shards=5) | 309 'chromium-rel-mac-retina', 'mac', num_host_shards=5) |
309 waterfall = add_tester( | 310 waterfall = add_tester( |
310 waterfall, 'Mac HDD Perf', 'chromium-rel-mac-hdd', 'mac', num_host_shards=5) | 311 waterfall, 'Mac HDD Perf', 'chromium-rel-mac-hdd', 'mac', num_host_shards=5) |
| 312 waterfall = add_tester( |
| 313 waterfall, 'Mac Pro 10.11 Perf', |
| 314 'chromium-rel-mac11-pro', 'mac', |
| 315 swarming=[ |
| 316 { |
| 317 'gpu': '1002:6821', |
| 318 'os': 'Mac-10.11', |
| 319 'device_ids': [ |
| 320 'build128-b1', 'build129-b1', |
| 321 'build130-b1', 'build131-b1', 'build132-b1' |
| 322 ] |
| 323 } |
| 324 ]) |
| 325 waterfall = add_tester( |
| 326 waterfall, 'Mac Air 10.11 Perf', |
| 327 'chromium-rel-mac11-air', 'mac', |
| 328 swarming=[ |
| 329 { |
| 330 'gpu': '8086:1626', |
| 331 'os': 'Mac-10.11', |
| 332 'device_ids': [ |
| 333 'build123-b1', 'build124-b1', |
| 334 'build125-b1', 'build126-b1', 'build127-b1' |
| 335 ] |
| 336 } |
| 337 ]) |
311 | 338 |
312 waterfall = add_tester( | 339 waterfall = add_tester( |
313 waterfall, 'Linux Perf', 'linux-release', 'linux', num_host_shards=5) | 340 waterfall, 'Linux Perf', 'linux-release', 'linux', num_host_shards=5) |
314 | 341 |
315 return waterfall | 342 return waterfall |
316 | 343 |
317 def generate_telemetry_test(swarming_dimensions, benchmark_name, browser): | 344 def generate_telemetry_test(swarming_dimensions, benchmark_name, browser): |
318 # The step name must end in 'test' or 'tests' in order for the | 345 # The step name must end in 'test' or 'tests' in order for the |
319 # results to automatically show up on the flakiness dashboard. | 346 # results to automatically show up on the flakiness dashboard. |
320 # (At least, this was true some time ago.) Continue to use this | 347 # (At least, this was true some time ago.) Continue to use this |
(...skipping 97 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
418 | 445 |
# Benchmarks emitted for waterfalls generated with use_whitelist=True
# (main() enables this only for chromium.perf.fyi); consumed by
# current_benchmarks().
BENCHMARK_NAME_WHITELIST = set([
    u'smoothness.top_25_smooth',
    u'sunspider',
    u'system_health.webview_startup',
    u'page_cycler_v2.intl_hi_ru',
    u'dromaeo.cssqueryjquery',
])
426 | 453 |
427 | 454 |
def current_benchmarks(use_whitelist=False):
  """Discovers all benchmark classes under the sibling benchmarks/ directory.

  Args:
    use_whitelist: If True, restrict the result to benchmarks whose
        Name() appears in BENCHMARK_NAME_WHITELIST. Defaults to False
        (all discovered benchmarks) for backward compatibility.

  Returns:
    A list of benchmark classes sorted by benchmark name.
  """
  current_dir = os.path.dirname(__file__)
  benchmarks_dir = os.path.join(current_dir, 'benchmarks')
  top_level_dir = os.path.dirname(benchmarks_dir)

  all_benchmarks = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
      index_by_class_name=True).values()
  if use_whitelist:
    # Lazily filter; sorted() below materializes the final list.
    all_benchmarks = (
        bench for bench in all_benchmarks
        if bench.Name() in BENCHMARK_NAME_WHITELIST)
  return sorted(all_benchmarks, key=lambda b: b.Name())
439 | 468 |
440 | 469 |
441 def generate_all_tests(waterfall): | 470 def generate_all_tests(waterfall, use_whitelist): |
442 tests = {} | 471 tests = {} |
443 for builder in waterfall['builders']: | 472 for builder in waterfall['builders']: |
444 tests[builder] = {} | 473 tests[builder] = {} |
445 all_benchmarks = current_benchmarks() | 474 all_benchmarks = current_benchmarks(use_whitelist) |
446 | 475 |
447 for name, config in waterfall['testers'].iteritems(): | 476 for name, config in waterfall['testers'].iteritems(): |
448 if config.get('swarming', False): | 477 if config.get('swarming', False): |
449 # Right now we are only generating benchmarks for the fyi waterfall | 478 # Right now we are only generating benchmarks for the fyi waterfall |
450 isolated_scripts = generate_telemetry_tests(config, all_benchmarks) | 479 isolated_scripts = generate_telemetry_tests(config, all_benchmarks) |
451 tests[name] = { | 480 tests[name] = { |
452 'isolated_scripts': sorted(isolated_scripts, key=lambda x: x['name']) | 481 'isolated_scripts': sorted(isolated_scripts, key=lambda x: x['name']) |
453 } | 482 } |
454 else: | 483 else: |
455 # scripts are only currently run in addition to the main waterfall. They | 484 # scripts are only currently run in addition to the main waterfall. They |
(...skipping 19 matching lines...) Expand all Loading... |
475 json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True) | 504 json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True) |
476 fp.write('\n') | 505 fp.write('\n') |
477 | 506 |
478 | 507 |
def main():
  """Builds both waterfall configs and regenerates their buildbot json."""
  waterfall = get_waterfall_config()
  fyi_waterfall = get_fyi_waterfall_config()
  waterfall['name'] = 'chromium.perf'
  fyi_waterfall['name'] = 'chromium.perf.fyi'

  # Only the fyi waterfall is limited to the benchmark whitelist.
  generate_all_tests(fyi_waterfall, True)
  generate_all_tests(waterfall, False)
  return 0

if __name__ == "__main__":
  sys.exit(main())
OLD | NEW |