| OLD | NEW |
| 1 #!/usr/bin/python | 1 #!/usr/bin/python |
| 2 | 2 |
| 3 # Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file | 3 # Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
| 4 # for details. All rights reserved. Use of this source code is governed by a | 4 # for details. All rights reserved. Use of this source code is governed by a |
| 5 # BSD-style license that can be found in the LICENSE file. | 5 # BSD-style license that can be found in the LICENSE file. |
| 6 | 6 |
| 7 import datetime | 7 import datetime |
| 8 import math | 8 import math |
| 9 import optparse | 9 import optparse |
| 10 import os | 10 import os |
| 11 from os.path import dirname, abspath | 11 from os.path import dirname, abspath |
| 12 import pickle | 12 import pickle |
| 13 import platform | 13 import platform |
| 14 import re | 14 import re |
| 15 import shutil | 15 import shutil |
| 16 import stat | 16 import stat |
| 17 import subprocess | 17 import subprocess |
| 18 import sys | 18 import sys |
| 19 import time | 19 import time |
| 20 | 20 |
| 21 TOOLS_PATH = dirname(dirname(dirname(abspath(__file__)))) | 21 TOOLS_PATH = dirname(dirname(dirname(abspath(__file__)))) |
| 22 DART_INSTALL_LOCATION = abspath(os.path.join(dirname(abspath(__file__)), | 22 TOP_LEVEL_DIR = abspath(os.path.join(dirname(abspath(__file__)), '..', '..', |
| 23 '..', '..', '..')) | 23 '..')) |
| 24 DART_REPO_LOC = abspath(os.path.join(dirname(abspath(__file__)), '..', '..', |
| 25 '..', '..', '..', |
| 26 'dart_checkout_for_perf_testing', |
| 27 'dart')) |
| 24 sys.path.append(TOOLS_PATH) | 28 sys.path.append(TOOLS_PATH) |
| 25 sys.path.append(os.path.join(DART_INSTALL_LOCATION, 'internal', 'tests')) | 29 sys.path.append(os.path.join(TOP_LEVEL_DIR, 'internal', 'tests')) |
| 26 import post_results | 30 import post_results |
| 27 import utils | 31 import utils |
| 28 | 32 |
| 29 """This script runs to track performance and size progress of | 33 """This script runs to track performance and size progress of |
| 30 different svn revisions. It tests to see if there is a newer version of the code on | 34 different svn revisions. It tests to see if there is a newer version of the code on |
| 31 the server, and will sync and run the performance tests if so.""" | 35 the server, and will sync and run the performance tests if so.""" |
| 32 class TestRunner(object): | 36 class TestRunner(object): |
| 33 | 37 |
| 34 def __init__(self): | 38 def __init__(self): |
| 35 self.verbose = False | 39 self.verbose = False |
| (...skipping 34 matching lines...) |
| 70 print stderr | 74 print stderr |
| 71 return output, stderr | 75 return output, stderr |
| 72 | 76 |
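Most of run_cmd's body is elided in this diff. For reference, a minimal sketch with the same shape, inferred purely from the call sites in this file (a command list, an optional output file to tee into, an append flag, and a std_in string) rather than copied from the elided body; the real helper also honors self.verbose:

    import subprocess

    def run_cmd(cmd_list, outfile=None, append=False, std_in=''):
      # Run the command, capturing stdout and stderr; optionally tee
      # stdout into a trace file. A sketch only -- not the CL's code.
      p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE, stdin=subprocess.PIPE)
      output, stderr = p.communicate(std_in)
      if outfile:
        mode = 'a' if append else 'w'
        f = open(outfile, mode)
        f.write(output)
        f.close()
      return output, stderr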
| 73 def time_cmd(self, cmd): | 77 def time_cmd(self, cmd): |
| 74 """Determine the amount of (real) time it takes to execute a given | 78 """Determine the amount of (real) time it takes to execute a given |
| 75 command.""" | 79 command.""" |
| 76 start = time.time() | 80 start = time.time() |
| 77 self.run_cmd(cmd) | 81 self.run_cmd(cmd) |
| 78 return time.time() - start | 82 return time.time() - start |
| 79 | 83 |
| 84 def clear_out_unversioned_files(self): |
| 85 """Remove all files that are unversioned by svn.""" |
| 86 if os.path.exists(DART_REPO_LOC): |
| 87 os.chdir(DART_REPO_LOC) |
| 88 results, _ = self.run_cmd(['svn', 'st']) |
| 89 for line in results.split('\n'): |
| 90 if line.startswith('?'): |
| 91 to_remove = line.split()[1] |
| 92 if os.path.isdir(to_remove): |
| 93 shutil.rmtree(to_remove) |
| 94 else: |
| 95 os.remove(to_remove) |
| 96 |
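clear_out_unversioned_files relies on the fact that `svn st` flags unversioned entries with a leading '?', so line.split()[1] recovers the path (this simple split would break on paths containing spaces). A toy illustration with made-up status output:

    sample = '?       internal\nM       tools/build.py\n?       out\n'
    unversioned = [line.split()[1] for line in sample.split('\n')
                   if line.startswith('?')]
    assert unversioned == ['internal', 'out']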
| 80 def sync_and_build(self, suites, revision_num=''): | 97 def sync_and_build(self, suites, revision_num=''): |
| 81 """Make sure we have the latest version of of the repo, and build it. We | 98 """Make sure we have the latest version of of the repo, and build it. We |
| 82 begin and end standing in DART_INSTALL_LOCATION. | 99 begin and end standing in DART_REPO_LOC. |
| 83 | 100 |
| 84 Args: | 101 Args: |
| 85 suites: The set of suites that we wish to build. | 102 suites: The set of suites that we wish to build. |
| 86 | 103 |
| 87 Returns: | 104 Returns: |
| 88 err_code = 1 if there was a problem building.""" | 105 err_code = 1 if there was a problem building.""" |
| 89 os.chdir(DART_INSTALL_LOCATION) | 106 os.chdir(dirname(DART_REPO_LOC)) |
| 90 | 107 self.clear_out_unversioned_files() |
| 91 if revision_num == '': | 108 if revision_num == '': |
| 92 self.run_cmd(['gclient', 'sync']) | 109 self.run_cmd(['gclient', 'sync']) |
| 93 else: | 110 else: |
| 94 self.run_cmd(['gclient', 'sync', '-r', revision_num, '-t']) | 111 self.run_cmd(['gclient', 'sync', '-r', revision_num, '-t']) |
| 112 os.chdir(DART_REPO_LOC) |
| 113 shutil.copytree(os.path.join(TOP_LEVEL_DIR, 'internal'), |
| 114 os.path.join(DART_REPO_LOC, 'internal')) |
| 115 shutil.copy(os.path.join(TOP_LEVEL_DIR, 'tools', 'get_archive.py'), |
| 116 os.path.join(DART_REPO_LOC, 'tools', 'get_archive.py')) |
| 95 | 117 |
| 96 if revision_num == '': | 118 if revision_num == '': |
| 97 revision_num = search_for_revision(['svn', 'info']) | 119 revision_num = search_for_revision(['svn', 'info']) |
| 98 if revision_num == -1: | 120 if revision_num == -1: |
| 99 revision_num = search_for_revision(['git', 'svn', 'info']) | 121 revision_num = search_for_revision(['git', 'svn', 'info']) |
| 100 _, stderr = self.run_cmd(['python', os.path.join(DART_INSTALL_LOCATION, | 122 |
| 101 'tools', 'get_archive.py'), 'sdk', '-r', revision_num]) | 123 get_archive_path = os.path.join(DART_REPO_LOC, 'tools', 'get_archive.py') |
| 102 if 'InvalidUriError' in stderr: | 124 if os.path.exists(get_archive_path): |
| 103 return 1 | 125 cmd = ['python', get_archive_path, 'sdk'] |
| 126 if revision_num != -1: |
| 127 cmd += ['-r', revision_num] |
| 128 _, stderr = self.run_cmd(cmd) |
| 129 if not os.path.exists(get_archive_path) or 'InvalidUriError' in stderr: |
| 130 # Couldn't find the SDK on Google Storage. Build it locally. |
| 131 |
| 132 # On Windows, the output directory is marked as "Read Only," which causes |
| 133 # an error to be thrown when we use shutil.rmtree. This helper function |
| 134 # changes the permissions so we can still delete the directory. |
| 135 def on_rm_error(func, path, exc_info): |
| 136 if os.path.exists(path): |
| 137 os.chmod(path, stat.S_IWRITE) |
| 138 os.unlink(path) |
| 139 # TODO(efortuna): Currently always building ia32 architecture because we |
| 140 # don't have test statistics for what's passing on x64. Eliminate arch |
| 141 # specification when we have tests running on x64, too. |
| 142 shutil.rmtree(os.path.join(os.getcwd(), |
| 143 utils.GetBuildRoot(utils.GuessOS(), 'release', 'ia32')), |
| 144 onerror=on_rm_error) |
| 145 lines = self.run_cmd([os.path.join('.', 'tools', 'build.py'), '-m', |
| 146 'release', '--arch=ia32', 'create_sdk']) |
| 147 |
| 148 for line in lines: |
| 149 if 'BUILD FAILED' in line: |
| 150 # Someone checked in a broken build! Stop trying to make it work |
| 151 # and wait to try again. |
| 152 print 'Broken Build' |
| 153 return 1 |
| 104 return 0 | 154 return 0 |
| 105 | 155 |
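The on_rm_error hook above is the standard Windows workaround for deleting read-only files: shutil.rmtree reports each failure to the hook, which clears the read-only bit and retries. A self-contained sketch of the same pattern ('some_build_dir' is a placeholder):

    import os
    import shutil
    import stat

    def on_rm_error(func, path, exc_info):
      # rmtree passes the function that failed, the offending path, and
      # sys.exc_info(); making the file writable lets the delete succeed.
      if os.path.exists(path):
        os.chmod(path, stat.S_IWRITE)
        os.unlink(path)

    shutil.rmtree('some_build_dir', onerror=on_rm_error)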
| 106 def ensure_output_directory(self, dir_name): | 156 def ensure_output_directory(self, dir_name): |
| 107 """Test that the listed directory name exists, and if not, create one for | 157 """Test that the listed directory name exists, and if not, create one for |
| 108 our output to be placed. | 158 our output to be placed. |
| 109 | 159 |
| 110 Args: | 160 Args: |
| 111 dir_name: the directory we will create if it does not exist.""" | 161 dir_name: the directory we will create if it does not exist.""" |
| 112 dir_path = os.path.join(DART_INSTALL_LOCATION, 'tools', | 162 dir_path = os.path.join(TOP_LEVEL_DIR, 'tools', |
| 113 'testing', 'perf_testing', dir_name) | 163 'testing', 'perf_testing', dir_name) |
| 114 if not os.path.exists(dir_path): | 164 if not os.path.exists(dir_path): |
| 115 os.makedirs(dir_path) | 165 os.makedirs(dir_path) |
| 116 print 'Creating output directory ', dir_path | 166 print 'Creating output directory ', dir_path |
| 117 | 167 |
| 118 def has_new_code(self): | 168 def has_new_code(self): |
| 119 """Tests if there are any newer versions of files on the server.""" | 169 """Tests if there are any newer versions of files on the server.""" |
| 120 os.chdir(DART_INSTALL_LOCATION) | 170 os.chdir(DART_REPO_LOC) |
| 121 # Pass 'p' in if we have a new certificate for the svn server, we want to | 171 # Pass 'p' in if we have a new certificate for the svn server, we want to |
| 122 # (p)ermanently accept it. | 172 # (p)ermanently accept it. |
| 123 results, _ = self.run_cmd(['svn', 'st', '-u'], std_in='p\r\n') | 173 results, _ = self.run_cmd(['svn', 'st', '-u'], std_in='p\r\n') |
| 124 for line in results: | 174 for line in results: |
| 125 if '*' in line: | 175 if '*' in line: |
| 126 return True | 176 return True |
| 127 return False | 177 return False |
| 128 | 178 |
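has_new_code leans on `svn status -u` contacting the server and marking entries that have a newer revision there with a '*'. A minimal standalone sketch of that check, minus the certificate-acceptance stdin plumbing used above:

    import subprocess

    def has_new_code(repo_dir):
      # Any '*' in 'svn st -u' output means the server holds a newer
      # revision of some entry than the working copy does.
      p = subprocess.Popen(['svn', 'st', '-u'], cwd=repo_dir,
                           stdout=subprocess.PIPE)
      output, _ = p.communicate()
      return any('*' in line for line in output.split('\n'))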
| 129 def get_os_directory(self): | 179 def get_os_directory(self): |
| 130 """Specifies the name of the directory for the testing build of dart, which | 180 """Specifies the name of the directory for the testing build of dart, which |
| 131 uses yet another naming convention, different from utils.GetBuildRoot(...).""" | 181 uses yet another naming convention, different from utils.GetBuildRoot(...).""" |
| 132 if platform.system() == 'Windows': | 182 if platform.system() == 'Windows': |
| 133 return 'windows' | 183 return 'windows' |
| 134 elif platform.system() == 'Darwin': | 184 elif platform.system() == 'Darwin': |
| 135 return 'macos' | 185 return 'macos' |
| 136 else: | 186 else: |
| 137 return 'linux' | 187 return 'linux' |
| 138 | 188 |
| 139 def parse_args(self): | 189 def parse_args(self): |
| 140 parser = optparse.OptionParser() | 190 parser = optparse.OptionParser() |
| 141 parser.add_option('--suites', '-s', dest='suites', help='Run the specified ' | 191 parser.add_option('--suites', '-s', dest='suites', help='Run the specified ' |
| 142 'comma-separated test suites from set: %s' % \ | 192 'comma-separated test suites from set: %s' % \ |
| 143 ','.join(TestBuilder.available_suite_names()), | 193 ','.join(TestBuilder.available_suite_names()), |
| 144 action='store', default=None) | 194 action='store', default=None) |
| 145 parser.add_option('--forever', '-f', dest='continuous', help='Run this ' | 195 parser.add_option('--forever', '-f', dest='continuous', help='Run this ' |
| 146 'script forever, always checking for the next svn checkin', | 196 'script forever, always checking for the next svn checkin', |
| 147 action='store_true', default=False) | 197 action='store_true', default=False) |
| 198 parser.add_option('--incremental', '-i', dest='incremental', |
| 199 help='Start at an early revision and work your way ' |
| 200 'forward through CLs sequentially', action='store_true', |
| 201 default=False) |
| 148 parser.add_option('--nobuild', '-n', dest='no_build', action='store_true', | 202 parser.add_option('--nobuild', '-n', dest='no_build', action='store_true', |
| 149 help='Do not sync with the repository and do not ' | 203 help='Do not sync with the repository and do not ' |
| 150 'rebuild.', default=False) | 204 'rebuild.', default=False) |
| 151 parser.add_option('--noupload', '-u', dest='no_upload', action='store_true', | 205 parser.add_option('--noupload', '-u', dest='no_upload', action='store_true', |
| 152 help='Do not post the results of the run.', default=False) | 206 help='Do not post the results of the run.', default=False) |
| 153 parser.add_option('--notest', '-t', dest='no_test', action='store_true', | 207 parser.add_option('--notest', '-t', dest='no_test', action='store_true', |
| 154 help='Do not run the tests.', default=False) | 208 help='Do not run the tests.', default=False) |
| 155 parser.add_option('--verbose', '-v', dest='verbose', help='Print extra ' | 209 parser.add_option('--verbose', '-v', dest='verbose', help='Print extra ' |
| 156 'debug output', action='store_true', default=False) | 210 'debug output', action='store_true', default=False) |
| 157 | 211 |
| 158 args, ignored = parser.parse_args() | 212 args, ignored = parser.parse_args() |
| 159 | 213 |
| 160 if not args.suites: | 214 if not args.suites: |
| 161 suites = TestBuilder.available_suite_names() | 215 suites = TestBuilder.available_suite_names() |
| 162 else: | 216 else: |
| 163 suites = [] | 217 suites = [] |
| 164 suitelist = args.suites.split(',') | 218 suitelist = args.suites.split(',') |
| 165 for name in suitelist: | 219 for name in suitelist: |
| 166 if name in TestBuilder.available_suite_names(): | 220 if name in TestBuilder.available_suite_names(): |
| 167 suites.append(name) | 221 suites.append(name) |
| 168 else: | 222 else: |
| 169 print ('Error: Invalid suite %s not in ' % name) + \ | 223 print ('Error: Invalid suite %s not in ' % name) + \ |
| 170 '%s' % ','.join(TestBuilder.available_suite_names()) | 224 '%s' % ','.join(TestBuilder.available_suite_names()) |
| 171 sys.exit(1) | 225 sys.exit(1) |
| 172 self.suite_names = suites | 226 self.suite_names = suites |
| 173 self.no_build = args.no_build | 227 self.no_build = args.no_build |
| 174 self.no_upload = args.no_upload | 228 self.no_upload = args.no_upload |
| 175 self.no_test = args.no_test | 229 self.no_test = args.no_test |
| 176 self.verbose = args.verbose | 230 self.verbose = args.verbose |
| 177 return args.continuous | 231 return args.continuous, args.incremental |
| 178 | 232 |
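A condensed, runnable sketch of how optparse handles these flags; the suite names here are illustrative only, since the real set comes from TestBuilder.available_suite_names():

    import optparse

    parser = optparse.OptionParser()
    # --suites takes a comma-separated list; the rest are boolean flags.
    parser.add_option('--suites', '-s', dest='suites', default=None)
    parser.add_option('--forever', '-f', dest='continuous',
                      action='store_true', default=False)
    args, _ = parser.parse_args(['-s', 'time-size,dromaeo', '-f'])
    assert args.suites.split(',') == ['time-size', 'dromaeo']
    assert args.continuous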
| 179 def run_test_sequence(self, revision_num='', num_reruns=1): | 233 def run_test_sequence(self, revision_num='', num_reruns=1): |
| 180 """Run the set of commands to (possibly) build, run, and post the results | 234 """Run the set of commands to (possibly) build, run, and post the results |
| 181 of our tests. Returns 0 on a successful run, 1 if we fail to post results or | 235 of our tests. Returns 0 on a successful run, 1 if we fail to post results or |
| 182 the run failed, -1 if the build is broken. | 236 the run failed, -1 if the build is broken. |
| 183 """ | 237 """ |
| 184 suites = [] | 238 suites = [] |
| 185 success = True | 239 success = True |
| 240 if not self.no_build and self.sync_and_build(suites, revision_num) == 1: |
| 241 return -1 # The build is broken. |
| 242 |
| 186 for name in self.suite_names: | 243 for name in self.suite_names: |
| 187 for run in range(num_reruns): | 244 for run in range(num_reruns): |
| 188 suites += [TestBuilder.make_test(name, self)] | 245 suites += [TestBuilder.make_test(name, self)] |
| 189 | 246 |
| 190 if not self.no_build and self.sync_and_build(suites, revision_num) == 1: | |
| 191 return -1 # The build is broken. | |
| 192 | |
| 193 for test in suites: | 247 for test in suites: |
| 194 success = test.run() and success | 248 success = test.run() and success |
| 195 if success: | 249 if success: |
| 196 return 0 | 250 return 0 |
| 197 else: | 251 else: |
| 198 return 1 | 252 return 1 |
| 199 | 253 |
| 200 | 254 |
| 201 class Test(object): | 255 class Test(object): |
| 202 """The base class to provide shared code for different tests we will run and | 256 """The base class to provide shared code for different tests we will run and |
| (...skipping 51 matching lines...) |
| 254 """ | 308 """ |
| 255 return True | 309 return True |
| 256 | 310 |
| 257 def run(self): | 311 def run(self): |
| 258 """Run the benchmarks/tests from the command line and plot the | 312 """Run the benchmarks/tests from the command line and plot the |
| 259 results. | 313 results. |
| 260 """ | 314 """ |
| 261 for visitor in [self.tester, self.file_processor]: | 315 for visitor in [self.tester, self.file_processor]: |
| 262 visitor.prepare() | 316 visitor.prepare() |
| 263 | 317 |
| 264 os.chdir(DART_INSTALL_LOCATION) | 318 os.chdir(TOP_LEVEL_DIR) |
| 265 self.test_runner.ensure_output_directory(self.result_folder_name) | 319 self.test_runner.ensure_output_directory(self.result_folder_name) |
| 266 self.test_runner.ensure_output_directory(os.path.join( | 320 self.test_runner.ensure_output_directory(os.path.join( |
| 267 'old', self.result_folder_name)) | 321 'old', self.result_folder_name)) |
| 322 os.chdir(DART_REPO_LOC) |
| 268 if not self.test_runner.no_test: | 323 if not self.test_runner.no_test: |
| 269 self.tester.run_tests() | 324 self.tester.run_tests() |
| 270 | 325 |
| 271 os.chdir(os.path.join('tools', 'testing', 'perf_testing')) | 326 os.chdir(os.path.join(TOP_LEVEL_DIR, 'tools', 'testing', 'perf_testing')) |
| 272 | |
| 273 for afile in os.listdir(os.path.join('old', self.result_folder_name)): | |
| 274 if not afile.startswith('.'): | |
| 275 self.file_processor.process_file(afile, False) | |
| 276 | 327 |
| 277 files = os.listdir(self.result_folder_name) | 328 files = os.listdir(self.result_folder_name) |
| 278 post_success = True | 329 post_success = True |
| 330 os.chdir(TOP_LEVEL_DIR) |
| 279 for afile in files: | 331 for afile in files: |
| 280 if not afile.startswith('.'): | 332 if not afile.startswith('.'): |
| 281 should_move_file = self.file_processor.process_file(afile, True) | 333 should_move_file = self.file_processor.process_file(afile, True) |
| 282 if should_move_file: | 334 if should_move_file: |
| 283 shutil.move(os.path.join(self.result_folder_name, afile), | 335 shutil.move(os.path.join(self.result_folder_name, afile), |
| 284 os.path.join('old', self.result_folder_name, afile)) | 336 os.path.join('old', self.result_folder_name, afile)) |
| 285 else: | 337 else: |
| 286 post_success = False | 338 post_success = False |
| 287 | 339 |
| 288 return post_success | 340 return post_success |
| 289 | 341 |
| 290 | 342 |
| 291 class Tester(object): | 343 class Tester(object): |
| 292 """The base level visitor class that runs tests. It contains convenience | 344 """The base level visitor class that runs tests. It contains convenience |
| 293 methods that many Tester objects use. Any class that would like to be a | 345 methods that many Tester objects use. Any class that would like to be a |
| 294 TesterVisitor must implement the run_tests() method.""" | 346 TesterVisitor must implement the run_tests() method.""" |
| 295 | 347 |
| 296 def __init__(self, test): | 348 def __init__(self, test): |
| 297 self.test = test | 349 self.test = test |
| 298 | 350 |
| 299 def prepare(self): | 351 def prepare(self): |
| 300 """Perform any initial setup required before the test is run.""" | 352 """Perform any initial setup required before the test is run.""" |
| 301 pass | 353 pass |
| 302 | 354 |
| 303 def add_svn_revision_to_trace(self, outfile, browser = None): | 355 def add_svn_revision_to_trace(self, outfile, browser = None): |
| 304 """Add the svn version number to the provided tracefile.""" | 356 """Add the svn version number to the provided tracefile.""" |
| 305 def get_dartium_revision(): | 357 def get_dartium_revision(): |
| 306 version_file_name = os.path.join(DART_INSTALL_LOCATION, 'client', 'tests', | 358 version_file_name = os.path.join(TOP_LEVEL_DIR, 'client', 'tests', |
| 307 'dartium', 'LAST_VERSION') | 359 'dartium', 'LAST_VERSION') |
| 308 version_file = open(version_file_name, 'r') | 360 version_file = open(version_file_name, 'r') |
| 309 version = version_file.read().split('.')[-2] | 361 version = version_file.read().split('.')[-2] |
| 310 version_file.close() | 362 version_file.close() |
| 311 return version | 363 return version |
| 312 | 364 |
| 313 if browser and browser == 'dartium': | 365 if browser and browser == 'dartium': |
| 314 revision = get_dartium_revision() | 366 revision = get_dartium_revision() |
| 315 self.test.test_runner.run_cmd(['echo', 'Revision: ' + revision], outfile) | 367 self.test.test_runner.run_cmd(['echo', 'Revision: ' + revision], outfile) |
| 316 else: | 368 else: |
| (...skipping 19 matching lines...) Expand all Loading... |
| 336 """Perform any initial setup required before the test is run.""" | 388 """Perform any initial setup required before the test is run.""" |
| 337 pass | 389 pass |
| 338 | 390 |
| 339 def open_trace_file(self, afile, not_yet_uploaded): | 391 def open_trace_file(self, afile, not_yet_uploaded): |
| 340 """Find the correct location for the trace file, and open it. | 392 """Find the correct location for the trace file, and open it. |
| 341 Args: | 393 Args: |
| 342 afile: The tracefile name. | 394 afile: The tracefile name. |
| 343 not_yet_uploaded: True if this file is to be found in a directory that | 395 not_yet_uploaded: True if this file is to be found in a directory that |
| 344 contains un-uploaded data. | 396 contains un-uploaded data. |
| 345 Returns: A file object corresponding to the given file name.""" | 397 Returns: A file object corresponding to the given file name.""" |
| 346 file_path = os.path.join(self.test.result_folder_name, afile) | 398 file_path = os.path.join(TOP_LEVEL_DIR, 'tools', 'testing', 'perf_testing', self.test.result_folder_name, afile) |
| 347 if not not_yet_uploaded: | 399 if not not_yet_uploaded: |
| 348 file_path = os.path.join('old', file_path) | 400 file_path = os.path.join(TOP_LEVEL_DIR, 'tools', 'testing', 'perf_testing', 'old', self.test.result_folder_name, afile) |
| 349 return open(file_path) | 401 return open(file_path) |
| 350 | 402 |
| 351 def report_results(self, benchmark_name, score, platform, variant, | 403 def report_results(self, benchmark_name, score, platform, variant, |
| 352 revision_number, metric): | 404 revision_number, metric): |
| 353 """Store the results of the benchmark run. | 405 """Store the results of the benchmark run. |
| 354 Args: | 406 Args: |
| 355 benchmark_name: The name of the individual benchmark. | 407 benchmark_name: The name of the individual benchmark. |
| 356 score: The numerical value of this benchmark. | 408 score: The numerical value of this benchmark. |
| (...skipping 65 matching lines...) |
| 422 if add_dartium: | 474 if add_dartium: |
| 423 browsers += ['dartium'] | 475 browsers += ['dartium'] |
| 424 has_shell = False | 476 has_shell = False |
| 425 if platform.system() == 'Darwin': | 477 if platform.system() == 'Darwin': |
| 426 browsers += ['safari'] | 478 browsers += ['safari'] |
| 427 if platform.system() == 'Windows': | 479 if platform.system() == 'Windows': |
| 428 browsers += ['ie'] | 480 browsers += ['ie'] |
| 429 has_shell = True | 481 has_shell = True |
| 430 if 'dartium' in browsers: | 482 if 'dartium' in browsers: |
| 431 # Fetch it if necessary. | 483 # Fetch it if necessary. |
| 432 get_dartium = ['python', | 484 get_dartium = ['python', os.path.join(DART_REPO_LOC, 'tools', |
| 433 os.path.join(DART_INSTALL_LOCATION, 'tools', | |
| 434 'get_archive.py'), 'dartium'] | 485 'get_archive.py'), 'dartium'] |
| 435 # TODO(vsm): It's inconvenient that run_cmd isn't in scope here. | 486 # TODO(vsm): It's inconvenient that run_cmd isn't in scope here. |
| 436 # Perhaps there is a better place to put that or this. | 487 # Perhaps there is a better place to put that or this. |
| 437 subprocess.call(get_dartium, stdout=sys.stdout, stderr=sys.stderr, | 488 subprocess.call(get_dartium, stdout=sys.stdout, stderr=sys.stderr, |
| 438 shell=has_shell) | 489 shell=has_shell) |
| 439 return browsers | 490 return browsers |
| 440 | 491 |
| 441 | 492 |
| 442 class CommonBrowserTest(RuntimePerformanceTest): | 493 class CommonBrowserTest(RuntimePerformanceTest): |
| 443 """Runs this basic performance tests (Benchpress, some V8 benchmarks) in the | 494 """Runs this basic performance tests (Benchpress, some V8 benchmarks) in the |
| (...skipping 16 matching lines...) |
| 460 @staticmethod | 511 @staticmethod |
| 461 def get_standalone_benchmarks(): | 512 def get_standalone_benchmarks(): |
| 462 return ['Mandelbrot', 'DeltaBlue', 'Richards', 'NBody', 'BinaryTrees', | 513 return ['Mandelbrot', 'DeltaBlue', 'Richards', 'NBody', 'BinaryTrees', |
| 463 'Fannkuch', 'Meteor', 'BubbleSort', 'Fibonacci', 'Loop', 'Permute', | 514 'Fannkuch', 'Meteor', 'BubbleSort', 'Fibonacci', 'Loop', 'Permute', |
| 464 'Queens', 'QuickSort', 'Recurse', 'Sieve', 'Sum', 'Tak', 'Takl', 'Towers', | 515 'Queens', 'QuickSort', 'Recurse', 'Sieve', 'Sum', 'Tak', 'Takl', 'Towers', |
| 465 'TreeSort'] | 516 'TreeSort'] |
| 466 | 517 |
| 467 class CommonBrowserTester(BrowserTester): | 518 class CommonBrowserTester(BrowserTester): |
| 468 def run_tests(self): | 519 def run_tests(self): |
| 469 """Run a performance test in the browser.""" | 520 """Run a performance test in the browser.""" |
| 521 os.chdir(DART_REPO_LOC) |
| 470 self.test.test_runner.run_cmd([ | 522 self.test.test_runner.run_cmd([ |
| 471 'python', os.path.join('internal', 'browserBenchmarks', | 523 'python', os.path.join('internal', 'browserBenchmarks', |
| 472 'make_web_benchmarks.py')]) | 524 'make_web_benchmarks.py')]) |
| 473 | 525 |
| 474 for browser in self.test.platform_list: | 526 for browser in self.test.platform_list: |
| 475 for version in self.test.versions: | 527 for version in self.test.versions: |
| 476 if not self.test.is_valid_combination(browser, version): | 528 if not self.test.is_valid_combination(browser, version): |
| 477 continue | 529 continue |
| 478 self.test.trace_file = os.path.join( | 530 self.test.trace_file = os.path.join(TOP_LEVEL_DIR, |
| 479 'tools', 'testing', 'perf_testing', self.test.result_folder_name, | 531 'tools', 'testing', 'perf_testing', self.test.result_folder_name, |
| 480 'perf-%s-%s-%s' % (self.test.cur_time, browser, version)) | 532 'perf-%s-%s-%s' % (self.test.cur_time, browser, version)) |
| 481 self.add_svn_revision_to_trace(self.test.trace_file, browser) | 533 self.add_svn_revision_to_trace(self.test.trace_file, browser) |
| 482 file_path = os.path.join( | 534 file_path = os.path.join( |
| 483 os.getcwd(), 'internal', 'browserBenchmarks', | 535 os.getcwd(), 'internal', 'browserBenchmarks', |
| 484 'benchmark_page_%s.html' % version) | 536 'benchmark_page_%s.html' % version) |
| 485 self.test.test_runner.run_cmd( | 537 self.test.test_runner.run_cmd( |
| 486 ['python', os.path.join('tools', 'testing', 'run_selenium.py'), | 538 ['python', os.path.join('tools', 'testing', 'run_selenium.py'), |
| 487 '--out', file_path, '--browser', browser, | 539 '--out', file_path, '--browser', browser, |
| 488 '--timeout', '600', '--mode', 'perf'], self.test.trace_file, | 540 '--timeout', '600', '--mode', 'perf'], self.test.trace_file, |
| 489 append=True) | 541 append=True) |
| 490 | 542 |
| 491 class CommonBrowserFileProcessor(Processor): | 543 class CommonBrowserFileProcessor(Processor): |
| 492 | 544 |
| 493 def process_file(self, afile, should_post_file): | 545 def process_file(self, afile, should_post_file): |
| 494 """Comb through the html to find the performance results. | 546 """Comb through the html to find the performance results. |
| 495 Returns: True if we successfully posted our data to storage and/or we can | 547 Returns: True if we successfully posted our data to storage and/or we can |
| 496 delete the trace file.""" | 548 delete the trace file.""" |
| 497 os.chdir(os.path.join(DART_INSTALL_LOCATION, 'tools', | 549 os.chdir(os.path.join(TOP_LEVEL_DIR, 'tools', |
| 498 'testing', 'perf_testing')) | 550 'testing', 'perf_testing')) |
| 499 parts = afile.split('-') | 551 parts = afile.split('-') |
| 500 browser = parts[2] | 552 browser = parts[2] |
| 501 version = parts[3] | 553 version = parts[3] |
| 502 f = self.open_trace_file(afile, should_post_file) | 554 f = self.open_trace_file(afile, should_post_file) |
| 503 lines = f.readlines() | 555 lines = f.readlines() |
| 504 line = '' | 556 line = '' |
| 505 i = 0 | 557 i = 0 |
| 506 revision_num = 0 | 558 revision_num = 0 |
| 507 while '<div id="results">' not in line and i < len(lines): | 559 while '<div id="results">' not in line and i < len(lines): |
| (...skipping 99 matching lines...) |
| 607 def get_dromaeo_benchmarks(): | 659 def get_dromaeo_benchmarks(): |
| 608 valid = DromaeoTester.get_valid_dromaeo_tags() | 660 valid = DromaeoTester.get_valid_dromaeo_tags() |
| 609 benchmarks = reduce(lambda l1,l2: l1+l2, | 661 benchmarks = reduce(lambda l1,l2: l1+l2, |
| 610 [tests for (tag, tests) in | 662 [tests for (tag, tests) in |
| 611 DromaeoTester.DROMAEO_BENCHMARKS.values() | 663 DromaeoTester.DROMAEO_BENCHMARKS.values() |
| 612 if tag in valid]) | 664 if tag in valid]) |
| 613 return map(DromaeoTester.legalize_filename, benchmarks) | 665 return map(DromaeoTester.legalize_filename, benchmarks) |
| 614 | 666 |
| 615 @staticmethod | 667 @staticmethod |
| 616 def get_dromaeo_versions(): | 668 def get_dromaeo_versions(): |
| 617 return ['js', 'frog_dom', 'frog_html', 'dart2js_dom', 'dart2js_html'] | 669 # TODO(vsm): why is the js version closing early? |
| 670 return ['dart2js_dom', 'dart2js_html'] |
| 671 #return ['js', 'dart2js_dom', 'dart2js_html'] |
| 618 | 672 |
| 619 | 673 |
| 620 class DromaeoTest(RuntimePerformanceTest): | 674 class DromaeoTest(RuntimePerformanceTest): |
| 621 """Runs Dromaeo tests, in the browser.""" | 675 """Runs Dromaeo tests, in the browser.""" |
| 622 def __init__(self, test_runner): | 676 def __init__(self, test_runner): |
| 623 super(DromaeoTest, self).__init__( | 677 super(DromaeoTest, self).__init__( |
| 624 self.name(), | 678 self.name(), |
| 625 BrowserTester.get_browsers(), | 679 BrowserTester.get_browsers(), |
| 626 'browser', | 680 'browser', |
| 627 DromaeoTester.get_dromaeo_versions(), | 681 DromaeoTester.get_dromaeo_versions(), |
| (...skipping 20 matching lines...) |
| 648 def move_chrome_driver_if_needed(self, browser): | 702 def move_chrome_driver_if_needed(self, browser): |
| 649 """Move the appropriate version of ChromeDriver onto the path. | 703 """Move the appropriate version of ChromeDriver onto the path. |
| 650 TODO(efortuna): This is a total hack because the latest version of Chrome | 704 TODO(efortuna): This is a total hack because the latest version of Chrome |
| 651 (Dartium builds) requires a different version of ChromeDriver, that is | 705 (Dartium builds) requires a different version of ChromeDriver, that is |
| 652 incompatible with the release or beta Chrome and vice versa. Remove these | 706 incompatible with the release or beta Chrome and vice versa. Remove these |
| 653 shenanigans once we're back to both versions of Chrome using the same | 707 shenanigans once we're back to both versions of Chrome using the same |
| 654 version of ChromeDriver. IMPORTANT NOTE: This assumes your chromedriver is | 708 version of ChromeDriver. IMPORTANT NOTE: This assumes your chromedriver is |
| 655 in the default location (inside depot_tools). | 709 in the default location (inside depot_tools). |
| 656 """ | 710 """ |
| 657 current_dir = os.getcwd() | 711 current_dir = os.getcwd() |
| 658 os.chdir(DART_INSTALL_LOCATION) | |
| 659 self.test.test_runner.run_cmd(['python', os.path.join( | 712 self.test.test_runner.run_cmd(['python', os.path.join( |
| 660 'tools', 'get_archive.py'), 'chromedriver']) | 713 'tools', 'get_archive.py'), 'chromedriver']) |
| 661 path = os.environ['PATH'].split(os.pathsep) | 714 path = os.environ['PATH'].split(os.pathsep) |
| 662 orig_chromedriver_path = os.path.join('tools', 'testing', | 715 orig_chromedriver_path = os.path.join(DART_REPO_LOC, 'tools', 'testing', |
| 663 'orig-chromedriver') | 716 'orig-chromedriver') |
| 664 dartium_chromedriver_path = os.path.join('tools', 'testing', | 717 dartium_chromedriver_path = os.path.join(DART_REPO_LOC, 'tools', |
| 718 'testing', |
| 665 'dartium-chromedriver') | 719 'dartium-chromedriver') |
| 666 extension = '' | 720 extension = '' |
| 667 if platform.system() == 'Windows': | 721 if platform.system() == 'Windows': |
| 668 extension = '.exe' | 722 extension = '.exe' |
| 669 | 723 |
| 670 def move_chromedriver(depot_tools, copy_to_depot_tools_dir=True, | 724 def move_chromedriver(depot_tools, copy_to_depot_tools_dir=True, |
| 671 from_path=None): | 725 from_path=None): |
| 672 if from_path: | 726 if from_path: |
| 673 from_dir = from_path + extension | 727 from_dir = from_path + extension |
| 674 else: | 728 else: |
| (...skipping 32 matching lines...) |
| 707 | 761 |
| 708 # Build tests. | 762 # Build tests. |
| 709 dromaeo_path = os.path.join('samples', 'third_party', 'dromaeo') | 763 dromaeo_path = os.path.join('samples', 'third_party', 'dromaeo') |
| 710 current_path = os.getcwd() | 764 current_path = os.getcwd() |
| 711 os.chdir(dromaeo_path) | 765 os.chdir(dromaeo_path) |
| 712 self.test.test_runner.run_cmd(['python', 'generate_dart2js_tests.py']) | 766 self.test.test_runner.run_cmd(['python', 'generate_dart2js_tests.py']) |
| 713 os.chdir(current_path) | 767 os.chdir(current_path) |
| 714 | 768 |
| 715 versions = DromaeoTester.get_dromaeo_versions() | 769 versions = DromaeoTester.get_dromaeo_versions() |
| 716 | 770 |
| 717 for browser in filter(lambda x: x != 'ie', BrowserTester.get_browsers()): | 771 for browser in BrowserTester.get_browsers(): |
| 718 self.move_chrome_driver_if_needed(browser) | 772 self.move_chrome_driver_if_needed(browser) |
| 719 for version_name in versions: | 773 for version_name in versions: |
| 720 if not self.test.is_valid_combination(browser, version_name): | 774 if not self.test.is_valid_combination(browser, version_name): |
| 721 continue | 775 continue |
| 722 version = DromaeoTest.DromaeoPerfTester.get_dromaeo_url_query( | 776 version = DromaeoTest.DromaeoPerfTester.get_dromaeo_url_query( |
| 723 browser, version_name) | 777 browser, version_name) |
| 724 self.test.trace_file = os.path.join( | 778 self.test.trace_file = os.path.join(TOP_LEVEL_DIR, |
| 725 'tools', 'testing', 'perf_testing', self.test.result_folder_name, | 779 'tools', 'testing', 'perf_testing', self.test.result_folder_name, |
| 726 'dromaeo-%s-%s-%s' % (self.test.cur_time, browser, version_name)) | 780 'dromaeo-%s-%s-%s' % (self.test.cur_time, browser, version_name)) |
| 727 self.add_svn_revision_to_trace(self.test.trace_file, browser) | 781 self.add_svn_revision_to_trace(self.test.trace_file, browser) |
| 728 file_path = '"%s"' % os.path.join(os.getcwd(), dromaeo_path, | 782 file_path = '"%s"' % os.path.join(os.getcwd(), dromaeo_path, |
| 729 'index-js.html?%s' % version) | 783 'index-js.html?%s' % version) |
| 730 if platform.system() == 'Windows': | 784 if platform.system() == 'Windows': |
| 731 file_path = file_path.replace('&', '^&') | 785 file_path = file_path.replace('&', '^&') |
| 732 file_path = file_path.replace('?', '^?') | 786 file_path = file_path.replace('?', '^?') |
| 733 file_path = file_path.replace('|', '^|') | 787 file_path = file_path.replace('|', '^|') |
| 734 self.test.test_runner.run_cmd( | 788 self.test.test_runner.run_cmd( |
| (...skipping 81 matching lines...) |
| 816 class DromaeoSizeTester(DromaeoTester): | 870 class DromaeoSizeTester(DromaeoTester): |
| 817 def run_tests(self): | 871 def run_tests(self): |
| 818 # Build tests. | 872 # Build tests. |
| 819 dromaeo_path = os.path.join('samples', 'third_party', 'dromaeo') | 873 dromaeo_path = os.path.join('samples', 'third_party', 'dromaeo') |
| 820 current_path = os.getcwd() | 874 current_path = os.getcwd() |
| 821 os.chdir(dromaeo_path) | 875 os.chdir(dromaeo_path) |
| 822 self.test.test_runner.run_cmd( | 876 self.test.test_runner.run_cmd( |
| 823 ['python', os.path.join('generate_dart2js_tests.py')]) | 877 ['python', os.path.join('generate_dart2js_tests.py')]) |
| 824 os.chdir(current_path) | 878 os.chdir(current_path) |
| 825 | 879 |
| 826 self.test.trace_file = os.path.join( | 880 self.test.trace_file = os.path.join(TOP_LEVEL_DIR, |
| 827 'tools', 'testing', 'perf_testing', self.test.result_folder_name, | 881 'tools', 'testing', 'perf_testing', self.test.result_folder_name, |
| 828 self.test.result_folder_name + self.test.cur_time) | 882 self.test.result_folder_name + self.test.cur_time) |
| 829 self.add_svn_revision_to_trace(self.test.trace_file) | 883 self.add_svn_revision_to_trace(self.test.trace_file) |
| 830 | 884 |
| 831 variants = [ | 885 variants = [ |
| 832 ('frog_dom', ''), | 886 ('frog_dom', ''), |
| 833 ('frog_html', '-html'), | 887 ('frog_html', '-html'), |
| 834 ('frog_htmlidiomatic', '-htmlidiomatic')] | 888 ('frog_htmlidiomatic', '-htmlidiomatic')] |
| 835 | 889 |
| 836 test_path = os.path.join(dromaeo_path, 'tests') | 890 test_path = os.path.join(dromaeo_path, 'tests') |
| (...skipping 40 matching lines...) |
| 877 self.test.trace_file, append=True) | 931 self.test.trace_file, append=True) |
| 878 | 932 |
| 879 | 933 |
| 880 class DromaeoSizeProcessor(Processor): | 934 class DromaeoSizeProcessor(Processor): |
| 881 def process_file(self, afile, should_post_file): | 935 def process_file(self, afile, should_post_file): |
| 882 """Pull all the relevant information out of a given tracefile. | 936 """Pull all the relevant information out of a given tracefile. |
| 883 | 937 |
| 884 Args: | 938 Args: |
| 885 afile: is the filename string we will be processing. | 939 afile: is the filename string we will be processing. |
| 886 Returns: True if we successfully posted our data to storage.""" | 940 Returns: True if we successfully posted our data to storage.""" |
| 887 os.chdir(os.path.join(DART_INSTALL_LOCATION, 'tools', | 941 os.chdir(os.path.join(TOP_LEVEL_DIR, 'tools', |
| 888 'testing', 'perf_testing')) | 942 'testing', 'perf_testing')) |
| 889 f = self.open_trace_file(afile, should_post_file) | 943 f = self.open_trace_file(afile, should_post_file) |
| 890 tabulate_data = False | 944 tabulate_data = False |
| 891 revision_num = 0 | 945 revision_num = 0 |
| 892 revision_pattern = r'Revision: (\d+)' | 946 revision_pattern = r'Revision: (\d+)' |
| 893 result_pattern = r'Size \((\w+), ([a-zA-Z0-9-]+)\): (\d+)' | 947 result_pattern = r'Size \((\w+), ([a-zA-Z0-9-]+)\): (\d+)' |
| 894 | 948 |
| 895 upload_success = True | 949 upload_success = True |
| 896 for line in f.readlines(): | 950 for line in f.readlines(): |
| 897 rev = re.match(revision_pattern, line.strip()) | 951 rev = re.match(revision_pattern, line.strip()) |
| (...skipping 29 matching lines...) |
| 927 | 981 |
| 928 class CompileTimeAndSizeTest(Test): | 982 class CompileTimeAndSizeTest(Test): |
| 929 """Run tests to determine how long frogc takes to compile, and the compiled | 983 """Run tests to determine how long frogc takes to compile, and the compiled |
| 930 file output size of some benchmarking files. | 984 file output size of some benchmarking files. |
| 931 Note: This test is now 'deprecated' since frog is no longer in the sdk. We | 985 Note: This test is now 'deprecated' since frog is no longer in the sdk. We |
| 932 just return the last numbers found for frog.""" | 986 just return the last numbers found for frog.""" |
| 933 def __init__(self, test_runner): | 987 def __init__(self, test_runner): |
| 934 """Reference to the test_runner object that notifies us when to begin | 988 """Reference to the test_runner object that notifies us when to begin |
| 935 testing.""" | 989 testing.""" |
| 936 super(CompileTimeAndSizeTest, self).__init__( | 990 super(CompileTimeAndSizeTest, self).__init__( |
| 937 self.name(), ['commandline'], ['frog'], ['swarm', 'total'], | 991 self.name(), ['commandline'], ['dart2js'], ['swarm'], |
| 938 test_runner, self.CompileTester(self), | 992 test_runner, self.CompileTester(self), |
| 939 self.CompileProcessor(self)) | 993 self.CompileProcessor(self)) |
| 940 self.dart_compiler = os.path.join( | 994 self.dart_compiler = os.path.join( |
| 941 DART_INSTALL_LOCATION, utils.GetBuildRoot(utils.GuessOS(), | 995 DART_REPO_LOC, utils.GetBuildRoot(utils.GuessOS(), |
| 942 'release', 'ia32'), 'dart-sdk', 'bin', 'frogc') | 996 'release', 'ia32'), 'dart-sdk', 'bin', 'dart2js') |
| 943 _suffix = '' | 997 _suffix = '' |
| 944 if platform.system() == 'Windows': | 998 if platform.system() == 'Windows': |
| 945 _suffix = '.exe' | 999 _suffix = '.exe' |
| 946 self.dart_vm = os.path.join( | 1000 self.failure_threshold = {'swarm' : 100} |
| 947 DART_INSTALL_LOCATION, utils.GetBuildRoot(utils.GuessOS(), | |
| 948 'release', 'ia32'), 'dart-sdk', 'bin','dart' + _suffix) | |
| 949 self.failure_threshold = {'swarm' : 100, 'total' : 100} | |
| 950 | 1001 |
| 951 @staticmethod | 1002 @staticmethod |
| 952 def name(): | 1003 def name(): |
| 953 return 'time-size' | 1004 return 'time-size' |
| 954 | 1005 |
| 955 class CompileTester(Tester): | 1006 class CompileTester(Tester): |
| 956 def run_tests(self): | 1007 def run_tests(self): |
| 957 os.chdir('frog') | 1008 self.test.trace_file = os.path.join(TOP_LEVEL_DIR, |
| 958 self.test.trace_file = os.path.join( | 1009 'tools', 'testing', 'perf_testing', |
| 959 '..', 'tools', 'testing', 'perf_testing', | |
| 960 self.test.result_folder_name, | 1010 self.test.result_folder_name, |
| 961 self.test.result_folder_name + self.test.cur_time) | 1011 self.test.result_folder_name + self.test.cur_time) |
| 962 | 1012 |
| 963 self.add_svn_revision_to_trace(self.test.trace_file) | 1013 self.add_svn_revision_to_trace(self.test.trace_file) |
| 964 | 1014 |
| 1015 self.test.test_runner.run_cmd( |
| 1016 [self.test.dart_compiler, '-c', '-o', 'swarm-result', |
| 1017 os.path.join('samples', 'swarm', 'swarm.dart')]) |
| 965 swarm_size = 0 | 1018 swarm_size = 0 |
| 966 try: | 1019 try: |
| 967 swarm_size = os.path.getsize('swarm-result') | 1020 swarm_size = os.path.getsize('swarm-result') |
| 968 except OSError: | 1021 except OSError: |
| 969 pass #If compilation failed, continue on running other tests. | 1022 pass #If compilation failed, continue on running other tests. |
| 970 | 1023 |
| 971 total_size = 0 | |
| 972 try: | |
| 973 total_size = os.path.getsize('total-result') | |
| 974 except OSError: | |
| 975 pass #If compilation failed, continue on running other tests. | |
| 976 | |
| 977 self.test.test_runner.run_cmd( | 1024 self.test.test_runner.run_cmd( |
| 978 ['echo', '%d Generated checked swarm size' % swarm_size], | 1025 ['echo', '%d Generated checked swarm size' % swarm_size], |
| 979 self.test.trace_file, append=True) | 1026 self.test.trace_file, append=True) |
| 980 | 1027 |
| 981 self.test.test_runner.run_cmd( | |
| 982 ['echo', '%d Generated checked total size' % total_size], | |
| 983 self.test.trace_file, append=True) | |
| 984 | |
| 985 os.chdir('..') | |
| 986 | |
| 987 | |
| 988 class CompileProcessor(Processor): | 1028 class CompileProcessor(Processor): |
| 989 | |
| 990 def process_file(self, afile, should_post_file): | 1029 def process_file(self, afile, should_post_file): |
| 991 """Pull all the relevant information out of a given tracefile. | 1030 """Pull all the relevant information out of a given tracefile. |
| 992 | 1031 |
| 993 Args: | 1032 Args: |
| 994 afile: is the filename string we will be processing. | 1033 afile: is the filename string we will be processing. |
| 995 Returns: True if we successfully posted our data to storage.""" | 1034 Returns: True if we successfully posted our data to storage.""" |
| 996 os.chdir(os.path.join(DART_INSTALL_LOCATION, 'tools', | 1035 os.chdir(os.path.join(TOP_LEVEL_DIR, 'tools', |
| 997 'testing', 'perf_testing')) | 1036 'testing', 'perf_testing')) |
| 998 f = self.open_trace_file(afile, should_post_file) | 1037 f = self.open_trace_file(afile, should_post_file) |
| 999 tabulate_data = False | 1038 tabulate_data = False |
| 1000 revision_num = 0 | 1039 revision_num = 0 |
| 1001 upload_success = True | 1040 upload_success = True |
| 1002 for line in f.readlines(): | 1041 for line in f.readlines(): |
| 1003 tokens = line.split() | 1042 tokens = line.split() |
| 1004 if 'Revision' in line: | 1043 if 'Revision' in line: |
| 1005 revision_num = int(line.split()[1]) | 1044 revision_num = int(line.split()[1]) |
| 1006 else: | 1045 else: |
| 1007 for metric in self.test.values_list: | 1046 for metric in self.test.values_list: |
| 1008 if metric in line: | 1047 if metric in line: |
| 1009 num = tokens[0] | 1048 num = tokens[0] |
| 1010 if num.find('.') == -1: | 1049 if num.find('.') == -1: |
| 1011 num = int(num) | 1050 num = int(num) |
| 1012 else: | 1051 else: |
| 1013 num = float(num) | 1052 num = float(num) |
| 1014 self.test.values_dict['commandline']['frog'][metric] += [num] | 1053 self.test.values_dict['commandline']['dart2js'][metric] += [num] |
| 1015 self.test.revision_dict['commandline']['frog'][metric] += \ | 1054 self.test.revision_dict['commandline']['dart2js'][metric] += \ |
| 1016 [revision_num] | 1055 [revision_num] |
| 1017 score_type = self.get_score_type(metric) | 1056 score_type = self.get_score_type(metric) |
| 1018 if not self.test.test_runner.no_upload and should_post_file: | 1057 if not self.test.test_runner.no_upload and should_post_file: |
| 1019 if num < self.test.failure_threshold[metric]: | 1058 if num < self.test.failure_threshold[metric]: |
| 1020 num = 0 | 1059 num = 0 |
| 1021 upload_success = upload_success and self.report_results( | 1060 upload_success = upload_success and self.report_results( |
| 1022 metric, num, 'commandline', 'frog', revision_num, | 1061 metric, num, 'commandline', 'dart2js', revision_num, |
| 1023 score_type) | 1062 score_type) |
| 1024 else: | 1063 else: |
| 1025 upload_success = False | 1064 upload_success = False |
| 1026 if revision_num != 0: | 1065 if revision_num != 0: |
| 1027 for metric in self.test.values_list: | 1066 for metric in self.test.values_list: |
| 1028 try: | 1067 try: |
| 1029 self.test.revision_dict['commandline']['frog'][metric].pop() | 1068 self.test.revision_dict['commandline']['dart2js'][metric].pop() |
| 1030 self.test.revision_dict['commandline']['frog'][metric] += \ | 1069 self.test.revision_dict['commandline']['dart2js'][metric] += \ |
| 1031 [revision_num] | 1070 [revision_num] |
| 1032 # Fill in 0 if compilation failed. | 1071 # Fill in 0 if compilation failed. |
| 1033 if self.test.values_dict['commandline']['frog'][metric][-1] < \ | 1072 if self.test.values_dict['commandline']['dart2js'][metric][-1] < \ |
| 1034 self.test.failure_threshold[metric]: | 1073 self.test.failure_threshold[metric]: |
| 1035 self.test.values_dict['commandline']['frog'][metric] += [0] | 1074 self.test.values_dict['commandline']['dart2js'][metric] += [0] |
| 1036 self.test.revision_dict['commandline']['frog'][metric] += \ | 1075 self.test.revision_dict['commandline']['dart2js'][metric] += \ |
| 1037 [revision_num] | 1076 [revision_num] |
| 1038 except IndexError: | 1077 except IndexError: |
| 1039 # We tried to pop from an empty list. This happens if the first | 1078 # We tried to pop from an empty list. This happens if the first |
| 1040 # trace file we encounter is incomplete. | 1079 # trace file we encounter is incomplete. |
| 1041 pass | 1080 pass |
| 1042 | 1081 |
| 1043 f.close() | 1082 f.close() |
| 1044 return upload_success | 1083 return upload_success |
| 1045 | 1084 |
| 1046 def get_score_type(self, metric): | 1085 def get_score_type(self, metric): |
| (...skipping 28 matching lines...) |
| 1075 """Update the set of CLs that do not need additional performance runs. | 1114 """Update the set of CLs that do not need additional performance runs. |
| 1076 Args: | 1115 Args: |
| 1077 revision_num: an additional number to be added to the 'done set' | 1116 revision_num: an additional number to be added to the 'done set' |
| 1078 """ | 1117 """ |
| 1079 filename = os.path.join(dirname(abspath(__file__)), 'cached_results.txt') | 1118 filename = os.path.join(dirname(abspath(__file__)), 'cached_results.txt') |
| 1080 if not os.path.exists(filename): | 1119 if not os.path.exists(filename): |
| 1081 f = open(filename, 'w') | 1120 f = open(filename, 'w') |
| 1082 results = set() | 1121 results = set() |
| 1083 pickle.dump(results, f) | 1122 pickle.dump(results, f) |
| 1084 f.close() | 1123 f.close() |
| 1085 f = open(filename, '+r') | 1124 f = open(filename, 'r+') |
| 1086 result_set = pickle.load(f) | 1125 result_set = pickle.load(f) |
| 1087 if revision_num: | 1126 if revision_num: |
| 1088 f.seek(0) | 1127 f.seek(0) |
| 1089 result_set.add(revision_num) | 1128 result_set.add(revision_num) |
| 1090 pickle.dump(result_set, f) | 1129 pickle.dump(result_set, f) |
| 1091 f.close() | 1130 f.close() |
| 1092 return result_set | 1131 return result_set |
| 1093 | 1132 |
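update_set_of_done_cls rewrites the pickle from seek(0) without truncating, which is safe only because the set never shrinks. A sketch of the same read-modify-write pattern hardened with an explicit truncate() for the general case:

    import os
    import pickle

    def update_done_set(filename, new_item=None):
      # Create the file with an empty set on first use, then
      # load-modify-rewrite; truncate() guards against a shorter pickle
      # leaving stale trailing bytes.
      if not os.path.exists(filename):
        f = open(filename, 'wb')
        pickle.dump(set(), f)
        f.close()
      f = open(filename, 'r+b')
      result_set = pickle.load(f)
      if new_item is not None:
        result_set.add(new_item)
        f.seek(0)
        pickle.dump(result_set, f)
        f.truncate()
      f.close()
      return result_set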
| 1094 def main(): | 1133 def main(): |
| 1095 runner = TestRunner() | 1134 runner = TestRunner() |
| 1096 continuous = runner.parse_args() | 1135 continuous, incremental = runner.parse_args() |
| 1136 |
| 1137 if not os.path.exists(DART_REPO_LOC): |
| 1138 os.mkdir(dirname(DART_REPO_LOC)) |
| 1139 os.chdir(dirname(DART_REPO_LOC)) |
| 1140 p = subprocess.Popen('gclient config https://dart.googlecode.com/svn/' + |
| 1141 'branches/bleeding_edge/deps/all.deps', |
| 1142 stdout=subprocess.PIPE, stderr=subprocess.PIPE, |
| 1143 shell=True) |
| 1144 p.communicate() |
| 1097 if continuous: | 1145 if continuous: |
| 1098 while True: | 1146 while True: |
| 1099 results_set = update_set_of_done_cls() | 1147 results_set = update_set_of_done_cls() |
| 1100 if runner.has_new_code(): | 1148 if runner.has_new_code(): |
| 1101 runner.run_test_sequence() | 1149 runner.run_test_sequence() |
| 1102 else: | 1150 else: |
| 1103 # Try to get up to 10 runs of each CL, starting with the most recent CL | 1151 # Try to get up to 10 runs of each CL, starting with the most recent CL |
| 1104 # that does not yet have 10 runs. But only perform a set of extra runs | 1152 # that does not yet have 10 runs. But only perform a set of extra runs |
| 1105 # at most 10 at a time (get all the extra runs for one CL) before | 1153 # at most 10 at a time (get all the extra runs for one CL) before |
| 1106 # checking to see if new code has been checked in. | 1154 # checking to see if new code has been checked in. |
| (...skipping 15 matching lines...) |
| 1122 a_test.file_processor.get_score_type(benchmark_name)) | 1170 a_test.file_processor.get_score_type(benchmark_name)) |
| 1123 if number_of_results < 10 and number_of_results >= 0: | 1171 if number_of_results < 10 and number_of_results >= 0: |
| 1124 run = runner.run_test_sequence(revision_num=str(revision_num), | 1172 run = runner.run_test_sequence(revision_num=str(revision_num), |
| 1125 num_reruns=(10-number_of_results)) | 1173 num_reruns=(10-number_of_results)) |
| 1126 if run == 0: | 1174 if run == 0: |
| 1127 has_run_extra = True | 1175 has_run_extra = True |
| 1128 results_set = update_set_of_done_cls(revision_num) | 1176 results_set = update_set_of_done_cls(revision_num) |
| 1129 revision_num -= 1 | 1177 revision_num -= 1 |
| 1130 # No more extra back-runs to do (for now). Wait for new code. | 1178 # No more extra back-runs to do (for now). Wait for new code. |
| 1131 time.sleep(200) | 1179 time.sleep(200) |
| 1180 elif incremental: |
| 1181 # This is a temporary measure to backfill old revisions. |
| 1182 # TODO(efortuna): Clean this up -- don't hard code numbers, make user |
| 1183 # specifiable. |
| 1184 revision_num = 9000 |
| 1185 while revision_num < 10600: |
| 1186 run = runner.run_test_sequence(revision_num=str(revision_num), |
| 1187 num_reruns=10) |
| 1188 revision_num += 1 |
| 1132 else: | 1189 else: |
| 1133 runner.run_test_sequence() | 1190 runner.run_test_sequence() |
| 1134 | 1191 |
| 1135 if __name__ == '__main__': | 1192 if __name__ == '__main__': |
| 1136 main() | 1193 main() |