Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(236)

Side by Side Diff: tools/testing/perf_testing/run_perf_tests.py

Issue 10829408: Smarter "new interesting code" detection. (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 8 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « no previous file | tools/testing/webdriver_test_setup.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/python 1 #!/usr/bin/python
2 2
3 # Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file 3 # Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
4 # for details. All rights reserved. Use of this source code is governed by a 4 # for details. All rights reserved. Use of this source code is governed by a
5 # BSD-style license that can be found in the LICENSE file. 5 # BSD-style license that can be found in the LICENSE file.
6 6
7 import datetime 7 import datetime
8 import math 8 import math
9 import optparse 9 import optparse
10 import os 10 import os
(...skipping 27 matching lines...) Expand all
38 different svn revisions. It tests to see if there is a newer version of the code on 38 different svn revisions. It tests to see if there is a newer version of the code on
39 the server, and will sync and run the performance tests if so.""" 39 the server, and will sync and run the performance tests if so."""
40 class TestRunner(object): 40 class TestRunner(object):
41 41
42 def __init__(self): 42 def __init__(self):
43 self.verbose = False 43 self.verbose = False
44 self.has_shell = False 44 self.has_shell = False
45 if platform.system() == 'Windows': 45 if platform.system() == 'Windows':
46 # On Windows, shell must be true to get the correct environment variables. 46 # On Windows, shell must be true to get the correct environment variables.
47 self.has_shell = True 47 self.has_shell = True
48 self.current_revision_num = None
48 49
49 def run_cmd(self, cmd_list, outfile=None, append=False, std_in=''): 50 def RunCmd(self, cmd_list, outfile=None, append=False, std_in=''):
50 """Run the specified command and print out any output to stdout. 51 """Run the specified command and print out any output to stdout.
51 52
52 Args: 53 Args:
53 cmd_list: a list of strings that make up the command to run 54 cmd_list: a list of strings that make up the command to run
54 outfile: a string indicating the name of the file that we should write 55 outfile: a string indicating the name of the file that we should write
55 stdout to 56 stdout to
56 append: True if we want to append to the file instead of overwriting it 57 append: True if we want to append to the file instead of overwriting it
57 std_in: a string that should be written to the process executing to 58 std_in: a string that should be written to the process executing to
58 interact with it (if needed)""" 59 interact with it (if needed)"""
59 if self.verbose: 60 if self.verbose:
(...skipping 11 matching lines...) Expand all
71 out.seek(0, os.SEEK_END) 72 out.seek(0, os.SEEK_END)
72 p = subprocess.Popen(cmd_list, stdout = out, stderr=subprocess.PIPE, 73 p = subprocess.Popen(cmd_list, stdout = out, stderr=subprocess.PIPE,
73 stdin=subprocess.PIPE, shell=self.has_shell) 74 stdin=subprocess.PIPE, shell=self.has_shell)
74 output, stderr = p.communicate(std_in) 75 output, stderr = p.communicate(std_in)
75 if output: 76 if output:
76 print output 77 print output
77 if stderr: 78 if stderr:
78 print stderr 79 print stderr
79 return output, stderr 80 return output, stderr
80 81
81 def time_cmd(self, cmd): 82 def TimeCmd(self, cmd):
82 """Determine the amount of (real) time it takes to execute a given 83 """Determine the amount of (real) time it takes to execute a given
83 command.""" 84 command."""
84 start = time.time() 85 start = time.time()
85 self.run_cmd(cmd) 86 self.RunCmd(cmd)
86 return time.time() - start 87 return time.time() - start
87 88
88 def clear_out_unversioned_files(self): 89 def ClearOutUnversionedFiles(self):
89 """Remove all files that are unversioned by svn.""" 90 """Remove all files that are unversioned by svn."""
90 if os.path.exists(DART_REPO_LOC): 91 if os.path.exists(DART_REPO_LOC):
91 os.chdir(DART_REPO_LOC) 92 os.chdir(DART_REPO_LOC)
92 results, _ = self.run_cmd(['svn', 'st']) 93 results, _ = self.RunCmd(['svn', 'st'])
93 for line in results.split('\n'): 94 for line in results.split('\n'):
94 if line.startswith('?'): 95 if line.startswith('?'):
95 to_remove = line.split()[1] 96 to_remove = line.split()[1]
96 if os.path.isdir(to_remove): 97 if os.path.isdir(to_remove):
97 shutil.rmtree(to_remove)#, ignore_errors=True) 98 shutil.rmtree(to_remove)#, ignore_errors=True)
98 else: 99 else:
99 os.remove(to_remove) 100 os.remove(to_remove)
100 101
101 def get_archive(self, archive_name): 102 def GetArchive(self, archive_name):
102 """Wrapper around the pulling down a specific archive from Google Storage. 103 """Wrapper around the pulling down a specific archive from Google Storage.
103 Adds a specific revision argument as needed. 104 Adds a specific revision argument as needed.
104 Returns: The stderr from running this command.""" 105 Returns: The stdout and stderr from running this command."""
105 cmd = ['python', os.path.join(DART_REPO_LOC, 'tools', 'get_archive.py'), 106 while True:
106 archive_name] 107 cmd = ['python', os.path.join(DART_REPO_LOC, 'tools', 'get_archive.py'),
107 if self.current_revision_num != -1: 108 archive_name]
108 cmd += ['-r', self.current_revision_num] 109 if int(self.current_revision_num) != -1:
109 _, stderr = self.run_cmd(cmd) 110 cmd += ['-r', str(self.current_revision_num)]
110 return stderr 111 stdout, stderr = self.RunCmd(cmd)
112 if 'Please try again later' in stdout:
113 time.sleep(100)
114 else:
115 break
116 return (stdout, stderr)
111 117
112 def sync_and_build(self, suites, revision_num=''): 118 def _Sync(self, revision_num=None):
113 """Make sure we have the latest version of the repo, and build it. We 119 """Update the repository to the latest or specified revision."""
114 begin and end standing in DART_REPO_LOC.
115
116 Args:
117 suites: The set of suites that we wish to build.
118
119 Returns:
120 err_code = 1 if there was a problem building."""
121 os.chdir(dirname(DART_REPO_LOC)) 120 os.chdir(dirname(DART_REPO_LOC))
122 self.clear_out_unversioned_files() 121 self.ClearOutUnversionedFiles()
123 if revision_num == '': 122 if not revision_num:
124 self.run_cmd(['gclient', 'sync']) 123 self.RunCmd(['gclient', 'sync'])
125 else: 124 else:
126 self.run_cmd(['gclient', 'sync', '-r', revision_num, '-t']) 125 self.RunCmd(['gclient', 'sync', '-r', str(revision_num), '-t'])
127 126
128 shutil.copytree(os.path.join(TOP_LEVEL_DIR, 'internal'), 127 shutil.copytree(os.path.join(TOP_LEVEL_DIR, 'internal'),
129 os.path.join(DART_REPO_LOC, 'internal')) 128 os.path.join(DART_REPO_LOC, 'internal'))
130 shutil.copy(os.path.join(TOP_LEVEL_DIR, 'tools', 'get_archive.py'), 129 shutil.copy(os.path.join(TOP_LEVEL_DIR, 'tools', 'get_archive.py'),
131 os.path.join(DART_REPO_LOC, 'tools', 'get_archive.py')) 130 os.path.join(DART_REPO_LOC, 'tools', 'get_archive.py'))
132 shutil.copy( 131 shutil.copy(
133 os.path.join(TOP_LEVEL_DIR, 'tools', 'testing', 'run_selenium.py'), 132 os.path.join(TOP_LEVEL_DIR, 'tools', 'testing', 'run_selenium.py'),
134 os.path.join(DART_REPO_LOC, 'tools', 'testing', 'run_selenium.py')) 133 os.path.join(DART_REPO_LOC, 'tools', 'testing', 'run_selenium.py'))
135 134
136 if revision_num == '': 135 def SyncAndBuild(self, suites, revision_num=None):
136 revision_num = search_for_revision() 136 """Make sure we have the latest version of the repo, and build it. We
137 begin and end standing in DART_REPO_LOC.
138
139 Args:
140 suites: The set of suites that we wish to build.
141
142 Returns:
143 err_code = 1 if there was a problem building."""
144 self._Sync(revision_num)
145 if not revision_num:
146 revision_num = SearchForRevision()
138 147
139 self.current_revision_num = revision_num 148 self.current_revision_num = revision_num
140 stderr = self.get_archive('sdk') 149 stdout, stderr = self.GetArchive('sdk')
141 if not os.path.exists(os.path.join( 150 if (not os.path.exists(os.path.join(
142 DART_REPO_LOC, 'tools', 'get_archive.py')) \ 151 DART_REPO_LOC, 'tools', 'get_archive.py'))
143 or 'InvalidUriError' in stderr: 152 or 'InvalidUriError' in stderr or "Couldn't download" in stdout):
144 # Couldn't find the SDK on Google Storage. Build it locally. 153 # Couldn't find the SDK on Google Storage. Build it locally.
145 154
146 # On Windows, the output directory is marked as "Read Only," which causes 155 # On Windows, the output directory is marked as "Read Only," which causes
147 # an error to be thrown when we use shutil.rmtree. This helper function 156 # an error to be thrown when we use shutil.rmtree. This helper function
148 # changes the permissions so we can still delete the directory. 157 # changes the permissions so we can still delete the directory.
149 def on_rm_error(func, path, exc_info): 158 def on_rm_error(func, path, exc_info):
150 if os.path.exists(path): 159 if os.path.exists(path):
151 os.chmod(path, stat.S_IWRITE) 160 os.chmod(path, stat.S_IWRITE)
152 os.unlink(path) 161 os.unlink(path)
153 # TODO(efortuna): Currently always building ia32 architecture because we 162 # TODO(efortuna): Currently always building ia32 architecture because we
154 # don't have test statistics for what's passing on x64. Eliminate arch 163 # don't have test statistics for what's passing on x64. Eliminate arch
155 # specification when we have tests running on x64, too. 164 # specification when we have tests running on x64, too.
156 shutil.rmtree(os.path.join(os.getcwd(), 165 shutil.rmtree(os.path.join(os.getcwd(),
157 utils.GetBuildRoot(utils.GuessOS(), 'release', 'ia32')), 166 utils.GetBuildRoot(utils.GuessOS(), 'release', 'ia32')),
158 onerror=on_rm_error) 167 onerror=on_rm_error)
159 lines = self.run_cmd([os.path.join('.', 'tools', 'build.py'), '-m', 168 lines = self.RunCmd([os.path.join('.', 'tools', 'build.py'), '-m',
160 'release', '--arch=ia32', 'create_sdk']) 169 'release', '--arch=ia32', 'create_sdk'])
161 170
162 for line in lines: 171 for line in lines:
163 if 'BUILD FAILED' in line: 172 if 'BUILD FAILED' in line:
164 # Someone checked in a broken build! Stop trying to make it work 173 # Someone checked in a broken build! Stop trying to make it work
165 # and wait to try again. 174 # and wait to try again.
166 print 'Broken Build' 175 print 'Broken Build'
167 return 1 176 return 1
168 return 0 177 return 0
169 178
170 def ensure_output_directory(self, dir_name): 179 def EnsureOutputDirectory(self, dir_name):
171 """Test that the listed directory name exists, and if not, create one for 180 """Test that the listed directory name exists, and if not, create one for
172 our output to be placed. 181 our output to be placed.
173 182
174 Args: 183 Args:
175 dir_name: the directory we will create if it does not exist.""" 184 dir_name: the directory we will create if it does not exist."""
176 dir_path = os.path.join(TOP_LEVEL_DIR, 'tools', 185 dir_path = os.path.join(TOP_LEVEL_DIR, 'tools',
177 'testing', 'perf_testing', dir_name) 186 'testing', 'perf_testing', dir_name)
178 if not os.path.exists(dir_path): 187 if not os.path.exists(dir_path):
179 os.makedirs(dir_path) 188 os.makedirs(dir_path)
180 print 'Creating output directory ', dir_path 189 print 'Creating output directory ', dir_path
181 190
182 def has_interesting_code(self, past_revision_num=None): 191 def HasInterestingCode(self, revision_num=None):
183 """Tests if there are any versions of files that might change performance 192 """Tests if there are any versions of files that might change performance
184 results on the server.""" 193 results on the server.
194
195 Returns:
196 (False, None): There is no interesting code to run.
197 (True, revisionNumber): There is interesting code to run at revision
198 revisionNumber.
199 (True, None): There is interesting code to run by syncing to the
200 tip-of-tree."""
185 if not os.path.exists(DART_REPO_LOC): 201 if not os.path.exists(DART_REPO_LOC):
186 return True 202 self._Sync()
187 os.chdir(DART_REPO_LOC) 203 os.chdir(DART_REPO_LOC)
188 no_effect = ['client', 'compiler', 'editor', 'pkg', 'samples', 'tests', 204 no_effect = ['dart/client', 'dart/compiler', 'dart/editor',
189 'third_party', 'tools', 'utils'] 205 'dart/lib/html/doc', 'dart/pkg', 'dart/tests', 'dart/samples',
190 # Pass 'p' in if we have a new certificate for the svn server, we want to 206 'dart/lib/dartdoc', 'dart/lib/i18n', 'dart/lib/unittest',
191 # (p)ermanently accept it. 207 'dart/tools/dartc', 'dart/tools/get_archive.py',
192 if past_revision_num: 208 'dart/tools/test.py', 'dart/tools/testing',
209 'dart/tools/utils', 'dart/third_party', 'dart/utils']
210 definitely_yes = ['dart/samples/third_party/dromaeo',
211 'dart/lib/html/dart2js', 'dart/lib/html/dartium',
212 'dart/lib/scripts', 'dart/lib/src',
213 'dart/third_party/WebCore']
214 def GetFileList(revision):
215 """Determine the set of files that were changed for a particular
216 revision."""
193 # TODO(efortuna): This assumes you're using svn. Have a git fallback as 217 # TODO(efortuna): This assumes you're using svn. Have a git fallback as
194 # well. 218 # well. Pass 'p' in if we have a new certificate for the svn server, we
195 results, _ = self.run_cmd(['svn', 'log', '-v', '-r', 219 # want to (p)ermanently accept it.
196 str(past_revision_num)], std_in='p\r\n') 220 results, _ = self.RunCmd([
221 'svn', 'log', 'http://dart.googlecode.com/svn/branches/bleeding_edge',
222 '-v', '-r', str(revision)], std_in='p\r\n')
197 results = results.split('\n') 223 results = results.split('\n')
198 if len(results) <= 3: 224 if len(results) <= 3:
199 results = [] 225 return []
200 else: 226 else:
201 # Trim off the details about revision number and commit message. We're 227 # Trim off the details about revision number and commit message. We're
202 # only interested in the files that are changed. 228 # only interested in the files that are changed.
203 results = results[3:] 229 results = results[3:]
204 changed_files = [] 230 changed_files = []
205 for result in results: 231 for result in results:
206 if result == '': 232 if len(result) <= 1:
207 break 233 break
208 changed_files += [result.replace('/branches/bleeding_edge/dart/', '')] 234 tokens = result.split()
209 results = changed_files 235 if len(tokens) > 1:
236 changed_files += [tokens[1].replace('/branches/bleeding_edge/', '')]
237 return changed_files
238
239 def HasPerfAffectingResults(files_list):
240 """Determine if this set of changed files might affect performance
241 tests."""
242 def IsSafeFile(f):
243 if not any(f.startswith(prefix) for prefix in definitely_yes):
244 return any(f.startswith(prefix) for prefix in no_effect)
245 return False
246 return not all(IsSafeFile(f) for f in files_list)
247
248 if revision_num:
249 return (HasPerfAffectingResults(GetFileList(
250 revision_num)), revision_num)
210 else: 251 else:
211 results, _ = self.run_cmd(['svn', 'st', '-u'], std_in='p\r\n') 252 results, _ = self.RunCmd(['svn', 'st', '-u'], std_in='p\r\n')
212 results = results.split('\n') 253 latest_interesting_server_rev = int(results.split('\n')[-2].split()[-1])
213 for line in results: 254 if self.backfill:
214 tokens = line.split() 255 done_cls = list(UpdateSetOfDoneCls())
215 if past_revision_num or len(tokens) >= 3 and '*' in tokens[-3]: 256 done_cls.sort()
216 # Loop through the changed files to see if it contains any files that 257 if done_cls:
217 # are NOT listed in the no_effect list (directories not listed in 258 last_done_cl = int(done_cls[-1])
218 # the "no_effect" list are assumed to potentially affect performance. 259 else:
219 if not reduce(lambda x, y: x or y, 260 last_done_cl = EARLIEST_REVISION
220 [tokens[-1].startswith(item) for item in no_effect], False): 261 while latest_interesting_server_rev >= last_done_cl:
221 return True 262 file_list = GetFileList(latest_interesting_server_rev)
222 return False 263 if HasPerfAffectingResults(file_list):
264 return (True, latest_interesting_server_rev)
265 else:
266 UpdateSetOfDoneCls(latest_interesting_server_rev)
267 latest_interesting_server_rev -= 1
268 else:
269 last_done_cl = int(SearchForRevision(DART_REPO_LOC)) + 1
270 while last_done_cl <= latest_interesting_server_rev:
271 file_list = GetFileList(last_done_cl)
272 if HasPerfAffectingResults(file_list):
273 return (True, last_done_cl)
274 else:
275 UpdateSetOfDoneCls(last_done_cl)
276 last_done_cl += 1
277 return (False, None)
223 278
224 def get_os_directory(self): 279 def GetOsDirectory(self):
225 """Specifies the name of the directory for the testing build of dart, which 280 """Specifies the name of the directory for the testing build of dart, which
226 has yet a different naming convention from utils.getBuildRoot(...).""" 281 has yet a different naming convention from utils.getBuildRoot(...)."""
227 if platform.system() == 'Windows': 282 if platform.system() == 'Windows':
228 return 'windows' 283 return 'windows'
229 elif platform.system() == 'Darwin': 284 elif platform.system() == 'Darwin':
230 return 'macos' 285 return 'macos'
231 else: 286 else:
232 return 'linux' 287 return 'linux'
233 288
234 def parse_args(self): 289 def ParseArgs(self):
235 parser = optparse.OptionParser() 290 parser = optparse.OptionParser()
236 parser.add_option('--suites', '-s', dest='suites', help='Run the specified ' 291 parser.add_option('--suites', '-s', dest='suites', help='Run the specified '
237 'comma-separated test suites from set: %s' % \ 292 'comma-separated test suites from set: %s' % \
238 ','.join(TestBuilder.available_suite_names()), 293 ','.join(TestBuilder.AvailableSuiteNames()),
239 action='store', default=None) 294 action='store', default=None)
240 parser.add_option('--forever', '-f', dest='continuous', help='Run this scri' 295 parser.add_option('--forever', '-f', dest='continuous', help='Run this scri'
241 'pt forever, always checking for the next svn checkin', 296 'pt forever, always checking for the next svn checkin',
242 action='store_true', default=False) 297 action='store_true', default=False)
243 parser.add_option('--nobuild', '-n', dest='no_build', action='store_true', 298 parser.add_option('--nobuild', '-n', dest='no_build', action='store_true',
244 help='Do not sync with the repository and do not ' 299 help='Do not sync with the repository and do not '
245 'rebuild.', default=False) 300 'rebuild.', default=False)
246 parser.add_option('--noupload', '-u', dest='no_upload', action='store_true', 301 parser.add_option('--noupload', '-u', dest='no_upload', action='store_true',
247 help='Do not post the results of the run.', default=False) 302 help='Do not post the results of the run.', default=False)
248 parser.add_option('--notest', '-t', dest='no_test', action='store_true', 303 parser.add_option('--notest', '-t', dest='no_test', action='store_true',
249 help='Do not run the tests.', default=False) 304 help='Do not run the tests.', default=False)
250 parser.add_option('--verbose', '-v', dest='verbose', help='Print extra ' 305 parser.add_option('--verbose', '-v', dest='verbose',
251 'debug output', action='store_true', default=False) 306 help='Print extra debug output', action='store_true',
307 default=False)
308 parser.add_option('--backfill', '-b', dest='backfill',
309 help='Backfill earlier CLs with additional results when '
310 'there is idle time.', action='store_true',
311 default=False)
252 312
253 args, ignored = parser.parse_args() 313 args, ignored = parser.parse_args()
254 314
255 if not args.suites: 315 if not args.suites:
256 suites = TestBuilder.available_suite_names() 316 suites = TestBuilder.AvailableSuiteNames()
257 else: 317 else:
258 suites = [] 318 suites = []
259 suitelist = args.suites.split(',') 319 suitelist = args.suites.split(',')
260 for name in suitelist: 320 for name in suitelist:
261 if name in TestBuilder.available_suite_names(): 321 if name in TestBuilder.AvailableSuiteNames():
262 suites.append(name) 322 suites.append(name)
263 else: 323 else:
264 print ('Error: Invalid suite %s not in ' % name) + \ 324 print ('Error: Invalid suite %s not in ' % name) + \
265 '%s' % ','.join(TestBuilder.available_suite_names()) 325 '%s' % ','.join(TestBuilder.AvailableSuiteNames())
266 sys.exit(1) 326 sys.exit(1)
267 self.suite_names = suites 327 self.suite_names = suites
268 self.no_build = args.no_build 328 self.no_build = args.no_build
269 self.no_upload = args.no_upload 329 self.no_upload = args.no_upload
270 self.no_test = args.no_test 330 self.no_test = args.no_test
271 self.verbose = args.verbose 331 self.verbose = args.verbose
332 self.backfill = args.backfill
272 return args.continuous 333 return args.continuous
273 334
274 def run_test_sequence(self, revision_num='', num_reruns=1): 335 def RunTestSequence(self, revision_num=None, num_reruns=1):
275 """Run the set of commands to (possibly) build, run, and post the results 336 """Run the set of commands to (possibly) build, run, and post the results
276 of our tests. Returns 0 on a successful run, 1 if we fail to post results or 337 of our tests. Returns 0 on a successful run, 1 if we fail to post results or
277 the run failed, -1 if the build is broken. 338 the run failed, -1 if the build is broken.
278 """ 339 """
279 suites = [] 340 suites = []
280 success = True 341 success = True
281 if not self.no_build and self.sync_and_build(suites, revision_num) == 1: 342 if not self.no_build and self.SyncAndBuild(suites, revision_num) == 1:
282 return -1 # The build is broken. 343 return -1 # The build is broken.
283 344
345 if not self.current_revision_num:
346 self.current_revision_num = SearchForRevision(DART_REPO_LOC)
347
284 for name in self.suite_names: 348 for name in self.suite_names:
285 for run in range(num_reruns): 349 for run in range(num_reruns):
286 suites += [TestBuilder.make_test(name, self)] 350 suites += [TestBuilder.MakeTest(name, self)]
287 351
288 for test in suites: 352 for test in suites:
289 success = success and test.run() 353 success = success and test.Run()
290 if success: 354 if success:
291 return 0 355 return 0
292 else: 356 else:
293 return 1 357 return 1
294 358
295 359
296 class Test(object): 360 class Test(object):
297 """The base class to provide shared code for different tests we will run and 361 """The base class to provide shared code for different tests we will run and
298 post. At a high level, each test has three visitors (the tester and the 362 post. At a high level, each test has three visitors (the tester and the
299 file_processor) that perform operations on the test object.""" 363 file_processor) that perform operations on the test object."""
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
336 for f in variants: 400 for f in variants:
337 self.revision_dict[platform][f] = dict() 401 self.revision_dict[platform][f] = dict()
338 self.values_dict[platform][f] = dict() 402 self.values_dict[platform][f] = dict()
339 for val in values_list: 403 for val in values_list:
340 self.revision_dict[platform][f][val] = [] 404 self.revision_dict[platform][f][val] = []
341 self.values_dict[platform][f][val] = [] 405 self.values_dict[platform][f][val] = []
342 for extra_metric in extra_metrics: 406 for extra_metric in extra_metrics:
343 self.revision_dict[platform][f][extra_metric] = [] 407 self.revision_dict[platform][f][extra_metric] = []
344 self.values_dict[platform][f][extra_metric] = [] 408 self.values_dict[platform][f][extra_metric] = []
345 409
346 def is_valid_combination(self, platform, variant): 410 def IsValidCombination(self, platform, variant):
347 """Check whether data should be captured for this platform/variant 411 """Check whether data should be captured for this platform/variant
348 combination. 412 combination.
349 """ 413 """
350 # TODO(vsm): This avoids a bug in 32-bit Chrome (dartium) 414 # TODO(vsm): This avoids a bug in 32-bit Chrome (dartium)
351 # running JS dromaeo. 415 # running JS dromaeo.
352 if platform == 'dartium' and variant == 'js': 416 if platform == 'dartium' and variant == 'js':
353 return False 417 return False
354 if (platform == 'safari' and variant == 'dart2js' and 418 if (platform == 'safari' and variant == 'dart2js' and
355 int(self.test_runner.current_revision_num) < 10193): 419 int(self.test_runner.current_revision_num) < 10193):
356 # In revision 10193 we fixed a bug that allows Safari 6 to run dart2js 420 # In revision 10193 we fixed a bug that allows Safari 6 to run dart2js
357 # code. Since we can't change the Safari version on the machine, we're 421 # code. Since we can't change the Safari version on the machine, we're
358 # just not running 422 # just not running
359 # for this case. 423 # for this case.
360 return False 424 return False
361 return True 425 return True
362 426
363 def run(self): 427 def Run(self):
364 """Run the benchmarks/tests from the command line and plot the 428 """Run the benchmarks/tests from the command line and plot the
365 results. 429 results.
366 """ 430 """
367 for visitor in [self.tester, self.file_processor]: 431 for visitor in [self.tester, self.file_processor]:
368 visitor.prepare() 432 visitor.Prepare()
369 433
370 os.chdir(TOP_LEVEL_DIR) 434 os.chdir(TOP_LEVEL_DIR)
371 self.test_runner.ensure_output_directory(self.result_folder_name) 435 self.test_runner.EnsureOutputDirectory(self.result_folder_name)
372 self.test_runner.ensure_output_directory(os.path.join( 436 self.test_runner.EnsureOutputDirectory(os.path.join(
373 'old', self.result_folder_name)) 437 'old', self.result_folder_name))
374 os.chdir(DART_REPO_LOC) 438 os.chdir(DART_REPO_LOC)
375 if not self.test_runner.no_test: 439 if not self.test_runner.no_test:
376 self.tester.run_tests() 440 self.tester.RunTests()
377 441
378 os.chdir(os.path.join(TOP_LEVEL_DIR, 'tools', 'testing', 'perf_testing')) 442 os.chdir(os.path.join(TOP_LEVEL_DIR, 'tools', 'testing', 'perf_testing'))
379 443
380 files = os.listdir(self.result_folder_name) 444 files = os.listdir(self.result_folder_name)
381 post_success = True 445 post_success = True
382 for afile in files: 446 for afile in files:
383 if not afile.startswith('.'): 447 if not afile.startswith('.'):
384 should_move_file = self.file_processor.process_file(afile, True) 448 should_move_file = self.file_processor.ProcessFile(afile, True)
385 if should_move_file: 449 if should_move_file:
386 shutil.move(os.path.join(self.result_folder_name, afile), 450 shutil.move(os.path.join(self.result_folder_name, afile),
387 os.path.join('old', self.result_folder_name, afile)) 451 os.path.join('old', self.result_folder_name, afile))
388 else: 452 else:
389 post_success = False 453 post_success = False
390 454
391 return post_success 455 return post_success
392 456
393 457
394 class Tester(object): 458 class Tester(object):
395 """The base level visitor class that runs tests. It contains convenience 459 """The base level visitor class that runs tests. It contains convenience
396 methods that many Tester objects use. Any class that would like to be a 460 methods that many Tester objects use. Any class that would like to be a
397 TesterVisitor must implement the run_tests() method.""" 461 TesterVisitor must implement the RunTests() method."""
398 462
399 def __init__(self, test): 463 def __init__(self, test):
400 self.test = test 464 self.test = test
401 465
402 def prepare(self): 466 def Prepare(self):
403 """Perform any initial setup required before the test is run.""" 467 """Perform any initial setup required before the test is run."""
404 pass 468 pass
405 469
406 def add_svn_revision_to_trace(self, outfile, browser = None): 470 def AddSvnRevisionToTrace(self, outfile, browser = None):
407 """Add the svn version number to the provided tracefile.""" 471 """Add the svn version number to the provided tracefile."""
408 def get_dartium_revision(): 472 def get_dartium_revision():
409 version_file_name = os.path.join(DART_REPO_LOC, 'client', 'tests', 473 version_file_name = os.path.join(DART_REPO_LOC, 'client', 'tests',
410 'dartium', 'LAST_VERSION') 474 'dartium', 'LAST_VERSION')
411 version_file = open(version_file_name, 'r') 475 version_file = open(version_file_name, 'r')
412 version = version_file.read().split('.')[-2] 476 version = version_file.read().split('.')[-2]
413 version_file.close() 477 version_file.close()
414 return version 478 return version
415 479
416 if browser and browser == 'dartium': 480 if browser and browser == 'dartium':
417 revision = get_dartium_revision() 481 revision = get_dartium_revision()
418 self.test.test_runner.run_cmd(['echo', 'Revision: ' + revision], outfile) 482 self.test.test_runner.RunCmd(['echo', 'Revision: ' + revision], outfile)
419 else: 483 else:
420 revision = search_for_revision() 484 revision = SearchForRevision()
421 self.test.test_runner.run_cmd(['echo', 'Revision: ' + revision], outfile) 485 self.test.test_runner.RunCmd(['echo', 'Revision: ' + revision], outfile)
422 486
423 487
424 class Processor(object): 488 class Processor(object):
425 """The base level visitor class that processes tests. It contains convenience 489 methods that many File Processor objects use. Any class that would like to be
426 methods that many File Processor objects use. Any class that would like to be 490 methods that many File Processor objects use. Any class that would like to be
427 a ProcessorVisitor must implement the process_file() method.""" 491 a ProcessorVisitor must implement the ProcessFile() method."""
428 492
429 SCORE = 'Score' 493 SCORE = 'Score'
430 COMPILE_TIME = 'CompileTime' 494 COMPILE_TIME = 'CompileTime'
431 CODE_SIZE = 'CodeSize' 495 CODE_SIZE = 'CodeSize'
432 496
433 def __init__(self, test): 497 def __init__(self, test):
434 self.test = test 498 self.test = test
435 499
436 def prepare(self): 500 def Prepare(self):
437 """Perform any initial setup required before the test is run.""" 501 """Perform any initial setup required before the test is run."""
438 pass 502 pass
439 503
440 def open_trace_file(self, afile, not_yet_uploaded): 504 def OpenTraceFile(self, afile, not_yet_uploaded):
441 """Find the correct location for the trace file, and open it. 505 """Find the correct location for the trace file, and open it.
442 Args: 506 Args:
443 afile: The tracefile name. 507 afile: The tracefile name.
444 not_yet_uploaded: True if this file is to be found in a directory that 508 not_yet_uploaded: True if this file is to be found in a directory that
445 contains un-uploaded data. 509 contains un-uploaded data.
446 Returns: A file object corresponding to the given file name.""" 510 Returns: A file object corresponding to the given file name."""
447 file_path = os.path.join(self.test.result_folder_name, afile) 511 file_path = os.path.join(self.test.result_folder_name, afile)
448 if not not_yet_uploaded: 512 if not not_yet_uploaded:
449 file_path = os.path.join('old', file_path) 513 file_path = os.path.join('old', file_path)
450 return open(file_path) 514 return open(file_path)
451 515
def ReportResults(self, benchmark_name, score, platform, variant,
                  revision_number, metric):
  """Store the results of the benchmark run.
  Args:
    benchmark_name: The name of the individual benchmark.
    score: The numerical value of this benchmark.
    platform: The platform the test was run on (firefox, command line, etc).
    variant: Specifies whether the data was about generated Frog, js, a
        combination of both, or Dart depending on the test.
    revision_number: The revision of the code (and sometimes the revision of
        dartium).
    metric: One of the score-type constants (SCORE, COMPILE_TIME, CODE_SIZE)
        describing what kind of value 'score' is.

  Returns: True if the post was successful."""
  # Delegates the actual upload to the post_results helper module.
  return post_results.report_results(benchmark_name, score, platform, variant,
                                     revision_number, metric)
467 531
def CalculateGeometricMean(self, platform, variant, svn_revision):
  """Append the geometric mean of the latest benchmark scores.

  Takes the most recent entry of every benchmark in self.test.values_list,
  stores their geometric mean under the synthetic 'Geo-Mean' benchmark and
  tags it with svn_revision in the parallel revision_dict.

  Args:
    platform: platform the scores were collected on (browser name, etc).
    variant: code variant ('js', 'dart2js', ...).
    svn_revision: revision number to associate with the new mean entry.
  """
  log_sum = 0
  if self.test.IsValidCombination(platform, variant):
    # Hoist the repeated deep dict lookup out of the loop.
    scores = self.test.values_dict[platform][variant]
    for benchmark in self.test.values_list:
      history = scores[benchmark]
      if not history:
        # Missing data for one benchmark must not abort the whole mean.
        # Parenthesized print is valid under both Python 2 and 3 (the
        # original print statement was Python-2-only).
        print('Error determining mean for %s %s %s' % (platform, variant,
                                                       benchmark))
        continue
      log_sum += math.log(history[-1])

  # exp(sum(log(x_i)) / n) is the geometric mean of the x_i.
  # NOTE: assumes values_list is non-empty (division by its length).
  mean = math.exp(log_sum / len(self.test.values_list))
  self.test.values_dict[platform][variant]['Geo-Mean'] += [mean]
  self.test.revision_dict[platform][variant]['Geo-Mean'] += [svn_revision]
481 548
def GetScoreType(self, benchmark_name):
  """Determine the type of score for posting -- default is 'Score' (aka
  Runtime), other options are CompileTime and CodeSize.

  Args:
    benchmark_name: unused in the base implementation; subclasses may map
        individual benchmarks to other metrics."""
  return self.SCORE
486 553
487 554
488 class RuntimePerformanceTest(Test): 555 class RuntimePerformanceTest(Test):
489 """Super class for all runtime performance testing.""" 556 """Super class for all runtime performance testing."""
490 557
491 def __init__(self, result_folder_name, platform_list, platform_type, 558 def __init__(self, result_folder_name, platform_list, platform_type,
492 versions, benchmarks, test_runner, tester, file_processor): 559 versions, benchmarks, test_runner, tester, file_processor):
(...skipping 18 matching lines...) Expand all
511 platform_list, versions, benchmarks, test_runner, tester, 578 platform_list, versions, benchmarks, test_runner, tester,
512 file_processor) 579 file_processor)
513 self.platform_list = platform_list 580 self.platform_list = platform_list
514 self.platform_type = platform_type 581 self.platform_type = platform_type
515 self.versions = versions 582 self.versions = versions
516 self.benchmarks = benchmarks 583 self.benchmarks = benchmarks
517 584
518 585
class BrowserTester(Tester):
  @staticmethod
  def GetBrowsers(add_dartium=True):
    """Return the browsers to benchmark on this machine.

    Args:
      add_dartium: when True, include Dartium in the list.
    Returns: a list of browser identifier strings ('ff', 'chrome', plus
        'dartium' and platform-specific additions)."""
    browsers = ['ff', 'chrome']
    if add_dartium:
      browsers += ['dartium']
    # Safari only exists on Mac, IE only on Windows.
    if platform.system() == 'Darwin':
      browsers += ['safari']
    if platform.system() == 'Windows':
      browsers += ['ie']
    # (Removed dead local 'has_shell': it was assigned on Windows but never
    # read or returned; TestRunner tracks its own has_shell flag.)
    return browsers
532 599
533 600
534 class CommonBrowserTest(RuntimePerformanceTest): 601 class CommonBrowserTest(RuntimePerformanceTest):
535 """Runs this basic performance tests (Benchpress, some V8 benchmarks) in the 602 """Runs this basic performance tests (Benchpress, some V8 benchmarks) in the
536 browser.""" 603 browser."""
537 604
def __init__(self, test_runner):
  """Set up the common browser benchmark suite.

  Args:
    test_runner: Reference to the object that notifies us when to run."""
  # Wire up this suite's name, browsers (without Dartium), variants,
  # benchmark list, and its tester/processor helper objects.
  super(CommonBrowserTest, self).__init__(
      self.Name(), BrowserTester.GetBrowsers(False),
      'browser', ['js', 'dart2js'],
      self.GetStandaloneBenchmarks(), test_runner,
      self.CommonBrowserTester(self),
      self.CommonBrowserFileProcessor(self))
547 614
@staticmethod
def Name():
  """Suite identifier, used for result folders and TestBuilder lookup."""
  suite_name = 'browser-perf'
  return suite_name
551 618
@staticmethod
def GetStandaloneBenchmarks():
  """Names of the standalone benchmarks this suite runs."""
  return ['Mandelbrot', 'DeltaBlue', 'Richards', 'NBody', 'BinaryTrees',
          'Fannkuch', 'Meteor', 'BubbleSort', 'Fibonacci', 'Loop',
          'Permute', 'Queens', 'QuickSort', 'Recurse', 'Sieve', 'Sum',
          'Tak', 'Takl', 'Towers', 'TreeSort']
558 625
class CommonBrowserTester(BrowserTester):
  def RunTests(self):
    """Run a performance test in the browser.

    Regenerates the benchmark web pages, then drives every configured
    browser/version pair through run_selenium.py, appending each run's
    output (preceded by the current svn revision) to a per-run trace
    file."""
    os.chdir(DART_REPO_LOC)
    # Rebuild the benchmark pages so they reflect the current checkout.
    self.test.test_runner.RunCmd([
        'python', os.path.join('internal', 'browserBenchmarks',
        'make_web_benchmarks.py')])

    for browser in self.test.platform_list:
      for version in self.test.versions:
        if not self.test.IsValidCombination(browser, version):
          continue
        # The trace file name encodes timestamp, browser and variant
        # separated by '-' so the file processor can recover them later.
        self.test.trace_file = os.path.join(TOP_LEVEL_DIR,
            'tools', 'testing', 'perf_testing', self.test.result_folder_name,
            'perf-%s-%s-%s' % (self.test.cur_time, browser, version))
        self.AddSvnRevisionToTrace(self.test.trace_file, browser)
        file_path = os.path.join(
            os.getcwd(), 'internal', 'browserBenchmarks',
            'benchmark_page_%s.html' % version)
        # 600s timeout: a full benchmark page can take several minutes.
        self.test.test_runner.RunCmd(
            ['python', os.path.join('tools', 'testing', 'run_selenium.py'),
             '--out', file_path, '--browser', browser,
             '--timeout', '600', '--mode', 'perf'], self.test.trace_file,
            append=True)
583 650
584 class CommonBrowserFileProcessor(Processor): 651 class CommonBrowserFileProcessor(Processor):
585 652
586 def process_file(self, afile, should_post_file): 653 def ProcessFile(self, afile, should_post_file):
587 """Comb through the html to find the performance results. 654 """Comb through the html to find the performance results.
588 Returns: True if we successfully posted our data to storage and/or we can 655 Returns: True if we successfully posted our data to storage and/or we can
589 delete the trace file.""" 656 delete the trace file."""
590 os.chdir(os.path.join(TOP_LEVEL_DIR, 'tools', 657 os.chdir(os.path.join(TOP_LEVEL_DIR, 'tools',
591 'testing', 'perf_testing')) 658 'testing', 'perf_testing'))
592 parts = afile.split('-') 659 parts = afile.split('-')
593 browser = parts[2] 660 browser = parts[2]
594 version = parts[3] 661 version = parts[3]
595 f = self.open_trace_file(afile, should_post_file) 662 f = self.OpenTraceFile(afile, should_post_file)
596 lines = f.readlines() 663 lines = f.readlines()
597 line = '' 664 line = ''
598 i = 0 665 i = 0
599 revision_num = 0 666 revision_num = 0
600 while '<div id="results">' not in line and i < len(lines): 667 while '<div id="results">' not in line and i < len(lines):
601 if 'Revision' in line: 668 if 'Revision' in line:
602 revision_num = int(line.split()[1].strip('"')) 669 revision_num = int(line.split()[1].strip('"'))
603 line = lines[i] 670 line = lines[i]
604 i += 1 671 i += 1
605 672
(...skipping 16 matching lines...) Expand all
622 if len(name_and_score) < 2: 689 if len(name_and_score) < 2:
623 break 690 break
624 name = name_and_score[0].strip() 691 name = name_and_score[0].strip()
625 score = name_and_score[1].strip() 692 score = name_and_score[1].strip()
626 if version == 'js' or version == 'v8': 693 if version == 'js' or version == 'v8':
627 version = 'js' 694 version = 'js'
628 bench_dict = self.test.values_dict[browser][version] 695 bench_dict = self.test.values_dict[browser][version]
629 bench_dict[name] += [float(score)] 696 bench_dict[name] += [float(score)]
630 self.test.revision_dict[browser][version][name] += [revision_num] 697 self.test.revision_dict[browser][version][name] += [revision_num]
631 if not self.test.test_runner.no_upload and should_post_file: 698 if not self.test.test_runner.no_upload and should_post_file:
632 upload_success = upload_success and self.report_results( 699 upload_success = upload_success and self.ReportResults(
633 name, score, browser, version, revision_num, 700 name, score, browser, version, revision_num,
634 self.get_score_type(name)) 701 self.GetScoreType(name))
635 else: 702 else:
636 upload_success = False 703 upload_success = False
637 704
638 f.close() 705 f.close()
639 self.calculate_geometric_mean(browser, version, revision_num) 706 self.CalculateGeometricMean(browser, version, revision_num)
640 return upload_success 707 return upload_success
641 708
642 709
643 class DromaeoTester(Tester): 710 class DromaeoTester(Tester):
644 DROMAEO_BENCHMARKS = { 711 DROMAEO_BENCHMARKS = {
645 'attr': ('attributes', [ 712 'attr': ('attributes', [
646 'getAttribute', 713 'getAttribute',
647 'element.property', 714 'element.property',
648 'setAttribute', 715 'setAttribute',
649 'element.property = value']), 716 'element.property = value']),
(...skipping 17 matching lines...) Expand all
667 'traverse': ('traverse', [ 734 'traverse': ('traverse', [
668 'firstChild', 735 'firstChild',
669 'lastChild', 736 'lastChild',
670 'nextSibling', 737 'nextSibling',
671 'previousSibling', 738 'previousSibling',
672 'childNodes']) 739 'childNodes'])
673 } 740 }
674 741
# Use filenames that don't have unusual characters for benchmark names.
@staticmethod
def LegalizeFilename(str):
  """Replace characters that are awkward in filenames.

  Args:
    str: the raw benchmark name. (The parameter keeps its historical name,
        which shadows the builtin, to preserve the call interface.)
  Returns: the name with spaces/parens mapped to '_', '*' to 'ALL' and
      '=' to 'ASSIGN'."""
  remap = {
      ' ': '_',
      '(': '_',
      ')': '_',
      '*': 'ALL',
      '=': 'ASSIGN',
  }
  # .items() instead of the Python-2-only .iteritems(): identical behavior
  # on Python 2 and also runs on Python 3. Iteration order is irrelevant
  # because no replacement output contains another pattern's input char.
  for (old, new) in remap.items():
    str = str.replace(old, new)
  return str
688 755
# TODO(vsm): This is a hack to skip breaking tests. Triage this
# failure properly. The modify suite fails on 32-bit chrome, which
# is the default on mac and win.
@staticmethod
def GetValidDromaeoTags():
  """Dromaeo suite tags that are safe to run on the current platform."""
  tags = [tag for (tag, _) in DromaeoTester.DROMAEO_BENCHMARKS.values()]
  if platform.system() in ('Darwin', 'Windows'):
    tags.remove('modify')
  return tags
698 765
@staticmethod
def GetDromaeoBenchmarks():
  """Individual benchmark names for every valid Dromaeo suite, mapped to
  filename-safe form."""
  valid = DromaeoTester.GetValidDromaeoTags()
  # Flatten the per-suite test lists for all suites whose tag is valid.
  benchmarks = [test for (tag, tests) in
                DromaeoTester.DROMAEO_BENCHMARKS.values()
                if tag in valid
                for test in tests]
  return map(DromaeoTester.LegalizeFilename, benchmarks)
707 774
@staticmethod
def GetDromaeoVersions():
  """The code variants benchmarked: hand-written JS and dart2js HTML."""
  variants = ['js']
  variants.append('dart2js_html')
  return variants
711 778
712 779
class DromaeoTest(RuntimePerformanceTest):
  """Runs Dromaeo tests, in the browser."""
  def __init__(self, test_runner):
    # Wire up the Dromaeo suite: all browsers (including Dartium), its
    # version variants and benchmark list, plus its tester/processor.
    super(DromaeoTest, self).__init__(
        self.Name(),
        BrowserTester.GetBrowsers(True),
        'browser',
        DromaeoTester.GetDromaeoVersions(),
        DromaeoTester.GetDromaeoBenchmarks(), test_runner,
        self.DromaeoPerfTester(self),
        self.DromaeoFileProcessor(self))

  @staticmethod
  def Name():
    # Suite identifier used for result folders and TestBuilder lookup.
    return 'dromaeo'

  class DromaeoPerfTester(DromaeoTester):
    def MoveChromeDriverIfNeeded(self, browser):
      """Move the appropriate version of ChromeDriver onto the path.
      TODO(efortuna): This is a total hack because the latest version of
      Chrome (Dartium builds) requires a different version of ChromeDriver,
      that is incompatible with the release or beta Chrome and vice versa.
      Remove these shenanigans once we're back to both versions of Chrome
      using the same version of ChromeDriver. IMPORTANT NOTE: This assumes
      your chromedriver is in the default location (inside depot_tools).
      """
      current_dir = os.getcwd()
      self.test.test_runner.GetArchive('chromedriver')
      path = os.environ['PATH'].split(os.pathsep)
      # Stash locations for the stock and Dartium-specific chromedrivers.
      orig_chromedriver_path = os.path.join(DART_REPO_LOC, 'tools', 'testing',
                                            'orig-chromedriver')
      dartium_chromedriver_path = os.path.join(DART_REPO_LOC, 'tools',
                                               'testing',
                                               'dartium-chromedriver')
      extension = ''
      if platform.system() == 'Windows':
        extension = '.exe'

      def MoveChromedriver(depot_tools, copy_to_depot_tools_dir=True,
                           from_path=None):
        # Copy a chromedriver binary into the depot_tools directory; with
        # copy_to_depot_tools_dir=False the direction is reversed (stash
        # the current binary so it can be restored later).
        if from_path:
          from_dir = from_path + extension
        else:
          from_dir = os.path.join(orig_chromedriver_path,
                                  'chromedriver' + extension)
        to_dir = os.path.join(depot_tools, 'chromedriver' + extension)
        if not copy_to_depot_tools_dir:
          tmp = to_dir
          to_dir = from_dir
          from_dir = tmp
        print >> sys.stderr, from_dir
        print >> sys.stderr, to_dir
        if not os.path.exists(os.path.dirname(to_dir)):
          os.makedirs(os.path.dirname(to_dir))
        shutil.copyfile(from_dir, to_dir)

      for loc in path:
        if 'depot_tools' in loc:
          if browser == 'chrome':
            # Restore the stock chromedriver if we previously stashed it.
            if os.path.exists(orig_chromedriver_path):
              MoveChromedriver(loc)
          elif browser == 'dartium':
            if (int(self.test.test_runner.current_revision_num) <
                FIRST_CHROMEDRIVER):
              # If we don't have a stashed a different chromedriver just use
              # the regular chromedriver.
              # NOTE(review): other call sites pass RunCmd a single list;
              # here the script path and flags are separate positional
              # arguments -- confirm RunCmd accepts this form.
              self.test.test_runner.RunCmd(os.path.join(
                  TOP_LEVEL_DIR, 'tools', 'testing', 'webdriver_test_setup.py'),
                  '-f', '-s', '-p')
            elif not os.path.exists(dartium_chromedriver_path):
              # NOTE(review): stdout is captured but never used here.
              stdout, _ = self.test.test_runner.GetArchive('chromedriver')
            # Move original chromedriver for storage.
            if not os.path.exists(orig_chromedriver_path):
              MoveChromedriver(loc, copy_to_depot_tools_dir=False)
            # NOTE(review): current_revision_num is cast to int above but
            # compared uncast here; if it is a string, this Python 2
            # str-vs-int comparison is always True -- confirm intent.
            if self.test.test_runner.current_revision_num >= FIRST_CHROMEDRIVER:
              # Copy Dartium chromedriver into depot_tools
              MoveChromedriver(loc, from_path=os.path.join(
                  dartium_chromedriver_path, 'chromedriver'))
      os.chdir(current_dir)

    def RunTests(self):
      """Run dromaeo in the browser."""

      self.test.test_runner.GetArchive('dartium')

      # Build tests.
      dromaeo_path = os.path.join('samples', 'third_party', 'dromaeo')
      current_path = os.getcwd()
      os.chdir(dromaeo_path)
      # Older checkouts only provide the frog test generator.
      if os.path.exists('generate_dart2js_tests.py'):
        stdout, _ = self.test.test_runner.RunCmd(
            ['python', 'generate_dart2js_tests.py'])
      else:
        stdout, _ = self.test.test_runner.RunCmd(
            ['python', 'generate_frog_tests.py'])
      os.chdir(current_path)
      # Bail out early instead of timing a broken build.
      if 'Error: Compilation failed' in stdout:
        return
      versions = DromaeoTester.GetDromaeoVersions()

      for browser in BrowserTester.GetBrowsers():
        # Swap in the chromedriver matching this browser build.
        self.MoveChromeDriverIfNeeded(browser)
        for version_name in versions:
          if not self.test.IsValidCombination(browser, version_name):
            continue
          version = DromaeoTest.DromaeoPerfTester.GetDromaeoUrlQuery(
              browser, version_name)
          # Trace name encodes timestamp, browser and variant with '-' so
          # the file processor can recover them later.
          self.test.trace_file = os.path.join(TOP_LEVEL_DIR,
              'tools', 'testing', 'perf_testing', self.test.result_folder_name,
              'dromaeo-%s-%s-%s' % (self.test.cur_time, browser, version_name))
          self.AddSvnRevisionToTrace(self.test.trace_file, browser)
          file_path = '"%s"' % os.path.join(os.getcwd(), dromaeo_path,
              'index-js.html?%s' % version)
          self.test.test_runner.RunCmd(
              ['python', os.path.join('tools', 'testing', 'run_selenium.py'),
               '--out', file_path, '--browser', browser,
               '--timeout', '900', '--mode', 'dromaeo'], self.test.trace_file,
              append=True)
      # Put default Chromedriver back in.
      self.MoveChromeDriverIfNeeded('chrome')

    @staticmethod
    def GetDromaeoUrlQuery(browser, version):
      # Encodes the query: '_' in the variant becomes the token 'AND' and
      # tags are joined with 'OR'.
      # NOTE(review): presumably the receiving harness decodes AND/OR back
      # into '&'/'|' separators -- confirm against run_selenium.py.
      if browser == 'dartium':
        version = version.replace('frog', 'dart')
      version = version.replace('_','AND')
      tags = DromaeoTester.GetValidDromaeoTags()
      return 'OR'.join([ '%sAND%s' % (version, tag) for tag in tags])


  class DromaeoFileProcessor(Processor):
    def ProcessFile(self, afile, should_post_file):
      """Comb through the html to find the performance results.
      Returns: True if we successfully posted our data to storage."""
      # Trace file names look like 'dromaeo-<time>-<browser>-<version>'.
      parts = afile.split('-')
      browser = parts[2]
      version = parts[3]

      bench_dict = self.test.values_dict[browser][version]

      f = self.OpenTraceFile(afile, should_post_file)
      lines = f.readlines()
      # NOTE(review): 'i' is never used below.
      i = 0
      revision_num = 0
      revision_pattern = r'Revision: (\d+)'
      suite_pattern = r'<div class="result-item done">(.+?)</ol></div>'
      result_pattern = r'<b>(.+?)</b>(.+?)<small> runs/s(.+)'

      upload_success = True
      for line in lines:
        rev = re.match(revision_pattern, line.strip())
        if rev:
          revision_num = int(rev.group(1))
          continue

        suite_results = re.findall(suite_pattern, line)
        if suite_results:
          for suite_result in suite_results:
            results = re.findall(r'<li>(.*?)</li>', suite_result)
            if results:
              for result in results:
                r = re.match(result_pattern, result)
                name = DromaeoTester.LegalizeFilename(r.group(1).strip(':'))
                score = float(r.group(2))
                bench_dict[name] += [float(score)]
                self.test.revision_dict[browser][version][name] += \
                    [revision_num]
                if not self.test.test_runner.no_upload and should_post_file:
                  upload_success = upload_success and self.ReportResults(
                      name, score, browser, version, revision_num,
                      self.GetScoreType(name))
                else:
                  upload_success = False

      f.close()
      self.CalculateGeometricMean(browser, version, revision_num)
      return upload_success
887 957
class TestBuilder(object):
  """Factory that maps suite names to their Test classes."""
  available_suites = {suite.Name(): suite
                      for suite in [CommonBrowserTest, DromaeoTest]}

  @staticmethod
  def MakeTest(test_name, test_runner):
    """Instantiate the suite registered under test_name."""
    return TestBuilder.available_suites[test_name](test_runner)

  @staticmethod
  def AvailableSuiteNames():
    """Names of all runnable suites."""
    return TestBuilder.available_suites.keys()
900 970
def SearchForRevision(directory = None):
  """Find the current revision number in the desired directory. If directory is
  None, find the revision number in the current directory."""
  def _InfoRevision(command):
    # Run an svn-style 'info' command and scrape its 'Revision' line;
    # -1 signals that no revision could be found.
    proc = subprocess.Popen(command, stdout = subprocess.PIPE,
                            stderr = subprocess.STDOUT,
                            shell = (platform.system() == 'Windows'))
    output, _ = proc.communicate()
    for info_line in output.split('\n'):
      if 'Revision' in info_line:
        return int(info_line.split()[1])
    return -1

  original_dir = os.getcwd()
  os.chdir(directory or original_dir)
  # Prefer plain svn; fall back to git-svn for git checkouts.
  revision_num = int(_InfoRevision(['svn', 'info']))
  if revision_num == -1:
    revision_num = int(_InfoRevision(['git', 'svn', 'info']))
  os.chdir(original_dir)
  return str(revision_num)
923 994
def UpdateSetOfDoneCls(revision_num=None):
  """Update the set of CLs that do not need additional performance runs.
  Args:
    revision_num: an additional number to be added to the 'done set'
  Returns: the (possibly updated) set of finished revision numbers."""
  filename = os.path.join(TOP_LEVEL_DIR, 'cached_results.txt')
  if not os.path.exists(filename):
    # First use: seed the cache file with an empty set.
    with open(filename, 'w') as seed:
      pickle.dump(set(), seed)
  with open(filename, 'r+') as f:
    result_set = pickle.load(f)
    if revision_num:
      # Rewrite the pickle from the start with the revision added.
      f.seek(0)
      result_set.add(revision_num)
      pickle.dump(result_set, f)
  return result_set
943 1015
def FillInBackHistory(results_set, runner):
  """Fill in back history performance data. This is done one of two ways, with
  equal probability of trying each way (falling back on the sequential version
  as our data becomes more densely populated).

  Args:
    results_set: set of revision numbers that already have enough (10) runs.
    runner: the TestRunner driving the perf tests.

  Returns:
    The (possibly updated) set of done revision numbers.
  """
  has_run_extra = False
  # Most recent locally synced revision; upper bound for back-filling.
  revision_num = int(SearchForRevision(DART_REPO_LOC))

  def TryToRunAdditional(revision_number):
    """Determine the number of results we have stored for a particular revision
    number, and if it is less than 10, run some extra tests.
    Args:
      - revision_number: the revision whose performance we want to potentially
        test.
    Returns: True if we successfully ran some additional tests."""
    # HasInterestingCode returns a tuple; element 0 is the boolean verdict.
    if not runner.HasInterestingCode(revision_number)[0]:
      # Nothing perf-relevant changed at this revision; mark it done.
      # NOTE(review): this assignment binds a *local* results_set inside this
      # nested function (no nonlocal in this Python version), so the outer
      # results_set is not updated here — presumably relying on the persisted
      # cache file instead; confirm this is intended.
      results_set = UpdateSetOfDoneCls(revision_number)
      return False
    # Use the first suite/benchmark/platform/variant as a representative
    # sample to count how many results are already recorded.
    a_test = TestBuilder.MakeTest(runner.suite_names[0], runner)
    benchmark_name = a_test.values_list[0]
    platform_name = a_test.platform_list[0]
    variant = a_test.values_dict[platform_name].keys()[0]
    num_results = post_results.get_num_results(benchmark_name,
        platform_name, variant, revision_number,
        a_test.file_processor.GetScoreType(benchmark_name))
    if num_results < 10:
      # Run at most two more times.
      if num_results > 8:
        reruns = 10 - num_results
      else:
        reruns = 2
      run = runner.RunTestSequence(revision_num=str(revision_number),
          num_reruns=reruns)
    # Short-circuit protects the possibly-unbound 'run'/'reruns': they are
    # only evaluated when num_results < 10, i.e. when they were assigned.
    # NOTE(review): same local-shadowing caveat as above for results_set.
    if num_results >= 10 or run == 0 and num_results + reruns >= 10:
      results_set = UpdateSetOfDoneCls(revision_number)
    else:
      return False
    return True

  if random.choice([True, False]):
    # Select a random CL number, with greater likelihood of selecting a CL in
    # the more recent history than the distant past (using a simplified weighted
    # bucket algorithm). If that CL has less than 10 runs, run additional. If it
    # already has 10 runs, look for another CL number that is not yet have all
    # of its additional runs (do this up to 15 times).
    tries = 0
    # Select which "thousands bucket" we're going to run additional tests for.
    bucket_size = 1000
    thousands_list = range(EARLIEST_REVISION/bucket_size,
        int(revision_num)/bucket_size + 1)
    weighted_total = sum(thousands_list)
    generated_random_number = random.randint(0, weighted_total - 1)
    # Walk buckets from most recent to oldest, subtracting each bucket's
    # weight until the random draw falls inside the current bucket.
    for i in list(reversed(thousands_list)):
      thousands = i
      weighted_total -= i
      if weighted_total <= generated_random_number:
        break
    while tries < 15 and not has_run_extra:
      # Now select a particular revision in that bucket.
      if thousands == int(revision_num)/bucket_size:
        # Newest bucket is only partially populated; cap at current revision.
        max_range = 1 + revision_num % bucket_size
      else:
        max_range = bucket_size
      rev = thousands * bucket_size + random.randrange(0, max_range)
      if rev not in results_set:
        has_run_extra = TryToRunAdditional(rev)
      tries += 1

  if not has_run_extra:
    # Try to get up to 10 runs of each CL, starting with the most recent
    # CL that does not yet have 10 runs. But only perform a set of extra
    # runs at most 2 at a time before checking to see if new code has been
    # checked in.
    while revision_num > EARLIEST_REVISION and not has_run_extra:
      if revision_num not in results_set:
        has_run_extra = TryToRunAdditional(revision_num)
      revision_num -= 1
    if not has_run_extra:
      # No more extra back-runs to do (for now). Wait for new code.
      time.sleep(200)
  return results_set
1024 1097
1098
def main():
  """Entry point: bootstrap the Dart checkout, then run perf tests.

  In continuous mode this loops forever: run the test sequence whenever new
  interesting code lands, otherwise back-fill history (if enabled) or sleep.
  In one-shot mode it runs the test sequence exactly once.
  """
  runner = TestRunner()
  # ParseArgs returns whether we should run in continuous (bot) mode.
  continuous = runner.ParseArgs()

  if not os.path.exists(DART_REPO_LOC):
    # First run on this machine: create the checkout directory and configure
    # gclient to pull the bleeding_edge Dart dependencies.
    os.mkdir(dirname(DART_REPO_LOC))
    os.chdir(dirname(DART_REPO_LOC))
    p = subprocess.Popen('gclient config https://dart.googlecode.com/svn/' +
                         'branches/bleeding_edge/deps/all.deps',
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         shell=True)
    # Wait for gclient config to finish (output is discarded).
    p.communicate()
  if continuous:
    while True:
      # Refresh the set of revisions that already have enough results.
      results_set = UpdateSetOfDoneCls()
      (is_interesting, interesting_rev_num) = runner.HasInterestingCode()
      if is_interesting:
        runner.RunTestSequence(interesting_rev_num)
      else:
        # presumably 'backfill' is a ParseArgs-controlled flag — confirm.
        if runner.backfill:
          results_set = FillInBackHistory(results_set, runner)
        else:
          # Nothing to do; poll again for new code after a pause.
          time.sleep(200)
  else:
    runner.RunTestSequence()
1046 1124
# Run the perf-test driver only when executed as a script, not on import.
if __name__ == '__main__':
  main()
OLDNEW
« no previous file with comments | « no previous file | tools/testing/webdriver_test_setup.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698