Chromium Code Reviews

Side by Side Diff: tools/testing/perf_testing/run_perf_tests.py

Issue 10829408: Smarter "new interesting code" detection. (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 8 years, 4 months ago
OLD | NEW
1 #!/usr/bin/python 1 #!/usr/bin/python
2 2
3 # Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file 3 # Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
4 # for details. All rights reserved. Use of this source code is governed by a 4 # for details. All rights reserved. Use of this source code is governed by a
5 # BSD-style license that can be found in the LICENSE file. 5 # BSD-style license that can be found in the LICENSE file.
6 6
7 import datetime 7 import datetime
8 import math 8 import math
9 import optparse 9 import optparse
10 import os 10 import os
(...skipping 27 matching lines...)
38 different svn revisions. It tests to see if there is a newer version of the code on 38 different svn revisions. It tests to see if there is a newer version of the code on
39 the server, and will sync and run the performance tests if so.""" 39 the server, and will sync and run the performance tests if so."""
40 class TestRunner(object): 40 class TestRunner(object):
41 41
42 def __init__(self): 42 def __init__(self):
43 self.verbose = False 43 self.verbose = False
44 self.has_shell = False 44 self.has_shell = False
45 if platform.system() == 'Windows': 45 if platform.system() == 'Windows':
46 # On Windows, shell must be true to get the correct environment variables. 46 # On Windows, shell must be true to get the correct environment variables.
47 self.has_shell = True 47 self.has_shell = True
48 self.current_revision_num = None
48 49
49 def run_cmd(self, cmd_list, outfile=None, append=False, std_in=''): 50 def RunCmd(self, cmd_list, outfile=None, append=False, std_in=''):
50 """Run the specified command and print out any output to stdout. 51 """Run the specified command and print out any output to stdout.
51 52
52 Args: 53 Args:
53 cmd_list: a list of strings that make up the command to run 54 cmd_list: a list of strings that make up the command to run
54 outfile: a string indicating the name of the file that we should write 55 outfile: a string indicating the name of the file that we should write
55 stdout to 56 stdout to
56 append: True if we want to append to the file instead of overwriting it 57 append: True if we want to append to the file instead of overwriting it
57 std_in: a string that should be written to the process executing to 58 std_in: a string that should be written to the process executing to
58 interact with it (if needed)""" 59 interact with it (if needed)"""
59 if self.verbose: 60 if self.verbose:
(...skipping 11 matching lines...)
71 out.seek(0, os.SEEK_END) 72 out.seek(0, os.SEEK_END)
72 p = subprocess.Popen(cmd_list, stdout = out, stderr=subprocess.PIPE, 73 p = subprocess.Popen(cmd_list, stdout = out, stderr=subprocess.PIPE,
73 stdin=subprocess.PIPE, shell=self.has_shell) 74 stdin=subprocess.PIPE, shell=self.has_shell)
74 output, stderr = p.communicate(std_in) 75 output, stderr = p.communicate(std_in)
75 if output: 76 if output:
76 print output 77 print output
77 if stderr: 78 if stderr:
78 print stderr 79 print stderr
79 return output, stderr 80 return output, stderr
80 81
81 def time_cmd(self, cmd): 82 def TimeCmd(self, cmd):
82 """Determine the amount of (real) time it takes to execute a given 83 """Determine the amount of (real) time it takes to execute a given
83 command.""" 84 command."""
84 start = time.time() 85 start = time.time()
85 self.run_cmd(cmd) 86 self.RunCmd(cmd)
86 return time.time() - start 87 return time.time() - start
87 88
88 def clear_out_unversioned_files(self): 89 def ClearOutUnversionedFiles(self):
89 """Remove all files that are unversioned by svn.""" 90 """Remove all files that are unversioned by svn."""
90 if os.path.exists(DART_REPO_LOC): 91 if os.path.exists(DART_REPO_LOC):
91 os.chdir(DART_REPO_LOC) 92 os.chdir(DART_REPO_LOC)
92 results, _ = self.run_cmd(['svn', 'st']) 93 results, _ = self.RunCmd(['svn', 'st'])
93 for line in results.split('\n'): 94 for line in results.split('\n'):
94 if line.startswith('?'): 95 if line.startswith('?'):
95 to_remove = line.split()[1] 96 to_remove = line.split()[1]
96 if os.path.isdir(to_remove): 97 if os.path.isdir(to_remove):
97 shutil.rmtree(to_remove)#, ignore_errors=True) 98 shutil.rmtree(to_remove)#, ignore_errors=True)
98 else: 99 else:
99 os.remove(to_remove) 100 os.remove(to_remove)
100 101
101 def get_archive(self, archive_name): 102 def GetArchive(self, archive_name):
102 """Wrapper around the pulling down a specific archive from Google Storage. 103 """Wrapper around the pulling down a specific archive from Google Storage.
103 Adds a specific revision argument as needed. 104 Adds a specific revision argument as needed.
104 Returns: The stderr from running this command.""" 105 Returns: The stdout and stderr from running this command."""
105 cmd = ['python', os.path.join(DART_REPO_LOC, 'tools', 'get_archive.py'), 106 while True:
106 archive_name] 107 cmd = ['python', os.path.join(DART_REPO_LOC, 'tools', 'get_archive.py'),
107 if self.current_revision_num != -1: 108 archive_name]
108 cmd += ['-r', self.current_revision_num] 109 if int(self.current_revision_num) != -1:
109 _, stderr = self.run_cmd(cmd) 110 cmd += ['-r', str(self.current_revision_num)]
110 return stderr 111 stdout, stderr = self.RunCmd(cmd)
112 if 'Please try again later' in stdout:
113 time.sleep(100)
114 else:
115 break
116 return (stdout, stderr)
111 117
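The retry loop added to GetArchive above simply re-runs tools/get_archive.py until the transient "Please try again later" message stops appearing in its output. As a standalone illustration of the pattern (a minimal sketch: the marker string and 100-second sleep come from the diff, while the function name and direct subprocess call are assumptions for illustration):

    import subprocess
    import time

    def fetch_with_retry(cmd, marker='Please try again later', delay=100):
        # Re-run the command until the transient-failure marker no longer
        # appears in its stdout, then hand back both output streams.
        while True:
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            stdout, stderr = proc.communicate()
            if marker not in stdout:
                return stdout, stderr
            time.sleep(delay)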
112 def sync_and_build(self, suites, revision_num=''): 118 def _Sync(self, revision_num=None):
113 """Make sure we have the latest version of of the repo, and build it. We 119 """Update the repository to the latest or specified revision."""
114 begin and end standing in DART_REPO_LOC.
115
116 Args:
117 suites: The set of suites that we wish to build.
118
119 Returns:
120 err_code = 1 if there was a problem building."""
121 os.chdir(dirname(DART_REPO_LOC)) 120 os.chdir(dirname(DART_REPO_LOC))
122 self.clear_out_unversioned_files() 121 self.ClearOutUnversionedFiles()
123 if revision_num == '': 122 if not revision_num:
124 self.run_cmd(['gclient', 'sync']) 123 self.RunCmd(['gclient', 'sync'])
125 else: 124 else:
126 self.run_cmd(['gclient', 'sync', '-r', revision_num, '-t']) 125 self.RunCmd(['gclient', 'sync', '-r', str(revision_num), '-t'])
127 126
128 shutil.copytree(os.path.join(TOP_LEVEL_DIR, 'internal'), 127 shutil.copytree(os.path.join(TOP_LEVEL_DIR, 'internal'),
129 os.path.join(DART_REPO_LOC, 'internal')) 128 os.path.join(DART_REPO_LOC, 'internal'))
130 shutil.copy(os.path.join(TOP_LEVEL_DIR, 'tools', 'get_archive.py'), 129 shutil.copy(os.path.join(TOP_LEVEL_DIR, 'tools', 'get_archive.py'),
131 os.path.join(DART_REPO_LOC, 'tools', 'get_archive.py')) 130 os.path.join(DART_REPO_LOC, 'tools', 'get_archive.py'))
132 shutil.copy( 131 shutil.copy(
133 os.path.join(TOP_LEVEL_DIR, 'tools', 'testing', 'run_selenium.py'), 132 os.path.join(TOP_LEVEL_DIR, 'tools', 'testing', 'run_selenium.py'),
134 os.path.join(DART_REPO_LOC, 'tools', 'testing', 'run_selenium.py')) 133 os.path.join(DART_REPO_LOC, 'tools', 'testing', 'run_selenium.py'))
135 134
136 if revision_num == '': 135 def SyncAndBuild(self, suites, revision_num=None):
136 revision_num = search_for_revision() 136 """Make sure we have the latest version of the repo, and build it. We
137 begin and end standing in DART_REPO_LOC.
138
139 Args:
140 suites: The set of suites that we wish to build.
141
142 Returns:
143 err_code = 1 if there was a problem building."""
144 self._Sync(revision_num)
145 if not revision_num:
146 revision_num = SearchForRevision()
138 147
139 self.current_revision_num = revision_num 148 self.current_revision_num = revision_num
140 stderr = self.get_archive('sdk') 149 stdout, stderr = self.GetArchive('sdk')
141 if not os.path.exists(os.path.join( 150 if (not os.path.exists(os.path.join(
142 DART_REPO_LOC, 'tools', 'get_archive.py')) \ 151 DART_REPO_LOC, 'tools', 'get_archive.py'))
143 or 'InvalidUriError' in stderr: 152 or 'InvalidUriError' in stderr or "Couldn't download" in stdout):
144 # Couldn't find the SDK on Google Storage. Build it locally. 153 # Couldn't find the SDK on Google Storage. Build it locally.
145 154
146 # On Windows, the output directory is marked as "Read Only," which causes 155 # On Windows, the output directory is marked as "Read Only," which causes
147 # an error to be thrown when we use shutil.rmtree. This helper function 156 # an error to be thrown when we use shutil.rmtree. This helper function
148 # changes the permissions so we can still delete the directory. 157 # changes the permissions so we can still delete the directory.
149 def on_rm_error(func, path, exc_info): 158 def on_rm_error(func, path, exc_info):
150 if os.path.exists(path): 159 if os.path.exists(path):
151 os.chmod(path, stat.S_IWRITE) 160 os.chmod(path, stat.S_IWRITE)
152 os.unlink(path) 161 os.unlink(path)
153 # TODO(efortuna): Currently always building ia32 architecture because we 162 # TODO(efortuna): Currently always building ia32 architecture because we
154 # don't have test statistics for what's passing on x64. Eliminate arch 163 # don't have test statistics for what's passing on x64. Eliminate arch
155 # specification when we have tests running on x64, too. 164 # specification when we have tests running on x64, too.
156 shutil.rmtree(os.path.join(os.getcwd(), 165 shutil.rmtree(os.path.join(os.getcwd(),
157 utils.GetBuildRoot(utils.GuessOS(), 'release', 'ia32')), 166 utils.GetBuildRoot(utils.GuessOS(), 'release', 'ia32')),
158 onerror=on_rm_error) 167 onerror=on_rm_error)
159 lines = self.run_cmd([os.path.join('.', 'tools', 'build.py'), '-m', 168 lines = self.RunCmd([os.path.join('.', 'tools', 'build.py'), '-m',
160 'release', '--arch=ia32', 'create_sdk']) 169 'release', '--arch=ia32', 'create_sdk'])
161 170
162 for line in lines: 171 for line in lines:
163 if 'BUILD FAILED' in line: 172 if 'BUILD FAILED' in line:
164 # Someone checked in a broken build! Stop trying to make it work 173 # Someone checked in a broken build! Stop trying to make it work
165 # and wait to try again. 174 # and wait to try again.
166 print 'Broken Build' 175 print 'Broken Build'
167 return 1 176 return 1
168 return 0 177 return 0
169 178
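The on_rm_error helper defined inside SyncAndBuild is the usual workaround for shutil.rmtree failing on read-only files on Windows: clear the read-only bit, then retry the delete. In isolation the pattern looks like this (a sketch; the build directory path is a placeholder, not taken from the script):

    import os
    import shutil
    import stat

    def _on_rm_error(func, path, exc_info):
        # Windows marks some build outputs read-only; clear the bit and retry.
        if os.path.exists(path):
            os.chmod(path, stat.S_IWRITE)
            os.unlink(path)

    build_dir = 'out/ReleaseIA32'  # placeholder path
    if os.path.exists(build_dir):
        shutil.rmtree(build_dir, onerror=_on_rm_error)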
170 def ensure_output_directory(self, dir_name): 179 def EnsureOutputDirectory(self, dir_name):
171 """Test that the listed directory name exists, and if not, create one for 180 """Test that the listed directory name exists, and if not, create one for
172 our output to be placed. 181 our output to be placed.
173 182
174 Args: 183 Args:
175 dir_name: the directory we will create if it does not exist.""" 184 dir_name: the directory we will create if it does not exist."""
176 dir_path = os.path.join(TOP_LEVEL_DIR, 'tools', 185 dir_path = os.path.join(TOP_LEVEL_DIR, 'tools',
177 'testing', 'perf_testing', dir_name) 186 'testing', 'perf_testing', dir_name)
178 if not os.path.exists(dir_path): 187 if not os.path.exists(dir_path):
179 os.makedirs(dir_path) 188 os.makedirs(dir_path)
180 print 'Creating output directory ', dir_path 189 print 'Creating output directory ', dir_path
181 190
182 def has_interesting_code(self, past_revision_num=None): 191 def HasInterestingCode(self, past_revision_num=None):
sra1 2012/08/21 22:59:36 Why is it called 'past_revision_num'?
Emily Fortuna 2012/08/22 21:08:20 Silly historical reasons. Fixed.
183 """Tests if there are any versions of files that might change performance 192 """Tests if there are any versions of files that might change performance
184 results on the server.""" 193 results on the server.
194 Returns a tuple of a boolean and potentially a revision number that has
195 interesting code. (If the boolean is false, there is no interesting code,
196 therefore the second element in the tuple will be None.)"""
sra1 2012/08/21 22:59:36 Might be clearer if you said something like Retur
Emily Fortuna 2012/08/22 21:08:20 Done.
185 if not os.path.exists(DART_REPO_LOC): 197 if not os.path.exists(DART_REPO_LOC):
186 return True 198 self._Sync()
187 os.chdir(DART_REPO_LOC) 199 os.chdir(DART_REPO_LOC)
188 no_effect = ['client', 'compiler', 'editor', 'pkg', 'samples', 'tests', 200 no_effect = ['client', 'compiler', 'editor', 'lib/html/doc', 'pkg',
189 'third_party', 'tools', 'utils'] 201 'samples/calculator', 'samples/chat', 'samples/clock',
sra1 2012/08/21 22:59:36 In my quick hack I had a 'yes' list and a 'no' lis
190 # Pass 'p' in if we have a new certificate for the svn server, we want to 202 'samples/dartcombat', 'samples/hi', 'samples/isolate_html',
191 # (p)ermanently accept it. 203 'samples/leap', 'samples/logo', 'samples/maps',
192 if past_revision_num: 204 'samples/markdown', 'samples/matrix', 'samples/newissues',
205 'samples/playground', 'samples/sample_extension',
206 'samples/slider', 'samples/solar', 'samples/spirodraw',
207 'samples/sunflower', 'samples/swarm', 'samples/swipe',
208 'samples/tests', 'samples/time', 'samples/ui_lib',
209 'samples/webcomponents', 'tests', 'tools/dartc',
210 'tools/get_archive.py', 'tools/test.py', 'tools/testing',
211 'tools/utils', 'third_party', 'utils']
212 def GetChangedFileList(revision):
sra1 2012/08/21 22:59:36 You could just call it GetFileList. One might assu
Emily Fortuna 2012/08/22 21:08:20 Done.
213 """Determine the set of files that were changed for a particular
214 revision."""
193 # TODO(efortuna): This assumes you're using svn. Have a git fallback as 215 # TODO(efortuna): This assumes you're using svn. Have a git fallback as
194 # well. 216 # well. Pass 'p' in if we have a new certificate for the svn server, we
195 results, _ = self.run_cmd(['svn', 'log', '-v', '-r', 217 # want to (p)ermanently accept it.
196 str(past_revision_num)], std_in='p\r\n') 218 results, _ = self.RunCmd(['svn', 'log', '-v', '-r', str(revision)],
219 std_in='p\r\n')
197 results = results.split('\n') 220 results = results.split('\n')
198 if len(results) <= 3: 221 if len(results) <= 3:
199 results = [] 222 return []
200 else: 223 else:
201 # Trim off the details about revision number and commit message. We're 224 # Trim off the details about revision number and commit message. We're
202 # only interested in the files that are changed. 225 # only interested in the files that are changed.
203 results = results[3:] 226 results = results[3:]
204 changed_files = [] 227 changed_files = []
205 for result in results: 228 for result in results:
206 if result == '': 229 if len(result) <= 1:
207 break 230 break
208 changed_files += [result.replace('/branches/bleeding_edge/dart/', '')] 231 tokens = result.split()
209 results = changed_files 232 if len(tokens) > 1:
233 # TODO(efortuna): Also check log for DEPS file changes, but to do
234 # that, we need to change our original checkout.
sra1 2012/08/21 22:59:36 So in preparation for being able to match the DEPS
Emily Fortuna 2012/08/22 21:08:20 Done.
235 changed_files += [tokens[1].replace('/branches/bleeding_edge/dart/',
236 '')]
237 return changed_files
238
239 def HasPerfAffectingResults(files_list):
240 """Determine if this set of changed files might effect performance
241 tests."""
242 def IsSafeFile(f):
243 return any(f.startswith(prefix) for prefix in no_effect)
244 return not all(IsSafeFile(f) for f in files_list)
245
246 if past_revision_num:
247 return (HasPerfAffectingResults(GetChangedFileList(
248 past_revision_num)), past_revision_num)
210 else: 249 else:
211 results, _ = self.run_cmd(['svn', 'st', '-u'], std_in='p\r\n') 250 results, _ = self.RunCmd(['svn', 'st', '-u'], std_in='p\r\n')
212 results = results.split('\n') 251 latest_interesting_server_rev = int(results.split('\n')[-2].split()[-1])
213 for line in results: 252 if self.backfill:
214 tokens = line.split() 253 done_cls = list(UpdateSetOfDoneCls())
215 if past_revision_num or len(tokens) >= 3 and '*' in tokens[-3]: 254 done_cls.sort()
216 # Loop through the changed files to see if it contains any files that 255 if done_cls:
217 # are NOT listed in the no_effect list (directories not listed in 256 last_done_cl = int(done_cls[-1])
218 # the "no_effect" list are assumed to potentially affect performance. 257 else:
219 if not reduce(lambda x, y: x or y, 258 last_done_cl = EARLIEST_REVISION
220 [tokens[-1].startswith(item) for item in no_effect], False): 259 while latest_interesting_server_rev >= last_done_cl:
221 return True 260 results = GetChangedFileList(latest_interesting_server_rev)
sra1 2012/08/21 22:59:36 better name: results -> filelist or files or filen
Emily Fortuna 2012/08/22 21:08:20 Done.
222 return False 261 if HasPerfAffectingResults(results):
262 return (True, latest_interesting_server_rev)
263 else:
264 UpdateSetOfDoneCls(latest_interesting_server_rev)
265 latest_interesting_server_rev -= 1
266 else:
267 last_done_cl = int(SearchForRevision(DART_REPO_LOC)) + 1
268 while last_done_cl <= latest_interesting_server_rev:
269 results = GetChangedFileList(last_done_cl)
270 if HasPerfAffectingResults(results):
271 return (True, last_done_cl)
272 else:
273 UpdateSetOfDoneCls(last_done_cl)
274 last_done_cl += 1
275 return (False, None)
223 276
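The heart of the smarter detection above is a prefix filter: a changed file is "safe" if its path (relative to the branch root) starts with one of the no_effect prefixes, and a revision is interesting only if at least one changed file falls outside that set. A minimal sketch of that check (the prefix list is abbreviated and the example file paths are hypothetical):

    NO_EFFECT_PREFIXES = ['client', 'compiler', 'editor', 'lib/html/doc',
                          'pkg', 'tests', 'third_party', 'utils']  # abbreviated

    def is_safe_file(path):
        # A file cannot affect perf results if it lives under a harmless prefix.
        return any(path.startswith(prefix) for prefix in NO_EFFECT_PREFIXES)

    def has_perf_affecting_changes(changed_files):
        # Interesting iff at least one changed file is not in the safe set.
        return not all(is_safe_file(f) for f in changed_files)

    assert not has_perf_affecting_changes(['lib/html/doc/interface.dart'])
    assert has_perf_affecting_changes(['runtime/vm/object.cc'])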
224 def get_os_directory(self): 277 def GetOsDirectory(self):
225 """Specifies the name of the directory for the testing build of dart, which 278 """Specifies the name of the directory for the testing build of dart, which
226 has yet another naming convention from utils.getBuildRoot(...).""" 279 has yet another naming convention from utils.getBuildRoot(...)."""
227 if platform.system() == 'Windows': 280 if platform.system() == 'Windows':
228 return 'windows' 281 return 'windows'
229 elif platform.system() == 'Darwin': 282 elif platform.system() == 'Darwin':
230 return 'macos' 283 return 'macos'
231 else: 284 else:
232 return 'linux' 285 return 'linux'
233 286
234 def parse_args(self): 287 def ParseArgs(self):
235 parser = optparse.OptionParser() 288 parser = optparse.OptionParser()
236 parser.add_option('--suites', '-s', dest='suites', help='Run the specified ' 289 parser.add_option('--suites', '-s', dest='suites', help='Run the specified '
237 'comma-separated test suites from set: %s' % \ 290 'comma-separated test suites from set: %s' % \
238 ','.join(TestBuilder.available_suite_names()), 291 ','.join(TestBuilder.AvailableSuiteNames()),
239 action='store', default=None) 292 action='store', default=None)
240 parser.add_option('--forever', '-f', dest='continuous', help='Run this scri' 293 parser.add_option('--forever', '-f', dest='continuous', help='Run this scri'
241 'pt forever, always checking for the next svn checkin', 294 'pt forever, always checking for the next svn checkin',
242 action='store_true', default=False) 295 action='store_true', default=False)
243 parser.add_option('--nobuild', '-n', dest='no_build', action='store_true', 296 parser.add_option('--nobuild', '-n', dest='no_build', action='store_true',
244 help='Do not sync with the repository and do not ' 297 help='Do not sync with the repository and do not '
245 'rebuild.', default=False) 298 'rebuild.', default=False)
246 parser.add_option('--noupload', '-u', dest='no_upload', action='store_true', 299 parser.add_option('--noupload', '-u', dest='no_upload', action='store_true',
247 help='Do not post the results of the run.', default=False) 300 help='Do not post the results of the run.', default=False)
248 parser.add_option('--notest', '-t', dest='no_test', action='store_true', 301 parser.add_option('--notest', '-t', dest='no_test', action='store_true',
249 help='Do not run the tests.', default=False) 302 help='Do not run the tests.', default=False)
250 parser.add_option('--verbose', '-v', dest='verbose', help='Print extra ' 303 parser.add_option('--verbose', '-v', dest='verbose',
251 'debug output', action='store_true', default=False) 304 help='Print extra debug output', action='store_true',
305 default=False)
306 parser.add_option('--backfill', '-b', dest='backfill',
307 help='Backfill earlier CLs with additional results when '
308 'there is idle time.', action='store_true',
309 default=False)
252 310
253 args, ignored = parser.parse_args() 311 args, ignored = parser.parse_args()
254 312
255 if not args.suites: 313 if not args.suites:
256 suites = TestBuilder.available_suite_names() 314 suites = TestBuilder.AvailableSuiteNames()
257 else: 315 else:
258 suites = [] 316 suites = []
259 suitelist = args.suites.split(',') 317 suitelist = args.suites.split(',')
260 for name in suitelist: 318 for name in suitelist:
261 if name in TestBuilder.available_suite_names(): 319 if name in TestBuilder.AvailableSuiteNames():
262 suites.append(name) 320 suites.append(name)
263 else: 321 else:
264 print ('Error: Invalid suite %s not in ' % name) + \ 322 print ('Error: Invalid suite %s not in ' % name) + \
265 '%s' % ','.join(TestBuilder.available_suite_names()) 323 '%s' % ','.join(TestBuilder.AvailableSuiteNames())
266 sys.exit(1) 324 sys.exit(1)
267 self.suite_names = suites 325 self.suite_names = suites
268 self.no_build = args.no_build 326 self.no_build = args.no_build
269 self.no_upload = args.no_upload 327 self.no_upload = args.no_upload
270 self.no_test = args.no_test 328 self.no_test = args.no_test
271 self.verbose = args.verbose 329 self.verbose = args.verbose
330 self.backfill = args.backfill
272 return args.continuous 331 return args.continuous
273 332
274 def run_test_sequence(self, revision_num='', num_reruns=1): 333 def RunTestSequence(self, revision_num=None, num_reruns=1):
275 """Run the set of commands to (possibly) build, run, and post the results 334 """Run the set of commands to (possibly) build, run, and post the results
276 of our tests. Returns 0 on a successful run, 1 if we fail to post results or 335 of our tests. Returns 0 on a successful run, 1 if we fail to post results or
277 the run failed, -1 if the build is broken. 336 the run failed, -1 if the build is broken.
278 """ 337 """
279 suites = [] 338 suites = []
280 success = True 339 success = True
281 if not self.no_build and self.sync_and_build(suites, revision_num) == 1: 340 if not self.no_build and self.SyncAndBuild(suites, revision_num) == 1:
282 return -1 # The build is broken. 341 return -1 # The build is broken.
283 342
343 if not self.current_revision_num:
344 self.current_revision_num = SearchForRevision(DART_REPO_LOC)
345
284 for name in self.suite_names: 346 for name in self.suite_names:
285 for run in range(num_reruns): 347 for run in range(num_reruns):
286 suites += [TestBuilder.make_test(name, self)] 348 suites += [TestBuilder.MakeTest(name, self)]
287 349
288 for test in suites: 350 for test in suites:
289 success = success and test.run() 351 success = success and test.Run()
290 if success: 352 if success:
291 return 0 353 return 0
292 else: 354 else:
293 return 1 355 return 1
294 356
295 357
296 class Test(object): 358 class Test(object):
297 """The base class to provide shared code for different tests we will run and 359 """The base class to provide shared code for different tests we will run and
298 post. At a high level, each test has three visitors (the tester and the 360 post. At a high level, each test has three visitors (the tester and the
299 file_processor) that perform operations on the test object.""" 361 file_processor) that perform operations on the test object."""
(...skipping 36 matching lines...)
336 for f in variants: 398 for f in variants:
337 self.revision_dict[platform][f] = dict() 399 self.revision_dict[platform][f] = dict()
338 self.values_dict[platform][f] = dict() 400 self.values_dict[platform][f] = dict()
339 for val in values_list: 401 for val in values_list:
340 self.revision_dict[platform][f][val] = [] 402 self.revision_dict[platform][f][val] = []
341 self.values_dict[platform][f][val] = [] 403 self.values_dict[platform][f][val] = []
342 for extra_metric in extra_metrics: 404 for extra_metric in extra_metrics:
343 self.revision_dict[platform][f][extra_metric] = [] 405 self.revision_dict[platform][f][extra_metric] = []
344 self.values_dict[platform][f][extra_metric] = [] 406 self.values_dict[platform][f][extra_metric] = []
345 407
346 def is_valid_combination(self, platform, variant): 408 def IsValidCombination(self, platform, variant):
347 """Check whether data should be captured for this platform/variant 409 """Check whether data should be captured for this platform/variant
348 combination. 410 combination.
349 """ 411 """
350 # TODO(vsm): This avoids a bug in 32-bit Chrome (dartium) 412 # TODO(vsm): This avoids a bug in 32-bit Chrome (dartium)
351 # running JS dromaeo. 413 # running JS dromaeo.
352 if platform == 'dartium' and variant == 'js': 414 if platform == 'dartium' and variant == 'js':
353 return False 415 return False
354 if (platform == 'safari' and variant == 'dart2js' and 416 if (platform == 'safari' and variant == 'dart2js' and
355 int(self.test_runner.current_revision_num) < 10193): 417 int(self.test_runner.current_revision_num) < 10193):
356 # In revision 10193 we fixed a bug that allows Safari 6 to run dart2js 418 # In revision 10193 we fixed a bug that allows Safari 6 to run dart2js
357 # code. Since we can't change the Safari version on the machine, we're 419 # code. Since we can't change the Safari version on the machine, we're
358 # just not running 420 # just not running
359 # for this case. 421 # for this case.
360 return False 422 return False
361 return True 423 return True
362 424
363 def run(self): 425 def Run(self):
364 """Run the benchmarks/tests from the command line and plot the 426 """Run the benchmarks/tests from the command line and plot the
365 results. 427 results.
366 """ 428 """
367 for visitor in [self.tester, self.file_processor]: 429 for visitor in [self.tester, self.file_processor]:
368 visitor.prepare() 430 visitor.Prepare()
369 431
370 os.chdir(TOP_LEVEL_DIR) 432 os.chdir(TOP_LEVEL_DIR)
371 self.test_runner.ensure_output_directory(self.result_folder_name) 433 self.test_runner.EnsureOutputDirectory(self.result_folder_name)
372 self.test_runner.ensure_output_directory(os.path.join( 434 self.test_runner.EnsureOutputDirectory(os.path.join(
373 'old', self.result_folder_name)) 435 'old', self.result_folder_name))
374 os.chdir(DART_REPO_LOC) 436 os.chdir(DART_REPO_LOC)
375 if not self.test_runner.no_test: 437 if not self.test_runner.no_test:
376 self.tester.run_tests() 438 self.tester.RunTests()
377 439
378 os.chdir(os.path.join(TOP_LEVEL_DIR, 'tools', 'testing', 'perf_testing')) 440 os.chdir(os.path.join(TOP_LEVEL_DIR, 'tools', 'testing', 'perf_testing'))
379 441
380 files = os.listdir(self.result_folder_name) 442 files = os.listdir(self.result_folder_name)
381 post_success = True 443 post_success = True
382 for afile in files: 444 for afile in files:
383 if not afile.startswith('.'): 445 if not afile.startswith('.'):
384 should_move_file = self.file_processor.process_file(afile, True) 446 should_move_file = self.file_processor.ProcessFile(afile, True)
385 if should_move_file: 447 if should_move_file:
386 shutil.move(os.path.join(self.result_folder_name, afile), 448 shutil.move(os.path.join(self.result_folder_name, afile),
387 os.path.join('old', self.result_folder_name, afile)) 449 os.path.join('old', self.result_folder_name, afile))
388 else: 450 else:
389 post_success = False 451 post_success = False
390 452
391 return post_success 453 return post_success
392 454
393 455
394 class Tester(object): 456 class Tester(object):
395 """The base level visitor class that runs tests. It contains convenience 457 """The base level visitor class that runs tests. It contains convenience
396 methods that many Tester objects use. Any class that would like to be a 458 methods that many Tester objects use. Any class that would like to be a
397 TesterVisitor must implement the run_tests() method.""" 459 TesterVisitor must implement the RunTests() method."""
398 460
399 def __init__(self, test): 461 def __init__(self, test):
400 self.test = test 462 self.test = test
401 463
402 def prepare(self): 464 def Prepare(self):
403 """Perform any initial setup required before the test is run.""" 465 """Perform any initial setup required before the test is run."""
404 pass 466 pass
405 467
406 def add_svn_revision_to_trace(self, outfile, browser = None): 468 def AddSvnRevisionToTrace(self, outfile, browser = None):
407 """Add the svn version number to the provided tracefile.""" 469 """Add the svn version number to the provided tracefile."""
408 def get_dartium_revision(): 470 def get_dartium_revision():
409 version_file_name = os.path.join(DART_REPO_LOC, 'client', 'tests', 471 version_file_name = os.path.join(DART_REPO_LOC, 'client', 'tests',
410 'dartium', 'LAST_VERSION') 472 'dartium', 'LAST_VERSION')
411 version_file = open(version_file_name, 'r') 473 version_file = open(version_file_name, 'r')
412 version = version_file.read().split('.')[-2] 474 version = version_file.read().split('.')[-2]
413 version_file.close() 475 version_file.close()
414 return version 476 return version
415 477
416 if browser and browser == 'dartium': 478 if browser and browser == 'dartium':
417 revision = get_dartium_revision() 479 revision = get_dartium_revision()
418 self.test.test_runner.run_cmd(['echo', 'Revision: ' + revision], outfile) 480 self.test.test_runner.RunCmd(['echo', 'Revision: ' + revision], outfile)
419 else: 481 else:
420 revision = search_for_revision() 482 revision = SearchForRevision()
421 self.test.test_runner.run_cmd(['echo', 'Revision: ' + revision], outfile) 483 self.test.test_runner.RunCmd(['echo', 'Revision: ' + revision], outfile)
422 484
423 485
424 class Processor(object): 486 class Processor(object):
425 """The base level vistor class that processes tests. It contains convenience 487 """The base level vistor class that processes tests. It contains convenience
426 methods that many File Processor objects use. Any class that would like to be 488 methods that many File Processor objects use. Any class that would like to be
427 a ProcessorVisitor must implement the process_file() method.""" 489 a ProcessorVisitor must implement the ProcessFile() method."""
428 490
429 SCORE = 'Score' 491 SCORE = 'Score'
430 COMPILE_TIME = 'CompileTime' 492 COMPILE_TIME = 'CompileTime'
431 CODE_SIZE = 'CodeSize' 493 CODE_SIZE = 'CodeSize'
432 494
433 def __init__(self, test): 495 def __init__(self, test):
434 self.test = test 496 self.test = test
435 497
436 def prepare(self): 498 def Prepare(self):
437 """Perform any initial setup required before the test is run.""" 499 """Perform any initial setup required before the test is run."""
438 pass 500 pass
439 501
440 def open_trace_file(self, afile, not_yet_uploaded): 502 def OpenTraceFile(self, afile, not_yet_uploaded):
441 """Find the correct location for the trace file, and open it. 503 """Find the correct location for the trace file, and open it.
442 Args: 504 Args:
443 afile: The tracefile name. 505 afile: The tracefile name.
444 not_yet_uploaded: True if this file is to be found in a directory that 506 not_yet_uploaded: True if this file is to be found in a directory that
445 contains un-uploaded data. 507 contains un-uploaded data.
446 Returns: A file object corresponding to the given file name.""" 508 Returns: A file object corresponding to the given file name."""
447 file_path = os.path.join(self.test.result_folder_name, afile) 509 file_path = os.path.join(self.test.result_folder_name, afile)
448 if not not_yet_uploaded: 510 if not not_yet_uploaded:
449 file_path = os.path.join('old', file_path) 511 file_path = os.path.join('old', file_path)
450 return open(file_path) 512 return open(file_path)
451 513
452 def report_results(self, benchmark_name, score, platform, variant, 514 def ReportResults(self, benchmark_name, score, platform, variant,
453 revision_number, metric): 515 revision_number, metric):
454 """Store the results of the benchmark run. 516 """Store the results of the benchmark run.
455 Args: 517 Args:
456 benchmark_name: The name of the individual benchmark. 518 benchmark_name: The name of the individual benchmark.
457 score: The numerical value of this benchmark. 519 score: The numerical value of this benchmark.
458 platform: The platform the test was run on (firefox, command line, etc). 520 platform: The platform the test was run on (firefox, command line, etc).
459 variant: Specifies whether the data was about generated Frog, js, a 521 variant: Specifies whether the data was about generated Frog, js, a
460 combination of both, or Dart depending on the test. 522 combination of both, or Dart depending on the test.
461 revision_number: The revision of the code (and sometimes the revision of 523 revision_number: The revision of the code (and sometimes the revision of
462 dartium). 524 dartium).
463 525
464 Returns: True if the post was successful.""" 526 Returns: True if the post was successful."""
465 return post_results.report_results(benchmark_name, score, platform, variant, 527 return post_results.report_results(benchmark_name, score, platform, variant,
466 revision_number, metric) 528 revision_number, metric)
467 529
468 def calculate_geometric_mean(self, platform, variant, svn_revision): 530 def CalculateGeometricMean(self, platform, variant, svn_revision):
469 """Calculate the aggregate geometric mean for JS and dart2js benchmark sets, 531 """Calculate the aggregate geometric mean for JS and dart2js benchmark sets,
470 given two benchmark dictionaries.""" 532 given two benchmark dictionaries."""
471 geo_mean = 0 533 geo_mean = 0
472 if self.test.is_valid_combination(platform, variant): 534 if self.test.IsValidCombination(platform, variant):
473 for benchmark in self.test.values_list: 535 for benchmark in self.test.values_list:
536 if not self.test.values_dict[platform][variant][benchmark]:
537 print 'Error determining mean for %s %s %s' % (platform, variant,
538 benchmark)
539 continue
474 geo_mean += math.log( 540 geo_mean += math.log(
475 self.test.values_dict[platform][variant][benchmark][ 541 self.test.values_dict[platform][variant][benchmark][-1])
476 len(self.test.values_dict[platform][variant][benchmark]) - 1])
477 542
478 self.test.values_dict[platform][variant]['Geo-Mean'] += \ 543 self.test.values_dict[platform][variant]['Geo-Mean'] += \
479 [math.pow(math.e, geo_mean / len(self.test.values_list))] 544 [math.pow(math.e, geo_mean / len(self.test.values_list))]
480 self.test.revision_dict[platform][variant]['Geo-Mean'] += [svn_revision] 545 self.test.revision_dict[platform][variant]['Geo-Mean'] += [svn_revision]
481 546
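CalculateGeometricMean uses the standard log-sum formulation: the aggregate is exp of the mean of the logs of each benchmark's latest score, i.e. the geometric mean, computed in log space because that is numerically safer than multiplying many scores together. A minimal sketch with made-up scores:

    import math

    def geometric_mean(scores):
        # exp of the arithmetic mean of the logs == n-th root of the product.
        return math.exp(sum(math.log(s) for s in scores) / len(scores))

    print geometric_mean([120.0, 95.5, 310.2])  # latest score per benchmark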
482 def get_score_type(self, benchmark_name): 547 def GetScoreType(self, benchmark_name):
483 """Determine the type of score for posting -- default is 'Score' (aka 548 """Determine the type of score for posting -- default is 'Score' (aka
484 Runtime), other options are CompileTime and CodeSize.""" 549 Runtime), other options are CompileTime and CodeSize."""
485 return self.SCORE 550 return self.SCORE
486 551
487 552
488 class RuntimePerformanceTest(Test): 553 class RuntimePerformanceTest(Test):
489 """Super class for all runtime performance testing.""" 554 """Super class for all runtime performance testing."""
490 555
491 def __init__(self, result_folder_name, platform_list, platform_type, 556 def __init__(self, result_folder_name, platform_list, platform_type,
492 versions, benchmarks, test_runner, tester, file_processor): 557 versions, benchmarks, test_runner, tester, file_processor):
(...skipping 18 matching lines...)
511 platform_list, versions, benchmarks, test_runner, tester, 576 platform_list, versions, benchmarks, test_runner, tester,
512 file_processor) 577 file_processor)
513 self.platform_list = platform_list 578 self.platform_list = platform_list
514 self.platform_type = platform_type 579 self.platform_type = platform_type
515 self.versions = versions 580 self.versions = versions
516 self.benchmarks = benchmarks 581 self.benchmarks = benchmarks
517 582
518 583
519 class BrowserTester(Tester): 584 class BrowserTester(Tester):
520 @staticmethod 585 @staticmethod
521 def get_browsers(add_dartium=True): 586 def GetBrowsers(add_dartium=True):
522 browsers = []#['ff', 'chrome'] 587 browsers = ['ff', 'chrome']
523 if add_dartium: 588 if add_dartium:
524 browsers += ['dartium'] 589 browsers += ['dartium']
525 has_shell = False 590 has_shell = False
526 if platform.system() == 'Darwin': 591 if platform.system() == 'Darwin':
527 browsers += ['safari'] 592 browsers += ['safari']
528 if platform.system() == 'Windows': 593 if platform.system() == 'Windows':
529 browsers += ['ie'] 594 browsers += ['ie']
530 has_shell = True 595 has_shell = True
531 return browsers 596 return browsers
532 597
533 598
534 class CommonBrowserTest(RuntimePerformanceTest): 599 class CommonBrowserTest(RuntimePerformanceTest):
535 """Runs this basic performance tests (Benchpress, some V8 benchmarks) in the 600 """Runs this basic performance tests (Benchpress, some V8 benchmarks) in the
536 browser.""" 601 browser."""
537 602
538 def __init__(self, test_runner): 603 def __init__(self, test_runner):
539 """Args: 604 """Args:
540 test_runner: Reference to the object that notifies us when to run.""" 605 test_runner: Reference to the object that notifies us when to run."""
541 super(CommonBrowserTest, self).__init__( 606 super(CommonBrowserTest, self).__init__(
542 self.name(), BrowserTester.get_browsers(False), 607 self.Name(), BrowserTester.GetBrowsers(False),
543 'browser', ['js', 'dart2js'], 608 'browser', ['js', 'dart2js'],
544 self.get_standalone_benchmarks(), test_runner, 609 self.GetStandaloneBenchmarks(), test_runner,
545 self.CommonBrowserTester(self), 610 self.CommonBrowserTester(self),
546 self.CommonBrowserFileProcessor(self)) 611 self.CommonBrowserFileProcessor(self))
547 612
548 @staticmethod 613 @staticmethod
549 def name(): 614 def Name():
550 return 'browser-perf' 615 return 'browser-perf'
551 616
552 @staticmethod 617 @staticmethod
553 def get_standalone_benchmarks(): 618 def GetStandaloneBenchmarks():
554 return ['Mandelbrot', 'DeltaBlue', 'Richards', 'NBody', 'BinaryTrees', 619 return ['Mandelbrot', 'DeltaBlue', 'Richards', 'NBody', 'BinaryTrees',
555 'Fannkuch', 'Meteor', 'BubbleSort', 'Fibonacci', 'Loop', 'Permute', 620 'Fannkuch', 'Meteor', 'BubbleSort', 'Fibonacci', 'Loop', 'Permute',
556 'Queens', 'QuickSort', 'Recurse', 'Sieve', 'Sum', 'Tak', 'Takl', 'Towers', 621 'Queens', 'QuickSort', 'Recurse', 'Sieve', 'Sum', 'Tak', 'Takl', 'Towers',
557 'TreeSort'] 622 'TreeSort']
558 623
559 class CommonBrowserTester(BrowserTester): 624 class CommonBrowserTester(BrowserTester):
560 def run_tests(self): 625 def RunTests(self):
561 """Run a performance test in the browser.""" 626 """Run a performance test in the browser."""
562 os.chdir(DART_REPO_LOC) 627 os.chdir(DART_REPO_LOC)
563 self.test.test_runner.run_cmd([ 628 self.test.test_runner.RunCmd([
564 'python', os.path.join('internal', 'browserBenchmarks', 629 'python', os.path.join('internal', 'browserBenchmarks',
565 'make_web_benchmarks.py')]) 630 'make_web_benchmarks.py')])
566 631
567 for browser in self.test.platform_list: 632 for browser in self.test.platform_list:
568 for version in self.test.versions: 633 for version in self.test.versions:
569 if not self.test.is_valid_combination(browser, version): 634 if not self.test.IsValidCombination(browser, version):
570 continue 635 continue
571 self.test.trace_file = os.path.join(TOP_LEVEL_DIR, 636 self.test.trace_file = os.path.join(TOP_LEVEL_DIR,
572 'tools', 'testing', 'perf_testing', self.test.result_folder_name, 637 'tools', 'testing', 'perf_testing', self.test.result_folder_name,
573 'perf-%s-%s-%s' % (self.test.cur_time, browser, version)) 638 'perf-%s-%s-%s' % (self.test.cur_time, browser, version))
574 self.add_svn_revision_to_trace(self.test.trace_file, browser) 639 self.AddSvnRevisionToTrace(self.test.trace_file, browser)
575 file_path = os.path.join( 640 file_path = os.path.join(
576 os.getcwd(), 'internal', 'browserBenchmarks', 641 os.getcwd(), 'internal', 'browserBenchmarks',
577 'benchmark_page_%s.html' % version) 642 'benchmark_page_%s.html' % version)
578 self.test.test_runner.run_cmd( 643 self.test.test_runner.RunCmd(
579 ['python', os.path.join('tools', 'testing', 'run_selenium.py'), 644 ['python', os.path.join('tools', 'testing', 'run_selenium.py'),
580 '--out', file_path, '--browser', browser, 645 '--out', file_path, '--browser', browser,
581 '--timeout', '600', '--mode', 'perf'], self.test.trace_file, 646 '--timeout', '600', '--mode', 'perf'], self.test.trace_file,
582 append=True) 647 append=True)
583 648
584 class CommonBrowserFileProcessor(Processor): 649 class CommonBrowserFileProcessor(Processor):
585 650
586 def process_file(self, afile, should_post_file): 651 def ProcessFile(self, afile, should_post_file):
587 """Comb through the html to find the performance results. 652 """Comb through the html to find the performance results.
588 Returns: True if we successfully posted our data to storage and/or we can 653 Returns: True if we successfully posted our data to storage and/or we can
589 delete the trace file.""" 654 delete the trace file."""
590 os.chdir(os.path.join(TOP_LEVEL_DIR, 'tools', 655 os.chdir(os.path.join(TOP_LEVEL_DIR, 'tools',
591 'testing', 'perf_testing')) 656 'testing', 'perf_testing'))
592 parts = afile.split('-') 657 parts = afile.split('-')
593 browser = parts[2] 658 browser = parts[2]
594 version = parts[3] 659 version = parts[3]
595 f = self.open_trace_file(afile, should_post_file) 660 f = self.OpenTraceFile(afile, should_post_file)
596 lines = f.readlines() 661 lines = f.readlines()
597 line = '' 662 line = ''
598 i = 0 663 i = 0
599 revision_num = 0 664 revision_num = 0
600 while '<div id="results">' not in line and i < len(lines): 665 while '<div id="results">' not in line and i < len(lines):
601 if 'Revision' in line: 666 if 'Revision' in line:
602 revision_num = int(line.split()[1].strip('"')) 667 revision_num = int(line.split()[1].strip('"'))
603 line = lines[i] 668 line = lines[i]
604 i += 1 669 i += 1
605 670
(...skipping 16 matching lines...)
622 if len(name_and_score) < 2: 687 if len(name_and_score) < 2:
623 break 688 break
624 name = name_and_score[0].strip() 689 name = name_and_score[0].strip()
625 score = name_and_score[1].strip() 690 score = name_and_score[1].strip()
626 if version == 'js' or version == 'v8': 691 if version == 'js' or version == 'v8':
627 version = 'js' 692 version = 'js'
628 bench_dict = self.test.values_dict[browser][version] 693 bench_dict = self.test.values_dict[browser][version]
629 bench_dict[name] += [float(score)] 694 bench_dict[name] += [float(score)]
630 self.test.revision_dict[browser][version][name] += [revision_num] 695 self.test.revision_dict[browser][version][name] += [revision_num]
631 if not self.test.test_runner.no_upload and should_post_file: 696 if not self.test.test_runner.no_upload and should_post_file:
632 upload_success = upload_success and self.report_results( 697 upload_success = upload_success and self.ReportResults(
633 name, score, browser, version, revision_num, 698 name, score, browser, version, revision_num,
634 self.get_score_type(name)) 699 self.GetScoreType(name))
635 else: 700 else:
636 upload_success = False 701 upload_success = False
637 702
638 f.close() 703 f.close()
639 self.calculate_geometric_mean(browser, version, revision_num) 704 self.CalculateGeometricMean(browser, version, revision_num)
640 return upload_success 705 return upload_success
641 706
642 707
643 class DromaeoTester(Tester): 708 class DromaeoTester(Tester):
644 DROMAEO_BENCHMARKS = { 709 DROMAEO_BENCHMARKS = {
645 'attr': ('attributes', [ 710 'attr': ('attributes', [
646 'getAttribute', 711 'getAttribute',
647 'element.property', 712 'element.property',
648 'setAttribute', 713 'setAttribute',
649 'element.property = value']), 714 'element.property = value']),
(...skipping 17 matching lines...)
667 'traverse': ('traverse', [ 732 'traverse': ('traverse', [
668 'firstChild', 733 'firstChild',
669 'lastChild', 734 'lastChild',
670 'nextSibling', 735 'nextSibling',
671 'previousSibling', 736 'previousSibling',
672 'childNodes']) 737 'childNodes'])
673 } 738 }
674 739
675 # Use filenames that don't have unusual characters for benchmark names. 740 # Use filenames that don't have unusual characters for benchmark names.
676 @staticmethod 741 @staticmethod
677 def legalize_filename(str): 742 def LegalizeFilename(str):
678 remap = { 743 remap = {
679 ' ': '_', 744 ' ': '_',
680 '(': '_', 745 '(': '_',
681 ')': '_', 746 ')': '_',
682 '*': 'ALL', 747 '*': 'ALL',
683 '=': 'ASSIGN', 748 '=': 'ASSIGN',
684 } 749 }
685 for (old, new) in remap.iteritems(): 750 for (old, new) in remap.iteritems():
686 str = str.replace(old, new) 751 str = str.replace(old, new)
687 return str 752 return str
688 753
689 # TODO(vsm): This is a hack to skip breaking tests. Triage this 754 # TODO(vsm): This is a hack to skip breaking tests. Triage this
690 # failure properly. The modify suite fails on 32-bit chrome, which 755 # failure properly. The modify suite fails on 32-bit chrome, which
691 # is the default on mac and win. 756 # is the default on mac and win.
692 @staticmethod 757 @staticmethod
693 def get_valid_dromaeo_tags(): 758 def GetValidDromaeoTags():
694 tags = [tag for (tag, _) in DromaeoTester.DROMAEO_BENCHMARKS.values()] 759 tags = [tag for (tag, _) in DromaeoTester.DROMAEO_BENCHMARKS.values()]
695 if platform.system() == 'Darwin' or platform.system() == 'Windows': 760 if platform.system() == 'Darwin' or platform.system() == 'Windows':
696 tags.remove('modify') 761 tags.remove('modify')
697 return tags 762 return tags
698 763
699 @staticmethod 764 @staticmethod
700 def get_dromaeo_benchmarks(): 765 def GetDromaeoBenchmarks():
701 valid = DromaeoTester.get_valid_dromaeo_tags() 766 valid = DromaeoTester.GetValidDromaeoTags()
702 benchmarks = reduce(lambda l1,l2: l1+l2, 767 benchmarks = reduce(lambda l1,l2: l1+l2,
703 [tests for (tag, tests) in 768 [tests for (tag, tests) in
704 DromaeoTester.DROMAEO_BENCHMARKS.values() 769 DromaeoTester.DROMAEO_BENCHMARKS.values()
705 if tag in valid]) 770 if tag in valid])
706 return map(DromaeoTester.legalize_filename, benchmarks) 771 return map(DromaeoTester.LegalizeFilename, benchmarks)
707 772
708 @staticmethod 773 @staticmethod
709 def get_dromaeo_versions(): 774 def GetDromaeoVersions():
710 return ['js', 'dart2js_html'] 775 return ['js', 'dart2js_html']
711 776
712 777
713 class DromaeoTest(RuntimePerformanceTest): 778 class DromaeoTest(RuntimePerformanceTest):
714 """Runs Dromaeo tests, in the browser.""" 779 """Runs Dromaeo tests, in the browser."""
715 def __init__(self, test_runner): 780 def __init__(self, test_runner):
716 super(DromaeoTest, self).__init__( 781 super(DromaeoTest, self).__init__(
717 self.name(), 782 self.Name(),
718 BrowserTester.get_browsers(True), 783 BrowserTester.GetBrowsers(True),
719 'browser', 784 'browser',
720 DromaeoTester.get_dromaeo_versions(), 785 DromaeoTester.GetDromaeoVersions(),
721 DromaeoTester.get_dromaeo_benchmarks(), test_runner, 786 DromaeoTester.GetDromaeoBenchmarks(), test_runner,
722 self.DromaeoPerfTester(self), 787 self.DromaeoPerfTester(self),
723 self.DromaeoFileProcessor(self)) 788 self.DromaeoFileProcessor(self))
724 789
725 @staticmethod 790 @staticmethod
726 def name(): 791 def Name():
727 return 'dromaeo' 792 return 'dromaeo'
728 793
729 class DromaeoPerfTester(DromaeoTester): 794 class DromaeoPerfTester(DromaeoTester):
730 def move_chrome_driver_if_needed(self, browser): 795 def MoveChromeDriverIfNeeded(self, browser):
731 """Move the appropriate version of ChromeDriver onto the path. 796 """Move the appropriate version of ChromeDriver onto the path.
732 TODO(efortuna): This is a total hack because the latest version of Chrome 797 TODO(efortuna): This is a total hack because the latest version of Chrome
733 (Dartium builds) requires a different version of ChromeDriver, that is 798 (Dartium builds) requires a different version of ChromeDriver, that is
734 incompatible with the release or beta Chrome and vice versa. Remove these 799 incompatible with the release or beta Chrome and vice versa. Remove these
735 shenanigans once we're back to both versions of Chrome using the same 800 shenanigans once we're back to both versions of Chrome using the same
736 version of ChromeDriver. IMPORTANT NOTE: This assumes your chromedriver is 801 version of ChromeDriver. IMPORTANT NOTE: This assumes your chromedriver is
737 in the default location (inside depot_tools). 802 in the default location (inside depot_tools).
738 """ 803 """
739 current_dir = os.getcwd() 804 current_dir = os.getcwd()
740 self.test.test_runner.get_archive('chromedriver') 805 self.test.test_runner.GetArchive('chromedriver')
741 path = os.environ['PATH'].split(os.pathsep) 806 path = os.environ['PATH'].split(os.pathsep)
742 orig_chromedriver_path = os.path.join(DART_REPO_LOC, 'tools', 'testing', 807 orig_chromedriver_path = os.path.join(DART_REPO_LOC, 'tools', 'testing',
743 'orig-chromedriver') 808 'orig-chromedriver')
744 dartium_chromedriver_path = os.path.join(DART_REPO_LOC, 'tools', 809 dartium_chromedriver_path = os.path.join(DART_REPO_LOC, 'tools',
745 'testing', 810 'testing',
746 'dartium-chromedriver') 811 'dartium-chromedriver')
747 extension = '' 812 extension = ''
748 if platform.system() == 'Windows': 813 if platform.system() == 'Windows':
749 extension = '.exe' 814 extension = '.exe'
750 815
751 def move_chromedriver(depot_tools, copy_to_depot_tools_dir=True, 816 def MoveChromedriver(depot_tools, copy_to_depot_tools_dir=True,
752 from_path=None): 817 from_path=None):
753 if from_path: 818 if from_path:
754 from_dir = from_path + extension 819 from_dir = from_path + extension
755 else: 820 else:
756 from_dir = os.path.join(orig_chromedriver_path, 821 from_dir = os.path.join(orig_chromedriver_path,
757 'chromedriver' + extension) 822 'chromedriver' + extension)
758 to_dir = os.path.join(depot_tools, 'chromedriver' + extension) 823 to_dir = os.path.join(depot_tools, 'chromedriver' + extension)
759 if not copy_to_depot_tools_dir: 824 if not copy_to_depot_tools_dir:
760 tmp = to_dir 825 tmp = to_dir
761 to_dir = from_dir 826 to_dir = from_dir
762 from_dir = tmp 827 from_dir = tmp
763 print >> sys.stderr, from_dir 828 print >> sys.stderr, from_dir
764 print >> sys.stderr, to_dir 829 print >> sys.stderr, to_dir
765 if not os.path.exists(os.path.dirname(to_dir)): 830 if not os.path.exists(os.path.dirname(to_dir)):
766 os.makedirs(os.path.dirname(to_dir)) 831 os.makedirs(os.path.dirname(to_dir))
767 shutil.copyfile(from_dir, to_dir) 832 shutil.copyfile(from_dir, to_dir)
768 833
769 for loc in path: 834 for loc in path:
770 if 'depot_tools' in loc: 835 if 'depot_tools' in loc:
771 if browser == 'chrome': 836 if browser == 'chrome':
772 if os.path.exists(orig_chromedriver_path): 837 if os.path.exists(orig_chromedriver_path):
773 move_chromedriver(loc) 838 MoveChromedriver(loc)
774 elif browser == 'dartium': 839 elif browser == 'dartium':
775 if self.test.test_runner.current_revision_num < FIRST_CHROMEDRIVER: 840 if (int(self.test.test_runner.current_revision_num) <
841 FIRST_CHROMEDRIVER):
776 # If we don't have a stashed different chromedriver, just use 842 # If we don't have a stashed different chromedriver, just use
777 # the regular chromedriver. 843 # the regular chromedriver.
778 self.test.test_runner.run_cmd(os.path.join( 844 self.test.test_runner.RunCmd(os.path.join(
779 TOP_LEVEL_DIR, 'tools', 'testing', 'webdriver_test_setup.py'), 845 TOP_LEVEL_DIR, 'tools', 'testing', 'webdriver_test_setup.py'),
780 '-f', '-s', '-p') 846 '-f', '-s', '-p')
781 elif not os.path.exists(dartium_chromedriver_path): 847 elif not os.path.exists(dartium_chromedriver_path):
782 self.test.test_runner.get_archive('chromedriver') 848 stdout, _ = self.test.test_runner.GetArchive('chromedriver')
783 # Move original chromedriver for storage. 849 # Move original chromedriver for storage.
784 if not os.path.exists(orig_chromedriver_path): 850 if not os.path.exists(orig_chromedriver_path):
785 move_chromedriver(loc, copy_to_depot_tools_dir=False) 851 MoveChromedriver(loc, copy_to_depot_tools_dir=False)
786 if self.test.test_runner.current_revision_num >= FIRST_CHROMEDRIVER: 852 if self.test.test_runner.current_revision_num >= FIRST_CHROMEDRIVER:
787 # Copy Dartium chromedriver into depot_tools 853 # Copy Dartium chromedriver into depot_tools
788 move_chromedriver(loc, from_path=os.path.join( 854 MoveChromedriver(loc, from_path=os.path.join(
789 dartium_chromedriver_path, 'chromedriver')) 855 dartium_chromedriver_path, 'chromedriver'))
790 os.chdir(current_dir) 856 os.chdir(current_dir)
791 857
792 def run_tests(self): 858 def RunTests(self):
793 """Run dromaeo in the browser.""" 859 """Run dromaeo in the browser."""
794 860
795 self.test.test_runner.get_archive('dartium') 861 self.test.test_runner.GetArchive('dartium')
796 862
797 # Build tests. 863 # Build tests.
798 dromaeo_path = os.path.join('samples', 'third_party', 'dromaeo') 864 dromaeo_path = os.path.join('samples', 'third_party', 'dromaeo')
799 current_path = os.getcwd() 865 current_path = os.getcwd()
800 os.chdir(dromaeo_path) 866 os.chdir(dromaeo_path)
801 self.test.test_runner.run_cmd(['python', 'generate_dart2js_tests.py']) 867 if os.path.exists('generate_dart2js_tests.py'):
868 stdout, _ = self.test.test_runner.RunCmd(
869 ['python', 'generate_dart2js_tests.py'])
870 else:
871 stdout, _ = self.test.test_runner.RunCmd(
872 ['python', 'generate_frog_tests.py'])
802 os.chdir(current_path) 873 os.chdir(current_path)
874 if 'Error: Compilation failed' in stdout:
875 return
876 versions = DromaeoTester.GetDromaeoVersions()
803 877
804 versions = DromaeoTester.get_dromaeo_versions() 878 for browser in BrowserTester.GetBrowsers():
805 879 self.MoveChromeDriverIfNeeded(browser)
806 for browser in BrowserTester.get_browsers():
807 self.move_chrome_driver_if_needed(browser)
808 for version_name in versions: 880 for version_name in versions:
809 if not self.test.is_valid_combination(browser, version_name): 881 if not self.test.IsValidCombination(browser, version_name):
810 continue 882 continue
811 version = DromaeoTest.DromaeoPerfTester.get_dromaeo_url_query( 883 version = DromaeoTest.DromaeoPerfTester.GetDromaeoUrlQuery(
812 browser, version_name) 884 browser, version_name)
813 self.test.trace_file = os.path.join(TOP_LEVEL_DIR, 885 self.test.trace_file = os.path.join(TOP_LEVEL_DIR,
814 'tools', 'testing', 'perf_testing', self.test.result_folder_name, 886 'tools', 'testing', 'perf_testing', self.test.result_folder_name,
815 'dromaeo-%s-%s-%s' % (self.test.cur_time, browser, version_name)) 887 'dromaeo-%s-%s-%s' % (self.test.cur_time, browser, version_name))
816 self.add_svn_revision_to_trace(self.test.trace_file, browser) 888 self.AddSvnRevisionToTrace(self.test.trace_file, browser)
817 file_path = '"%s"' % os.path.join(os.getcwd(), dromaeo_path, 889 file_path = '"%s"' % os.path.join(os.getcwd(), dromaeo_path,
818 'index-js.html?%s' % version) 890 'index-js.html?%s' % version)
819 if platform.system() == 'Windows': 891 self.test.test_runner.RunCmd(
820 file_path = file_path.replace('&', '^&')
821 file_path = file_path.replace('?', '^?')
822 file_path = file_path.replace('|', '^|')
823 self.test.test_runner.run_cmd(
824 ['python', os.path.join('tools', 'testing', 'run_selenium.py'), 892 ['python', os.path.join('tools', 'testing', 'run_selenium.py'),
825 '--out', file_path, '--browser', browser, 893 '--out', file_path, '--browser', browser,
826 '--timeout', '900', '--mode', 'dromaeo'], self.test.trace_file, 894 '--timeout', '900', '--mode', 'dromaeo'], self.test.trace_file,
827 append=True) 895 append=True)
828 # Put default Chromedriver back in. 896 # Put default Chromedriver back in.
829 self.move_chrome_driver_if_needed('chrome') 897 self.MoveChromeDriverIfNeeded('chrome')
830 898
831 @staticmethod 899 @staticmethod
832 def get_dromaeo_url_query(browser, version): 900 def GetDromaeoUrlQuery(browser, version):
833 if browser == 'dartium': 901 if browser == 'dartium':
834 version = version.replace('frog', 'dart') 902 version = version.replace('frog', 'dart')
835 version = version.replace('_','&') 903 version = version.replace('_','&')
836 tags = DromaeoTester.get_valid_dromaeo_tags() 904 tags = DromaeoTester.GetValidDromaeoTags()
837 return '|'.join([ '%s&%s' % (version, tag) for tag in tags]) 905 return '|'.join([ '%s&%s' % (version, tag) for tag in tags])
838 906
839 907
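A quick worked example of the query construction above may help; the tag values and the 'frog_html' version name below are invented stand-ins, since the real lists come from DromaeoTester.GetValidDromaeoTags() and GetDromaeoVersions():

    # Invented tags; the script gets the real list from
    # DromaeoTester.GetValidDromaeoTags().
    tags = ['dom-attr', 'dom-modify']

    def build_query(browser, version):
        # Dartium runs the Dart variant, so 'frog' is rewritten to 'dart'.
        if browser == 'dartium':
            version = version.replace('frog', 'dart')
        # Underscores in the version name become '&' separators in the URL query.
        version = version.replace('_', '&')
        return '|'.join(['%s&%s' % (version, tag) for tag in tags])

    # 'frog_html' is a made-up version name, for illustration only.
    print(build_query('dartium', 'frog_html'))
    # -> dart&html&dom-attr|dart&html&dom-modify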
840 class DromaeoFileProcessor(Processor): 908 class DromaeoFileProcessor(Processor):
841 def process_file(self, afile, should_post_file): 909 def ProcessFile(self, afile, should_post_file):
842 """Comb through the html to find the performance results. 910 """Comb through the html to find the performance results.
843 Returns: True if we successfully posted our data to storage.""" 911 Returns: True if we successfully posted our data to storage."""
844 parts = afile.split('-') 912 parts = afile.split('-')
845 browser = parts[2] 913 browser = parts[2]
846 version = parts[3] 914 version = parts[3]
847 915
848 bench_dict = self.test.values_dict[browser][version] 916 bench_dict = self.test.values_dict[browser][version]
849 917
850 f = self.open_trace_file(afile, should_post_file) 918 f = self.OpenTraceFile(afile, should_post_file)
851 lines = f.readlines() 919 lines = f.readlines()
852 i = 0 920 i = 0
853 revision_num = 0 921 revision_num = 0
854 revision_pattern = r'Revision: (\d+)' 922 revision_pattern = r'Revision: (\d+)'
855 suite_pattern = r'<div class="result-item done">(.+?)</ol></div>' 923 suite_pattern = r'<div class="result-item done">(.+?)</ol></div>'
856 result_pattern = r'<b>(.+?)</b>(.+?)<small> runs/s(.+)' 924 result_pattern = r'<b>(.+?)</b>(.+?)<small> runs/s(.+)'
857 925
858 upload_success = True 926 upload_success = True
859 for line in lines: 927 for line in lines:
860 rev = re.match(revision_pattern, line.strip()) 928 rev = re.match(revision_pattern, line.strip())
861 if rev: 929 if rev:
862 revision_num = int(rev.group(1)) 930 revision_num = int(rev.group(1))
863 continue 931 continue
864 932
865 suite_results = re.findall(suite_pattern, line) 933 suite_results = re.findall(suite_pattern, line)
866 if suite_results: 934 if suite_results:
867 for suite_result in suite_results: 935 for suite_result in suite_results:
868 results = re.findall(r'<li>(.*?)</li>', suite_result) 936 results = re.findall(r'<li>(.*?)</li>', suite_result)
869 if results: 937 if results:
870 for result in results: 938 for result in results:
871 r = re.match(result_pattern, result) 939 r = re.match(result_pattern, result)
872 name = DromaeoTester.legalize_filename(r.group(1).strip(':')) 940 name = DromaeoTester.LegalizeFilename(r.group(1).strip(':'))
873 score = float(r.group(2)) 941 score = float(r.group(2))
874 bench_dict[name] += [float(score)] 942 bench_dict[name] += [float(score)]
875 self.test.revision_dict[browser][version][name] += \ 943 self.test.revision_dict[browser][version][name] += \
876 [revision_num] 944 [revision_num]
877 if not self.test.test_runner.no_upload and should_post_file: 945 if not self.test.test_runner.no_upload and should_post_file:
878 upload_success = upload_success and self.report_results( 946 upload_success = upload_success and self.ReportResults(
879 name, score, browser, version, revision_num, 947 name, score, browser, version, revision_num,
880 self.get_score_type(name)) 948 self.GetScoreType(name))
881 else: 949 else:
882 upload_success = False 950 upload_success = False
883 951
884 f.close() 952 f.close()
885 self.calculate_geometric_mean(browser, version, revision_num) 953 self.CalculateGeometricMean(browser, version, revision_num)
886 return upload_success 954 return upload_success
887 955
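To make the scraping in ProcessFile above easier to follow, here is a self-contained sketch of the same three patterns applied to an invented trace fragment; the real trace files are written by run_selenium.py and their exact markup may differ:

    import re

    # Invented sample lines; a real trace file starts with the svn revision and
    # then contains the Dromaeo results HTML.
    trace_lines = [
        'Revision: 12345',
        '<div class="result-item done">Total<ol>'
        '<li><b>getAttribute:</b> 300.1<small> runs/s</small></li>'
        '<li><b>setAttribute:</b> 120.5<small> runs/s</small></li>'
        '</ol></div>',
    ]

    revision_pattern = r'Revision: (\d+)'
    suite_pattern = r'<div class="result-item done">(.+?)</ol></div>'
    result_pattern = r'<b>(.+?)</b>(.+?)<small> runs/s(.+)'

    revision_num = 0
    for line in trace_lines:
        rev = re.match(revision_pattern, line.strip())
        if rev:
            revision_num = int(rev.group(1))
            continue
        for suite_result in re.findall(suite_pattern, line):
            for result in re.findall(r'<li>(.*?)</li>', suite_result):
                r = re.match(result_pattern, result)
                if r:
                    name = r.group(1).strip(':')
                    score = float(r.group(2))
                    print('r%d %s %s runs/s' % (revision_num, name, score))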
888 class TestBuilder(object): 956 class TestBuilder(object):
889 """Construct the desired test object.""" 957 """Construct the desired test object."""
890 available_suites = dict((suite.name(), suite) for suite in [ 958 available_suites = dict((suite.Name(), suite) for suite in [
891 CommonBrowserTest, DromaeoTest]) 959 CommonBrowserTest, DromaeoTest])
892 960
893 @staticmethod 961 @staticmethod
894 def make_test(test_name, test_runner): 962 def MakeTest(test_name, test_runner):
895 return TestBuilder.available_suites[test_name](test_runner) 963 return TestBuilder.available_suites[test_name](test_runner)
896 964
897 @staticmethod 965 @staticmethod
898 def available_suite_names(): 966 def AvailableSuiteNames():
899 return TestBuilder.available_suites.keys() 967 return TestBuilder.available_suites.keys()
900 968
901 def search_for_revision(directory = None): 969
970 def SearchForRevision(directory = None):
902 """Find the current revision number in the desired directory. If directory is 971 """Find the current revision number in the desired directory. If directory is
903 None, find the revision number in the current directory.""" 972 None, find the revision number in the current directory."""
904 def find_revision(svn_info_command): 973 def FindRevision(svn_info_command):
905 p = subprocess.Popen(svn_info_command, stdout = subprocess.PIPE, 974 p = subprocess.Popen(svn_info_command, stdout = subprocess.PIPE,
906 stderr = subprocess.STDOUT, 975 stderr = subprocess.STDOUT,
907 shell = (platform.system() == 'Windows')) 976 shell = (platform.system() == 'Windows'))
908 output, _ = p.communicate() 977 output, _ = p.communicate()
909 for line in output.split('\n'): 978 for line in output.split('\n'):
910 if 'Revision' in line: 979 if 'Revision' in line:
911 return int(line.split()[1]) 980 return int(line.split()[1])
912 return -1 981 return -1
913 982
914 cwd = os.getcwd() 983 cwd = os.getcwd()
915 if not directory: 984 if not directory:
916 directory = cwd 985 directory = cwd
917 os.chdir(directory) 986 os.chdir(directory)
918 revision_num = int(find_revision(['svn', 'info'])) 987 revision_num = int(FindRevision(['svn', 'info']))
919 if revision_num == -1: 988 if revision_num == -1:
920 revision_num = int(find_revision(['git', 'svn', 'info'])) 989 revision_num = int(FindRevision(['git', 'svn', 'info']))
921 os.chdir(cwd) 990 os.chdir(cwd)
922 return str(revision_num) 991 return str(revision_num)
923 992
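SearchForRevision above shells out to 'svn info' (falling back to 'git svn info') and scans the output for the Revision line. The parsing step on its own, with canned output standing in for the subprocess call:

    # Canned 'svn info' output; the script reads this from
    # subprocess.Popen(['svn', 'info'], ...).communicate() instead.
    sample_output = ('Path: .\n'
                     'URL: http://dart.googlecode.com/svn/branches/bleeding_edge\n'
                     'Revision: 10987\n')

    def find_revision(output):
        # Return the number following 'Revision', or -1 if no such line exists,
        # mirroring the inner FindRevision helper.
        for line in output.split('\n'):
            if 'Revision' in line:
                return int(line.split()[1])
        return -1

    print(find_revision(sample_output))  # -> 10987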
924 def update_set_of_done_cls(revision_num=None): 993
994 def UpdateSetOfDoneCls(revision_num=None):
925 """Update the set of CLs that do not need additional performance runs. 995 """Update the set of CLs that do not need additional performance runs.
926 Args: 996 Args:
927 revision_num: an additional number to be added to the 'done set' 997 revision_num: an additional number to be added to the 'done set'
928 """ 998 """
929 filename = os.path.join(TOP_LEVEL_DIR, 'cached_results.txt') 999 filename = os.path.join(TOP_LEVEL_DIR, 'cached_results.txt')
930 if not os.path.exists(filename): 1000 if not os.path.exists(filename):
931 f = open(filename, 'w') 1001 f = open(filename, 'w')
932 results = set() 1002 results = set()
933 pickle.dump(results, f) 1003 pickle.dump(results, f)
934 f.close() 1004 f.close()
935 f = open(filename, 'r+') 1005 f = open(filename, 'r+')
936 result_set = pickle.load(f) 1006 result_set = pickle.load(f)
937 if revision_num: 1007 if revision_num:
938 f.seek(0) 1008 f.seek(0)
939 result_set.add(revision_num) 1009 result_set.add(revision_num)
940 pickle.dump(result_set, f) 1010 pickle.dump(result_set, f)
941 f.close() 1011 f.close()
942 return result_set 1012 return result_set
943 1013
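The "done set" above is just a pickled Python set on disk. A minimal sketch of the same create-if-missing, load, add, rewrite cycle, using an invented file name instead of TOP_LEVEL_DIR/cached_results.txt (binary mode is used here for portability; the original opens the file in text mode under Python 2):

    import os
    import pickle

    # Invented path for this sketch only.
    filename = 'cached_results_example.txt'

    def update_done_set(revision_num=None):
        # First run: seed the file with an empty pickled set.
        if not os.path.exists(filename):
            with open(filename, 'wb') as f:
                pickle.dump(set(), f)
        # Load the set, optionally add a revision, and write the set back from
        # the start of the file, as the original function does.
        with open(filename, 'rb+') as f:
            result_set = pickle.load(f)
            if revision_num is not None:
                f.seek(0)
                result_set.add(revision_num)
                pickle.dump(result_set, f)
        return result_set

    print(update_done_set(12345))  # -> a set containing 12345 on the first call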
944 def fill_in_back_history(results_set, runner): 1014
1015 def FillInBackHistory(results_set, runner):
945 """Fill in back history performance data. This is done one of two ways, with 1016 """Fill in back history performance data. This is done one of two ways, with
946 equal probability of trying each way (falling back on the sequential version 1017 equal probability of trying each way (falling back on the sequential version
947 as our data becomes more densely populated).""" 1018 as our data becomes more densely populated)."""
948 has_run_extra = False 1019 has_run_extra = False
949 revision_num = int(search_for_revision(DART_REPO_LOC)) 1020 revision_num = int(SearchForRevision(DART_REPO_LOC))
950 1021
951 def try_to_run_additional(revision_number): 1022 def TryToRunAdditional(revision_number):
952 """Determine the number of results we have stored for a particular revision 1023 """Determine the number of results we have stored for a particular revision
953 number, and if it is less than 10, run some extra tests. 1024 number, and if it is less than 10, run some extra tests.
954 Args: 1025 Args:
955 - revision_number: the revision whose performance we want to potentially 1026 - revision_number: the revision whose performance we want to potentially
956 test. 1027 test.
957 Returns: True if we successfully ran some additional tests.""" 1028 Returns: True if we successfully ran some additional tests."""
958 if not runner.has_interesting_code(revision_number): 1029 if not runner.HasInterestingCode(revision_number)[0]:
959 results_set = update_set_of_done_cls(revision_number) 1030 results_set = UpdateSetOfDoneCls(revision_number)
960 return False 1031 return False
961 a_test = TestBuilder.make_test(runner.suite_names[0], runner) 1032 a_test = TestBuilder.MakeTest(runner.suite_names[0], runner)
962 benchmark_name = a_test.values_list[0] 1033 benchmark_name = a_test.values_list[0]
963 platform_name = a_test.platform_list[0] 1034 platform_name = a_test.platform_list[0]
964 variant = a_test.values_dict[platform_name].keys()[0] 1035 variant = a_test.values_dict[platform_name].keys()[0]
965 num_results = post_results.get_num_results(benchmark_name, 1036 num_results = post_results.get_num_results(benchmark_name,
966 platform_name, variant, revision_number, 1037 platform_name, variant, revision_number,
967 a_test.file_processor.get_score_type(benchmark_name)) 1038 a_test.file_processor.GetScoreType(benchmark_name))
968 if num_results < 10: 1039 if num_results < 10:
969 # Run at most two more times. 1040 # Run at most two more times.
970 if num_results > 8: 1041 if num_results > 8:
971 reruns = 10 - num_results 1042 reruns = 10 - num_results
972 else: 1043 else:
973 reruns = 2 1044 reruns = 2
974 run = runner.run_test_sequence(revision_num=str(revision_number), 1045 run = runner.RunTestSequence(revision_num=str(revision_number),
975 num_reruns=reruns) 1046 num_reruns=reruns)
976 if num_results >= 10 or run == 0 and num_results + reruns >= 10: 1047 if num_results >= 10 or run == 0 and num_results + reruns >= 10:
977 results_set = update_set_of_done_cls(revision_number) 1048 results_set = UpdateSetOfDoneCls(revision_number)
978 else: 1049 else:
979 return False 1050 return False
980 return True 1051 return True
981 1052
982 if random.choice([True, False]): 1053 if random.choice([True, False]):
983 # Select a random CL number, with greater likelihood of selecting a CL in 1054 # Select a random CL number, with greater likelihood of selecting a CL in
984 # the more recent history than the distant past (using a simplified weighted 1055 # the more recent history than the distant past (using a simplified weighted
985 # bucket algorithm). If that CL has less than 10 runs, run additional. If it 1056 # bucket algorithm). If that CL has less than 10 runs, run additional. If it
986 # already has 10 runs, look for another CL number that is not yet have all 1057 # already has 10 runs, look for another CL number that is not yet have all
987 # of its additional runs (do this up to 15 times). 1058 # of its additional runs (do this up to 15 times).
988 tries = 0 1059 tries = 0
989 # Select which "thousands bucket" we're going to run additional tests for. 1060 # Select which "thousands bucket" we're going to run additional tests for.
990 bucket_size = 1000 1061 bucket_size = 1000
991 thousands_list = range(EARLIEST_REVISION/bucket_size, 1062 thousands_list = range(EARLIEST_REVISION/bucket_size,
992 int(revision_num)/bucket_size + 1) 1063 int(revision_num)/bucket_size + 1)
993 weighted_total = sum(thousands_list) 1064 weighted_total = sum(thousands_list)
994 generated_random_number = random.randint(0, weighted_total - 1) 1065 generated_random_number = random.randint(0, weighted_total - 1)
995 for i in list(reversed(thousands_list)): 1066 for i in list(reversed(thousands_list)):
996 thousands = thousands_list[i - 1] 1067 thousands = i
997 weighted_total -= thousands_list[i - 1] 1068 weighted_total -= i
998 if weighted_total <= generated_random_number: 1069 if weighted_total <= generated_random_number:
999 break 1070 break
1000 while tries < 15 and not has_run_extra: 1071 while tries < 15 and not has_run_extra:
1001 # Now select a particular revision in that bucket. 1072 # Now select a particular revision in that bucket.
1002 if thousands == int(revision_num)/bucket_size: 1073 if thousands == int(revision_num)/bucket_size:
1003 max_range = 1 + revision_num % bucket_size 1074 max_range = 1 + revision_num % bucket_size
1004 else: 1075 else:
1005 max_range = bucket_size 1076 max_range = bucket_size
1006 rev = thousands * bucket_size + random.randrange(0, max_range) 1077 rev = thousands * bucket_size + random.randrange(0, max_range)
1007 if rev not in results_set: 1078 if rev not in results_set:
1008 has_run_extra = try_to_run_additional(rev) 1079 has_run_extra = TryToRunAdditional(rev)
1009 tries += 1 1080 tries += 1
1010 1081
1011 if not has_run_extra: 1082 if not has_run_extra:
1012 # Try to get up to 10 runs of each CL, starting with the most recent 1083 # Try to get up to 10 runs of each CL, starting with the most recent
1013 # CL that does not yet have 10 runs. But only perform a set of extra 1084 # CL that does not yet have 10 runs. But only perform a set of extra
1014 # runs at most 2 at a time before checking to see if new code has been 1085 # runs at most 2 at a time before checking to see if new code has been
1015 # checked in. 1086 # checked in.
1016 while revision_num > EARLIEST_REVISION and not has_run_extra: 1087 while revision_num > EARLIEST_REVISION and not has_run_extra:
1017 if revision_num not in results_set: 1088 if revision_num not in results_set:
1018 has_run_extra = try_to_run_additional(revision_num) 1089 has_run_extra = TryToRunAdditional(revision_num)
1019 revision_num -= 1 1090 revision_num -= 1
1020 if not has_run_extra: 1091 if not has_run_extra:
1021 # No more extra back-runs to do (for now). Wait for new code. 1092 # No more extra back-runs to do (for now). Wait for new code.
1022 time.sleep(200) 1093 time.sleep(200)
1023 return results_set 1094 return results_set
1024 1095
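The only subtle piece of FillInBackHistory above is the weighted bucket selection: each "thousands" bucket is weighted by its own index, so more recent buckets are proportionally more likely to be picked. A standalone sketch of just that choice, with example bounds standing in for EARLIEST_REVISION and the current checkout's revision; it mirrors the patched right-hand column, where the chosen bucket is i itself:

    import random

    # Example bounds; the script derives these from EARLIEST_REVISION and the
    # revision number of the current checkout.
    earliest_revision = 3000
    current_revision = 9500
    bucket_size = 1000

    def pick_bucket():
        # Buckets 3..9; bucket i covers revisions [i * 1000, (i + 1) * 1000).
        thousands_list = range(earliest_revision // bucket_size,
                               current_revision // bucket_size + 1)
        weighted_total = sum(thousands_list)
        target = random.randint(0, weighted_total - 1)
        # Walk from the newest bucket down, removing each bucket's weight until
        # the remaining weight drops to or below the target, so bucket i wins
        # with probability i / sum(thousands_list).
        for i in reversed(thousands_list):
            weighted_total -= i
            if weighted_total <= target:
                return i
        return thousands_list[0]  # defensive fallback; the loop always returns

    counts = {}
    for _ in range(10000):
        bucket = pick_bucket()
        counts[bucket] = counts.get(bucket, 0) + 1
    print(counts)  # Higher (more recent) buckets should appear more often.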
1096
1025 def main(): 1097 def main():
1026 runner = TestRunner() 1098 runner = TestRunner()
1027 continuous = runner.parse_args() 1099 continuous = runner.ParseArgs()
1028 1100
1029 if not os.path.exists(DART_REPO_LOC): 1101 if not os.path.exists(DART_REPO_LOC):
1030 os.mkdir(dirname(DART_REPO_LOC)) 1102 os.mkdir(dirname(DART_REPO_LOC))
1031 os.chdir(dirname(DART_REPO_LOC)) 1103 os.chdir(dirname(DART_REPO_LOC))
1032 p = subprocess.Popen('gclient config https://dart.googlecode.com/svn/' + 1104 p = subprocess.Popen('gclient config https://dart.googlecode.com/svn/' +
1033 'branches/bleeding_edge/deps/all.deps', 1105 'branches/bleeding_edge/deps/all.deps',
1034 stdout=subprocess.PIPE, stderr=subprocess.PIPE, 1106 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
1035 shell=True) 1107 shell=True)
1036 p.communicate() 1108 p.communicate()
1037 if continuous: 1109 if continuous:
1038 while True: 1110 while True:
1039 results_set = update_set_of_done_cls() 1111 results_set = UpdateSetOfDoneCls()
1040 if runner.has_interesting_code(): 1112 interesting_code_results = runner.HasInterestingCode()
sra1 2012/08/21 22:59:36 Destructuring is often clearer and prevents bugs i
Emily Fortuna 2012/08/22 21:08:20 Done.
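To illustrate the suggestion in the thread above: unpacking the tuple returned by HasInterestingCode() avoids the bare [0]/[1] indexing in the surrounding new code. A toy sketch with a stub standing in for the real method; the element names are guesses, and the actual rename lands in a later patch set:

    # Stub with the same shape as TestRunner.HasInterestingCode(); the real
    # method inspects the svn log and returns a (found, revision) pair.
    def has_interesting_code():
        return (True, 12345)

    # Indexing, as in this patch set:
    results = has_interesting_code()
    if results[0]:
        print('run tests at r%d' % results[1])

    # Tuple unpacking, as the review comment suggests:
    found_new_code, revision = has_interesting_code()
    if found_new_code:
        print('run tests at r%d' % revision)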
1041 runner.run_test_sequence() 1113 if interesting_code_results[0]:
1114 runner.RunTestSequence(interesting_code_results[1])
1042 else: 1115 else:
1043 results_set = fill_in_back_history(results_set, runner) 1116 if runner.backfill:
1117 results_set = FillInBackHistory(results_set, runner)
1118 else:
1119 time.sleep(200)
1044 else: 1120 else:
1045 runner.run_test_sequence() 1121 runner.RunTestSequence()
1046 1122
1047 if __name__ == '__main__': 1123 if __name__ == '__main__':
1048 main() 1124 main()