Index: tools/testing/perf_testing/run_perf_tests.py
===================================================================
--- tools/testing/perf_testing/run_perf_tests.py (revision 10935)
+++ tools/testing/perf_testing/run_perf_tests.py (working copy)
@@ -109,15 +109,8 @@
_, stderr = self.run_cmd(cmd)
return stderr
- def sync_and_build(self, suites, revision_num=''):
- """Make sure we have the latest version of of the repo, and build it. We
- begin and end standing in DART_REPO_LOC.
-
- Args:
- suites: The set of suites that we wish to build.
-
- Returns:
- err_code = 1 if there was a problem building."""
+ def _sync(self, revision_num=''):
sra1
2012/08/18 23:08:55
nit: we would usually use None for the default value.
Emily Fortuna
2012/08/20 19:59:05
Done.
+ """Update the repository to the latest or specified revision.""" |
os.chdir(dirname(DART_REPO_LOC)) |
self.clear_out_unversioned_files() |
if revision_num == '': |
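Picking up sra1's nit above: None is the conventional "not provided" sentinel in Python, and '' works but is unusual. A minimal sketch of the suggested shape (the svn command is an assumption here; the actual method body is mostly elided in this hunk):

  def _sync(self, revision_num=None):
    """Update the repository to the latest or specified revision."""
    if revision_num is None:
      cmd = ['svn', 'update']  # No revision given: sync to the tip.
    else:
      cmd = ['svn', 'update', '-r', str(revision_num)]
    self.run_cmd(cmd)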
@@ -133,6 +126,16 @@
os.path.join(TOP_LEVEL_DIR, 'tools', 'testing', 'run_selenium.py'),
os.path.join(DART_REPO_LOC, 'tools', 'testing', 'run_selenium.py'))
+ def sync_and_build(self, suites, revision_num=''):
sra1
2012/08/18 23:08:55
Google style is to use MixedCase for functions and methods.
Emily Fortuna
2012/08/20 19:59:05
Not for methods:
http://google-styleguide.googlec
sra1
2012/08/21 22:59:36
The second link says "Python code should follow PEP-8".
Emily Fortuna
2012/08/22 21:08:20
Yea, I was reading the wrong section of the style guide.
+ """Make sure we have the latest version of of the repo, and build it. We |
+ begin and end standing in DART_REPO_LOC.
+
+ Args:
+ suites: The set of suites that we wish to build.
+
+ Returns:
+ err_code = 1 if there was a problem building."""
+ self._sync(revision_num)
if revision_num == '':
revision_num = search_for_revision()
@@ -181,45 +184,73 @@
def has_interesting_code(self, past_revision_num=None):
"""Tests if there are any versions of files that might change performance
- results on the server."""
+ results on the server.
+ Returns a tuple of a boolean and potentially a revision number that has
+ interesting code. (If the boolean is false, there is no interesting code,
+ therefore the second element in the tuple will be None.)"""
if not os.path.exists(DART_REPO_LOC):
- return True
+ self._sync()
os.chdir(DART_REPO_LOC)
no_effect = ['client', 'compiler', 'editor', 'pkg', 'samples', 'tests',
- 'third_party', 'tools', 'utils']
- # Pass 'p' in if we have a new certificate for the svn server, we want to
- # (p)ermanently accept it.
- if past_revision_num:
+ 'third_party', 'utils']
+ def get_svn_log_data(revision):
sra1
2012/08/18 23:08:55
The function is not really getting log data. It is extracting the set of files changed at that revision.
Emily Fortuna
2012/08/20 19:59:05
Made into a global function. No unit tests for this.
+ """Determine the set of files that were changed for a particular |
+ revision.""" |
# TODO(efortuna): This assumes you're using svn. Have a git fallback as |
# well. |
+ # Pass 'p' in if we have a new certificate for the svn server, we want to |
+ # (p)ermanently accept it. |
results, _ = self.run_cmd(['svn', 'log', '-v', '-r', |
- str(past_revision_num)], std_in='p\r\n') |
+ str(revision)], std_in='p\r\n') |
results = results.split('\n') |
if len(results) <= 3: |
- results = [] |
+ return [] |
else: |
# Trim off the details about revision number and commit message. We're |
# only interested in the files that are changed. |
results = results[3:] |
changed_files = [] |
for result in results: |
- if result == '': |
+ if len(result) <= 1: |
break |
changed_files += [result.replace('/branches/bleeding_edge/dart/', '')] |
sra1
2012/08/18 23:08:55
Don't strip off 'dart', you don't want to miss /bra
Emily Fortuna
2012/08/20 19:59:05
Sort-of-done (with a TODO)... This requires me to
- results = changed_files
+ return changed_files
+
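For context, the "svn log -v -r N" output this helper parses looks roughly like the sample below (illustrative, not captured from the Dart repo). That is why the code drops the first three lines (the separator, the revision/author line, and the "Changed paths:" header) and stops at the blank line that precedes the commit message:

  ------------------------------------------------------------------------
  r10935 | someone | 2012-08-18 ... | 1 line
  Changed paths:
     M /branches/bleeding_edge/dart/runtime/vm/object.cc
     M /branches/bleeding_edge/dart/tools/testing/run_selenium.py

  Commit message here.
  ------------------------------------------------------------------------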
+ def has_perf_affecting_results(results):
sra1
2012/08/18 23:08:55
'results' is kind of generic.
Is this a list of lines?
Emily Fortuna
2012/08/20 19:59:05
Done.
+ """Determine if this set of changed files might effect performance |
+ tests.""" |
+ for line in results: |
+ tokens = line.split() |
+ if len(tokens) >= 1: |
+ # Loop through the changed files to see if it contains any files that |
+ # are NOT listed in the no_effect list (directories not listed in |
+ # the "no_effect" list are assumed to potentially affect performance. |
+ if not reduce(lambda x, y: x or y, |
+ [tokens[-1].startswith(item) for item in no_effect], False): |
sra1
2012/08/18 23:08:55
If the filenames are cleanly extracted, and the no
Emily Fortuna
2012/08/20 19:59:05
Done.
+ return True
+ return False
+
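The reduce over a list of booleans above is equivalent to the builtin any(), which is presumably the simplification sra1 is driving at. A sketch with the parameter renamed per the thread above (same logic otherwise):

  def has_perf_affecting_results(changed_files):
    """Return True if any changed file lies outside the no_effect dirs."""
    for line in changed_files:
      tokens = line.split()
      # A path that does not start with one of the no_effect directories
      # is assumed to potentially affect performance.
      if tokens and not any(tokens[-1].startswith(d) for d in no_effect):
        return True
    return False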
+ if past_revision_num:
+ return (has_perf_affecting_results(get_svn_log_data(past_revision_num)),
+ past_revision_num)
else:
results, _ = self.run_cmd(['svn', 'st', '-u'], std_in='p\r\n')
- results = results.split('\n')
- for line in results:
- tokens = line.split()
- if past_revision_num or len(tokens) >= 3 and '*' in tokens[-3]:
- # Loop through the changed files to see if it contains any files that
- # are NOT listed in the no_effect list (directories not listed in
- # the "no_effect" list are assumed to potentially affect performance.
- if not reduce(lambda x, y: x or y,
- [tokens[-1].startswith(item) for item in no_effect], False):
- return True
- return False
+ latest_interesting_server_rev = int(results.split('\n')[-2].split()[-1])
+ done_cls = list(update_set_of_done_cls())
+ done_cls.sort()
+ if len(done_cls) == 0:
sra1
2012/08/18 23:08:55
Idiomatic python tests for a non-empty list like this: if done_cls:
Emily Fortuna
2012/08/20 19:59:05
Done.
+ last_done_cl = EARLIEST_REVISION
+ else:
+ last_done_cl = int(done_cls[-1])
+
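Per sra1's idiom note, the sort-and-test sequence compresses to something like this sketch (sorted() replaces the in-place sort, and a plain truthiness test replaces len() == 0):

  done_cls = sorted(update_set_of_done_cls())
  last_done_cl = int(done_cls[-1]) if done_cls else EARLIEST_REVISION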
+ while latest_interesting_server_rev >= last_done_cl:
+ results = get_svn_log_data(latest_interesting_server_rev)
+ if has_perf_affecting_results(results):
+ return (True, latest_interesting_server_rev)
+ else:
+ update_set_of_done_cls(latest_interesting_server_rev)
+ latest_interesting_server_rev -= 1
+ return (False, None)
def get_os_directory(self):
"""Specifies the name of the directory for the testing build of dart, which
@@ -471,6 +502,10 @@
geo_mean = 0
if self.test.is_valid_combination(platform, variant):
for benchmark in self.test.values_list:
+ if len(self.test.values_dict[platform][variant][benchmark]) == 0:
sra1
2012/08/18 23:08:55
len(x) == 0 --> not x
if not self.test.values_dict[platform][variant][benchmark]:
Emily Fortuna
2012/08/20 19:59:05
Done.
+ print 'Error determining mean for %s %s %s' % (platform, variant,
+ benchmark)
+ continue
geo_mean += math.log(
self.test.values_dict[platform][variant][benchmark][
len(self.test.values_dict[platform][variant][benchmark]) - 1])
sra1
2012/08/18 23:08:55
Python has negative indexes from the end, so index [-1] gives the last element.
Emily Fortuna
2012/08/20 19:59:05
Done.
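With both suggestions applied (truthiness test instead of len() == 0, and index [-1] instead of len() - 1), the loop body would read roughly as below; the "values" local is introduced here for brevity and is not in the patch:

  for benchmark in self.test.values_list:
    values = self.test.values_dict[platform][variant][benchmark]
    if not values:
      print 'Error determining mean for %s %s %s' % (platform, variant,
                                                     benchmark)
      continue
    # Accumulate the log of the most recent recorded value.
    geo_mean += math.log(values[-1])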
@@ -519,7 +554,7 @@
class BrowserTester(Tester):
@staticmethod
def get_browsers(add_dartium=True):
- browsers = []#['ff', 'chrome']
+ browsers = ['ff', 'chrome']
if add_dartium:
browsers += ['dartium']
has_shell = False
@@ -955,7 +990,7 @@
- revision_number: the revision whose performance we want to potentially
test.
Returns: True if we successfully ran some additional tests."""
- if not runner.has_interesting_code(revision_number):
+ if not runner.has_interesting_code(revision_number)[0]:
results_set = update_set_of_done_cls(revision_number)
return False
a_test = TestBuilder.make_test(runner.suite_names[0], runner)
@@ -993,8 +1028,8 @@
weighted_total = sum(thousands_list)
generated_random_number = random.randint(0, weighted_total - 1)
for i in list(reversed(thousands_list)):
- thousands = thousands_list[i - 1]
- weighted_total -= thousands_list[i - 1]
+ thousands = i
+ weighted_total -= i
if weighted_total <= generated_random_number:
break
while tries < 15 and not has_run_extra:
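The old lines misused each element of thousands_list as an index; the fix consumes the iterated value directly. For reference, a self-contained sketch of the same end-scanning weighted pick (a hypothetical standalone form, not code from the patch):

  import random

  def weighted_pick(weights):
    """Pick an index with probability proportional to weights[index],
    scanning from the end the way the loop above does."""
    total = sum(weights)
    threshold = random.randint(0, total - 1)
    running = total
    for index in reversed(range(len(weights))):
      running -= weights[index]
      if running <= threshold:
        return index
    return 0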
@@ -1037,8 +1072,9 @@
if continuous:
while True:
results_set = update_set_of_done_cls()
- if runner.has_interesting_code():
- runner.run_test_sequence()
+ interesting_code_results = runner.has_interesting_code()
+ if interesting_code_results[0]:
+ runner.run_test_sequence(interesting_code_results[1])
else:
results_set = fill_in_back_history(results_set, runner)
else:
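A cosmetic note on the new tuple return: unpacking reads more clearly than indexing with [0] and [1]. A sketch of the same call site:

  interesting, revision = runner.has_interesting_code()
  if interesting:
    runner.run_test_sequence(revision)
  else:
    results_set = fill_in_back_history(results_set, runner)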