
Side by Side Diff: tools/bisect-perf-regression.py

Issue 12092033: First pass on tool to bisect across range of revisions to help narrow down where a regression in a … (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Messed some git stuff up. Created 7 years, 10 months ago
1 #!/usr/bin/env python
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
5
6 """Performance Test Bisect Tool
7
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
13 further bisect changes to those depots and attempt to narrow down the revision
14 range.
15
16
17 An example usage (using SVN revision numbers):
18
19 ./tools/bisect-perf-regression.py -c\
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21 -g 168222 -b 168232 -m shutdown/simple-user-quit
22
23 Be aware that if you're using the git workflow and specify an svn revision,
24 the script will attempt to find the git SHA1 where svn changes up to that
25 revision were merged in.
26
27
28 An example usage (using git hashes):
29
30 ./tools/bisect-perf-regression.py -c\
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34 -m shutdown/simple-user-quit
35
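The -m/--metric argument takes the form <graph>/<trace> and is matched against
the 'RESULT <graph>: <trace>= <value> <units>' lines printed by the performance
test (the examples above use graph 'shutdown' and trace 'simple-user-quit').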
36 """
37
38
39 import re
40 import os
41 import imp
42 import sys
43 import shlex
44 import optparse
45 import subprocess
46
47
48 DEPOT_DEPS_NAME = { 'webkit' : "src/third_party/WebKit",
49 'v8' : 'src/v8' }
50 DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
51
52 FILE_DEPS_GIT = '.DEPS.git'
53
54
55
56 def IsStringFloat(string_to_check):
57 """Checks whether or not the given string can be converted to a floating
58 point number.
59
60 Args:
61 string_to_check: Input string to check if it can be converted to a float.
62
63 Returns:
64 True if the string can be converted to a float.
65 """
66 try:
67 float(string_to_check)
68
69 return True
70 except ValueError:
71 return False
72
73
74 def IsStringInt(string_to_check):
75 """Checks whether or not the given string can be converted to a integer.
76
77 Args:
78 string_to_check: Input string to check if it can be converted to an int.
79
80 Returns:
81 True if the string can be converted to an int.
82 """
83 try:
84 int(string_to_check)
85
86 return True
87 except ValueError:
88 return False
89
90
91 def RunProcess(command):
92 """Run an arbitrary command, returning its output and return code.
93
94 Args:
95 command: A list containing the command and args to execute.
96
97 Returns:
98 A tuple of the output and return code.
99 """
100 # On Windows, use shell=True to get PATH interpretation.
101 shell = (os.name == 'nt')
102 proc = subprocess.Popen(command,
103 shell=shell,
104 stdout=subprocess.PIPE,
105 stderr=subprocess.PIPE)
106 out = proc.communicate()[0]
107
108 return (out, proc.returncode)
109
110
111 def RunGit(command):
112 """Run a git subcommand, returning its output and return code.
113
114 Args:
115 command: A list containing the args to git.
116
117 Returns:
118 A tuple of the output and return code.
119 """
120 command = ['git'] + command
121
122 return RunProcess(command)
123
124
125 class SourceControl(object):
126 """SourceControl is an abstraction over the underlying source control
127 system used for chromium. For now only git is supported, but in the
128 future, the svn workflow could be added as well."""
129 def __init__(self):
130 super(SourceControl, self).__init__()
131
132 def SyncToRevisionWithGClient(self, revision):
133 """Uses gclient to sync to the specified revision.
134
135 ie. gclient sync --revision <revision>
136
137 Args:
138 revision: The git SHA1 or svn CL (depending on workflow).
139
140 Returns:
141 A tuple of the output and return code.
142 """
143 args = ['gclient', 'sync', '--revision', revision]
144
145 return RunProcess(args)
146
147
148 class GitSourceControl(SourceControl):
149 """GitSourceControl is used to query the underlying source control. """
150 def __init__(self):
151 super(GitSourceControl, self).__init__()
152
153 def GetRevisionList(self, revision_range_end, revision_range_start):
154 """Retrieves a list of revisions between |revision_range_start| and
155 |revision_range_end|.
156
157 Args:
158 revision_range_end: The SHA1 for the end of the range.
159 revision_range_start: The SHA1 for the beginning of the range.
160
161 Returns:
162 A list of the revisions between |revision_range_start| and
163 |revision_range_end| (inclusive).
164 """
165 revision_range = '%s..%s' % (revision_range_start, revision_range_end)
166 cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
167 (log_output, return_code) = RunGit(cmd)
168
169 assert not return_code, 'An error occurred while running'\
170 ' "git %s"' % ' '.join(cmd)
171
172 revision_hash_list = log_output.split()
173 revision_hash_list.append(revision_range_start)
174
175 return revision_hash_list
176
177 def SyncToRevision(self, revision, use_gclient=True):
178 """Syncs to the specified revision.
179
180 Args:
181 revision: The revision to sync to.
182 use_gclient: Specifies whether or not we should sync using gclient or
183 just use source control directly.
184
185 Returns:
186 True if successful.
187 """
188
189 if use_gclient:
190 results = self.SyncToRevisionWithGClient(revision)
191 else:
192 results = RunGit(['checkout', revision])
193
194 return not results[1]
195
196 def ResolveToRevision(self, revision_to_check):
197 """If an SVN revision is supplied, try to resolve it to a git SHA1.
198
199 Args:
200 revision_to_check: The user supplied revision string that may need to be
201 resolved to a git SHA1.
202
203 Returns:
204 A string containing a git SHA1 hash, otherwise None.
205 """
206 if not IsStringInt(revision_to_check):
207 return revision_to_check
208
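# Commits in the git mirror that land SVN changes are assumed to carry a
# message line of the form 'SVN changes up to revision <N>' (the pattern built
# below); the most recent such commit on origin/master for the requested
# revision number is used as the matching git SHA1.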
209 svn_pattern = 'SVN changes up to revision ' + revision_to_check
210 cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern, 'origin/master']
211
212 (log_output, return_code) = RunGit(cmd)
213
214 assert not return_code, 'An error occurred while running'\
215 ' "git %s"' % ' '.join(cmd)
216
217 revision_hash_list = log_output.split()
218
219 if revision_hash_list:
220 return revision_hash_list[0]
221
222 return None
223
224 def IsInProperBranch(self):
225 """Confirms they're in the master branch for performing the bisection.
226 This is needed or gclient will fail to sync properly.
227
228 Returns:
229 True if the current branch on src is 'master'
230 """
231 cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
232 (log_output, return_code) = RunGit(cmd)
233
234 assert not return_code, 'An error occurred while running'\
235 ' "git %s"' % ' '.join(cmd)
236
237 log_output = log_output.strip()
238
239 return log_output == "master"
240
241
242
243 class BisectPerformanceMetrics(object):
244 """BisectPerformanceMetrics performs a bisection against a list of range
245 of revisions to narrow down where performance regressions may have
246 occurred."""
247
248 def __init__(self, source_control, opts):
249 super(BisectPerformanceMetrics, self).__init__()
250
251 self.opts = opts
252 self.source_control = source_control
253 self.src_cwd = os.getcwd()
254 self.depot_cwd = {}
255
256 for d in DEPOT_NAMES:
257 # The working directory of each depot is just the path to the depot, but
258 # since we're already in 'src', we can skip that part.
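# For example (from DEPOT_DEPS_NAME above), 'src/third_party/WebKit' maps to
# <current working dir> + '/third_party/WebKit'.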
259
260 self.depot_cwd[d] = self.src_cwd + DEPOT_DEPS_NAME[d][3:]
261
262 def GetRevisionList(self, bad_revision, good_revision):
263 """Retrieves a list of all the commits between the bad revision and
264 last known good revision."""
265
266 revision_work_list = self.source_control.GetRevisionList(bad_revision,
267 good_revision)
268
269 return revision_work_list
270
271 def Get3rdPartyRevisionsFromCurrentRevision(self):
272 """Parses the DEPS file to determine WebKit/v8/etc... versions.
273
274 Returns:
275 A dict in the format {depot:revision} if successful, otherwise None.
276 """
277
278 cwd = os.getcwd()
279 os.chdir(self.src_cwd)
280
281 deps_locals = {'Var': lambda _: deps_locals['vars'][_],
282 'From': lambda *args: None}
283 execfile(FILE_DEPS_GIT, {}, deps_locals)
284
285 os.chdir(cwd)
286
287 results = {}
288
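# Each relevant deps entry is expected to end in '.git@<sha1>', e.g. roughly
# 'https://chromium.googlesource.com/v8/v8.git@<sha1>' (URL shown is
# illustrative); the regex below extracts the trailing sha1.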
289 rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
290
291 for d in DEPOT_NAMES:
292 if DEPOT_DEPS_NAME[d] in deps_locals['deps']:
293 re_results = rxp.search(deps_locals['deps'][DEPOT_DEPS_NAME[d]])
294
295 if re_results:
296 results[d] = re_results.group('revision')
297 else:
298 return None
299 else:
300 return None
301
302 return results
303
304 def BuildCurrentRevision(self):
305 """Builds chrome and performance_ui_tests on the current revision.
306
307 Returns:
308 True if the build was successful.
309 """
310
311 if self.opts.debug_ignore_build:
312 return True
313
314 gyp_var = os.getenv('GYP_GENERATORS')
315
316 num_threads = 16
317
318 if self.opts.use_goma:
319 num_threads = 100
320
321 if gyp_var is not None and 'ninja' in gyp_var:
322 args = ['ninja',
323 '-C',
324 'out/Release',
325 '-j%d' % num_threads,
326 'chrome',
327 'performance_ui_tests']
328 else:
329 args = ['make',
330 'BUILDTYPE=Release',
331 '-j%d' % num_threads,
332 'chrome',
333 'performance_ui_tests']
334
335 cwd = os.getcwd()
336 os.chdir(self.src_cwd)
337
338 (output, return_code) = RunProcess(args)
339
340 os.chdir(cwd)
341
342 return not return_code
343
344 def RunGClientHooks(self):
345 """Runs gclient with runhooks command.
346
347 Returns:
348 True if gclient reports no errors.
349 """
350
351 if self.opts.debug_ignore_build:
352 return True
353
354 results = RunProcess(['gclient', 'runhooks'])
355
356 return not results[1]
357
358 def ParseMetricValuesFromOutput(self, metric, text):
359 """Parses output from performance_ui_tests and retrieves the results for
360 a given metric.
361
362 Args:
363 metric: The metric as a list of [<graph>, <trace>] strings.
364 text: The text to parse the metric values from.
365
366 Returns:
367 A list of floating point numbers found.
368 """
369 # Format is: RESULT <graph>: <trace>= <value> <units>
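# Illustrative examples of the two forms handled below (values are made up):
#   RESULT shutdown: simple-user-quit= 2.93 seconds
#   RESULT shutdown: simple-user-quit= [2.93,3.01,2.88] seconds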
370 metric_formatted = 'RESULT %s: %s=' % (metric[0], metric[1])
371
372 text_lines = text.split('\n')
373 values_list = []
374
375 for current_line in text_lines:
376 # Parse the output from the performance test for the metric we're
377 # interested in.
378 metric_re = metric_formatted +\
379 "(\s)*(?P<values>[0-9]+(\.[0-9]*)?)"
380 metric_re = re.compile(metric_re)
381 regex_results = metric_re.search(current_line)
382
383 if regex_results is not None:
384 values_list += [regex_results.group('values')]
385 else:
386 metric_re = metric_formatted +\
387 "(\s)*\[(\s)*(?P<values>[0-9,.]+)\]"
388 metric_re = re.compile(metric_re)
389 regex_results = metric_re.search(current_line)
390
391 if regex_results is not None:
392 metric_values = regex_results.group('values')
393
394 values_list += metric_values.split(',')
395
396 return [float(v) for v in values_list if IsStringFloat(v)]
397
398 def RunPerformanceTestAndParseResults(self, command_to_run, metric):
399 """Runs a performance test on the current revision by executing the
400 'command_to_run' and parses the results.
401
402 Args:
403 command_to_run: The command to be run to execute the performance test.
404 metric: The metric to parse out from the results of the performance test.
405
406 Returns:
407 On success, it will return a tuple of the average value of the metric,
408 and a success code of 0.
409 """
410
411 if self.opts.debug_ignore_perf_test:
412 return (0.0, 0)
413
414 args = shlex.split(command_to_run)
415
416 cwd = os.getcwd()
417 os.chdir(self.src_cwd)
418
419 # The return code can be ignored here; a failed run is detected below when
420 # no metric values can be parsed from the output.
420 (output, return_code) = RunProcess(args)
421
422 os.chdir(cwd)
423
424 metric_values = self.ParseMetricValuesFromOutput(metric, output)
425
426 # Need to get the average value if there were multiple values.
427 if metric_values:
428 average_metric_value = reduce(lambda x, y: float(x) + float(y),
429 metric_values) / len(metric_values)
430
431 return (average_metric_value, 0)
432 else:
433 return ('No values returned from performance test.', -1)
434
435 def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric):
436 """Performs a full sync/build/run of the specified revision.
437
438 Args:
439 revision: The revision to sync to.
440 depot: The depot that's being used at the moment (src, webkit, etc.)
441 command_to_run: The command to execute the performance test.
442 metric: The performance metric being tested.
443
444 Returns:
445 On success, a tuple containing the results of the performance test.
446 Otherwise, a tuple with the error message.
447 """
448 use_gclient = (depot == 'chromium')
449
450 if self.opts.debug_ignore_sync or\
451 self.source_control.SyncToRevision(revision, use_gclient):
452
453 success = True
454 if not use_gclient:
455 success = self.RunGClientHooks()
456
457 if success:
458 if self.BuildCurrentRevision():
459 results = self.RunPerformanceTestAndParseResults(command_to_run,
460 metric)
461
462 if results[1] == 0 and use_gclient:
463 external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision()
464
465 if external_revisions:
466 return (results[0], results[1], external_revisions)
467 else:
468 return ('Failed to parse DEPS file for external revisions.', 1)
469 else:
470 return results
471 else:
472 return ('Failed to build revision: [%s]' % (str(revision),), 1)
473 else:
474 return ('Failed to run [gclient runhooks].', 1)
475 else:
476 return ('Failed to sync revision: [%s]' % (str(revision),), 1)
477
478 def CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
479 """Given known good and bad values, decide if the current_value passed
480 or failed.
481
482 Args:
483 current_value: The value of the metric being checked.
484 known_bad_value: The reference value for a "failed" run.
485 known_good_value: The reference value for a "passed" run.
486
487 Returns:
488 True if the current_value is closer to the known_good_value than the
489 known_bad_value.
490 """
491 dist_to_good_value = abs(current_value - known_good_value)
492 dist_to_bad_value = abs(current_value - known_bad_value)
493
494 return dist_to_good_value < dist_to_bad_value
495
496 def ChangeToDepotWorkingDirectory(self, depot_name):
497 """Given a depot, changes to the appropriate working directory.
498
499 Args:
500 depot_name: The name of the depot (see DEPOT_NAMES).
501 """
502 if depot_name == 'chromium':
503 os.chdir(self.src_cwd)
504 elif depot_name in DEPOT_NAMES:
505 os.chdir(self.depot_cwd[depot_name])
506 else:
507 assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
508 ' was added without proper support?' %\
509 (depot_name,)
510
511 def PrepareToBisectOnDepot(self,
512 current_depot,
513 end_revision,
514 start_revision):
515 """Changes to the appropriate directory and gathers a list of revisions
516 to bisect between |start_revision| and |end_revision|.
517
518 Args:
519 current_depot: The depot we want to bisect.
520 end_revision: End of the revision range.
521 start_revision: Start of the revision range.
522
523 Returns:
524 A list containing the revisions between |start_revision| and
525 |end_revision| inclusive.
526 """
527 # Change into working directory of external library to run
528 # subsequent commands.
529 old_cwd = os.getcwd()
530 os.chdir(self.depot_cwd[current_depot])
531
532 depot_revision_list = self.GetRevisionList(end_revision, start_revision)
533
534 os.chdir(old_cwd)
535
536 return depot_revision_list
537
538 def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric):
539 """Gathers reference values by running the performance tests on the
540 known good and bad revisions.
541
542 Args:
543 good_rev: The last known good revision where the performance regression
544 has not occurred yet.
545 bad_rev: A revision where the performance regression has already occurred.
546 cmd: The command to execute the performance test.
547 metric: The metric being tested for regression.
548
549 Returns:
550 A tuple with the results of building and running each revision.
551 """
552 bad_run_results = self.SyncBuildAndRunRevision(bad_rev,
553 'chromium',
554 cmd,
555 metric)
556
557 good_run_results = None
558
559 if not bad_run_results[1]:
560 good_run_results = self.SyncBuildAndRunRevision(good_rev,
561 'chromium',
562 cmd,
563 metric)
564
565 return (bad_run_results, good_run_results)
566
567 def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data):
568 """Adds new revisions to the revision_data dict and initializes them.
569
570 Args:
571 revisions: List of revisions to add.
572 depot: Depot that's currently in use (src, webkit, etc...)
573 sort: Sorting key for displaying revisions.
574 revision_data: A dict to add the new revisions into. Existing revisions
575 will have their sort keys offset.
576 """
577
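# Illustrative example (made-up numbers): inserting three WebKit revisions at
# sort position s shifts every existing entry with sort > s up by three, and
# the new entries receive sort values s + 1, s + 2 and s + 3.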
578 num_depot_revisions = len(revisions)
579
580 for k, v in revision_data.iteritems():
581 if v['sort'] > sort:
582 v['sort'] += num_depot_revisions
583
584 for i in xrange(num_depot_revisions):
585 r = revisions[i]
586
587 revision_data[r] = {'revision' : r,
588 'depot' : depot,
589 'value' : None,
590 'passed' : '?',
591 'sort' : i + sort + 1}
592
593 def PrintRevisionsToBisectMessage(self, revision_list, depot):
594 print
595 print 'Revisions to bisect on [%s]:' % depot
596 for revision_id in revision_list:
597 print(' -> %s' % (revision_id, ))
598 print
599
600 def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
601 """Given known good and bad revisions, run a binary search on all
602 intermediate revisions to determine the CL where the performance regression
603 occurred.
604
605 Args:
606 command_to_run: The command to execute the performance test.
607 bad_revision_in: SVN revision number or git SHA1 of the known bad revision.
608 good_revision_in: SVN revision number or git SHA1 of the known good revision.
609 metric: The performance metric to monitor.
610
611 Returns:
612 A dict with 2 members, 'revision_data' and 'error'. On success,
613 'revision_data' will contain a dict mapping revision ids to
614 data about that revision. Each piece of revision data consists of a
615 dict with the following keys:
616
617 'passed': Represents whether the performance test was successful at
618 that revision. Possible values include: 1 (passed), 0 (failed),
619 '?' (skipped), 'F' (build failed).
620 'depot': The depot that this revision is from (ie. WebKit)
621 'external': If the revision is a 'src' revision, 'external' contains
622 the revisions of each of the external libraries.
623 'sort': A sort value for sorting the dict in order of commits.
624
625 For example:
626 {
627 'error':None,
628 'revision_data':
629 {
630 'CL #1':
631 {
632 'passed':False,
633 'depot':'chromium',
634 'external':None,
635 'sort':0
636 }
637 }
638 }
639
640 If an error occurred, the 'error' field will contain the message and
641 'revision_data' will be empty.
642 """
643
644 results = {'revision_data' : {},
645 'error' : None}
646
647 # If SVN revision numbers were passed in, try to match them to git SHA1s.
648 bad_revision = self.source_control.ResolveToRevision(bad_revision_in)
649 good_revision = self.source_control.ResolveToRevision(good_revision_in)
650
651 if bad_revision is None:
652 results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (bad_revision_in,)
653 return results
654
655 if good_revision is None:
656 results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % (good_revision_in,)
657 return results
658
659 print 'Gathering revision range for bisection.'
660
661 # Retrieve a list of revisions to do bisection on.
662 src_revision_list = self.GetRevisionList(bad_revision, good_revision)
663
664 if src_revision_list:
665 # revision_data will store information about a revision such as the
666 # depot it came from, the webkit/V8 revision at that time,
667 # performance timing, build state, etc...
668 revision_data = results['revision_data']
669
670 # revision_list is the list we're binary searching through at the moment.
671 revision_list = []
672
673 sort_key_ids = 0
674
675 for current_revision_id in src_revision_list:
676 sort_key_ids += 1
677
678 revision_data[current_revision_id] = {'value' : None,
679 'passed' : '?',
680 'depot' : 'chromium',
681 'external' : None,
682 'sort' : sort_key_ids}
683 revision_list.append(current_revision_id)
684
685 min_revision = 0
686 max_revision = len(revision_list) - 1
687
688 self.PrintRevisionsToBisectMessage(revision_list, 'src')
689
690 print 'Gathering reference values for bisection.'
691
692 # Perform the performance tests on the good and bad revisions, to get
693 # reference values.
694 (bad_results, good_results) = self.GatherReferenceValues(good_revision,
695 bad_revision,
696 command_to_run,
697 metric)
698
699 if bad_results[1]:
700 results['error'] = bad_results[0]
701 return results
702
703 if good_results[1]:
704 results['error'] = good_results[0]
705 return results
706
707
708 # We need these reference values to determine if later runs should be
709 # classified as pass or fail.
710 known_bad_value = bad_results[0]
711 known_good_value = good_results[0]
712
713 # Can just mark the good and bad revisions explicitly here since we
714 # already know the results.
715 bad_revision_data = revision_data[revision_list[0]]
716 bad_revision_data['external'] = bad_results[2]
717 bad_revision_data['passed'] = 0
718 bad_revision_data['value'] = known_bad_value
719
720 good_revision_data = revision_data[revision_list[max_revision]]
721 good_revision_data['external'] = good_results[2]
722 good_revision_data['passed'] = 1
723 good_revision_data['value'] = known_good_value
724
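# Binary search: min_revision always indexes a failing (bad) revision and
# max_revision a passing (good) one. Each iteration tests the midpoint and
# moves one of the two bounds to it, until the bounds are adjacent; at that
# point the regression lies between them, or bisection descends into an
# external depot (WebKit/V8) whose revision changed across the boundary.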
725 while True:
726 min_revision_data = revision_data[revision_list[min_revision]]
727 max_revision_data = revision_data[revision_list[max_revision]]
728
729 if max_revision - min_revision <= 1:
730 if min_revision_data['passed'] == '?':
731 next_revision_index = min_revision
732 elif max_revision_data['passed'] == '?':
733 next_revision_index = max_revision
734 elif min_revision_data['depot'] == 'chromium':
735 # If there were changes to any of the external libraries we track,
736 # we should bisect the changes there as well.
737 external_depot = None
738
739 for current_depot in DEPOT_NAMES:
740 if min_revision_data['external'][current_depot] !=\
741 max_revision_data['external'][current_depot]:
742 external_depot = current_depot
743
744 break
745
746 # If there was no change in any of the external depots, the search
747 # is over.
748 if not external_depot:
749 break
750
751 rev_range = [min_revision_data['external'][external_depot],
752 max_revision_data['external'][external_depot]]
753
754 new_revision_list = self.PrepareToBisectOnDepot(external_depot,
755 rev_range[0],
756 rev_range[1])
757
758 if not new_revision_list:
759 results['error'] = 'An error occurred attempting to retrieve'\
760 ' revision range: [%s..%s]' %\
761 (rev_range[1], rev_range[0])
762 return results
763
764 self.AddRevisionsIntoRevisionData(new_revision_list,
765 external_depot,
766 min_revision_data['sort'],
767 revision_data)
768
769 # Reset the bisection and perform it on the newly inserted
770 # changelists.
771 revision_list = new_revision_list
772 min_revision = 0
773 max_revision = len(revision_list) - 1
774 sort_key_ids += len(revision_list)
775
776 print 'Regression in metric: %s appears to be the result of changes'\
777 ' in [%s].' % ('/'.join(metric), external_depot)
778
779 self.PrintRevisionsToBisectMessage(revision_list, external_depot)
780
781 continue
782 else:
783 break
784 else:
785 next_revision_index = int((max_revision - min_revision) / 2) +\
786 min_revision
787
788 next_revision_id = revision_list[next_revision_index]
789 next_revision_data = revision_data[next_revision_id]
790 next_revision_depot = next_revision_data['depot']
791
792 self.ChangeToDepotWorkingDirectory(next_revision_depot)
793
794 print 'Working on revision: [%s]' % next_revision_id
795
796 run_results = self.SyncBuildAndRunRevision(next_revision_id,
797 next_revision_depot,
798 command_to_run,
799 metric)
800
801 # If the build is successful, check whether or not the metric
802 # had regressed.
803 if run_results[1] == 0:
804 if next_revision_depot == 'chromium':
805 next_revision_data['external'] = run_results[2]
806
807 passed_regression = self.CheckIfRunPassed(run_results[0],
808 known_good_value,
809 known_bad_value)
810
811 next_revision_data['passed'] = passed_regression
812 next_revision_data['value'] = run_results[0]
813
814 if passed_regression:
815 max_revision = next_revision_index
816 else:
817 min_revision = next_revision_index
818 else:
819 next_revision_data['passed'] = 'F'
820
821 # If the build is broken, remove it and redo search.
822 revision_list.pop(next_revision_index)
823
824 max_revision -= 1
825 else:
826 # Weren't able to sync and retrieve the revision range.
827 results['error'] = 'An error occurred attempting to retrieve revision '\
828 'range: [%s..%s]' % (good_revision, bad_revision)
829
830 return results
831
832 def FormatAndPrintResults(self, bisect_results):
833 """Prints the results from a bisection run in a readable format.
834
835 Args:
836 bisect_results: The results from a bisection test run.
837 """
838 revision_data = bisect_results['revision_data']
839 revision_data_sorted = sorted(revision_data.iteritems(),
840 key = lambda x: x[1]['sort'])
841
842 print
843 print 'Full results of bisection:'
844 for current_id, current_data in revision_data_sorted:
845 build_status = current_data['passed']
846 metric_value = current_data['value']
847
848 if type(build_status) is bool:
849 build_status = int(build_status)
850
851 if metric_value is None:
852 metric_value = ''
853
854 print(' %8s %s %s %6s' %\
855 (current_data['depot'], current_id, build_status, metric_value))
856 print
857
858 # Find range where it possibly broke.
859 first_working_revision = None
860 last_broken_revision = None
861
862 for k, v in revision_data_sorted:
863 if v['passed'] == True:
864 if first_working_revision is None:
865 first_working_revision = k
866
867 if v['passed'] == False:
868 last_broken_revision = k
869
870 if last_broken_revision is not None and first_working_revision is not None:
871 print 'Results: Regression was detected as a result of changes on:'
872 print ' -> First Bad Revision: [%s] [%s]' %\
873 (last_broken_revision,
874 revision_data[last_broken_revision]['depot'])
875 print ' -> Last Good Revision: [%s] [%s]' %\
876 (first_working_revision,
877 revision_data[first_working_revision]['depot'])
878
879
880 def DetermineAndCreateSourceControl():
881 """Attempts to determine the underlying source control workflow and returns
882 a SourceControl object.
883
884 Returns:
885 An instance of a SourceControl object, or None if the current workflow
886 is unsupported.
887 """
888
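# 'git rev-parse --is-inside-work-tree' prints 'true' when run inside a git
# working tree, which is how the git workflow is detected here.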
889 (output, return_code) = RunGit(['rev-parse', '--is-inside-work-tree'])
890
891 if output.strip() == 'true':
892 return GitSourceControl()
893
894 return None
895
896
897 def main():
898
899 usage = ('%prog [options] [-- chromium-options]\n'
900 'Perform binary search on revision history to find a minimal '
901 'range of revisions where a performance metric regressed.\n')
902
903 parser = optparse.OptionParser(usage=usage)
904
905 parser.add_option('-c', '--command',
906 type='str',
907 help='A command to execute your performance test at' +
908 ' each point in the bisection.')
909 parser.add_option('-b', '--bad_revision',
910 type='str',
911 help='A bad revision to start bisection. ' +
912 'Must be later than good revision. May be either a git' +
913 ' or svn revision.')
914 parser.add_option('-g', '--good_revision',
915 type='str',
916 help='A revision to start bisection where performance' +
917 ' test is known to pass. Must be earlier than the ' +
918 'bad revision. May be either a git or svn revision.')
919 parser.add_option('-m', '--metric',
920 type='str',
921 help='The desired metric to bisect on.')
922 parser.add_option('--use_goma',
923 action="store_true",
924 help='Assume goma is set up and use a larger number of parallel build jobs.')
925 parser.add_option('--debug_ignore_build',
926 action="store_true",
927 help='DEBUG: Don\'t perform builds.')
928 parser.add_option('--debug_ignore_sync',
929 action="store_true",
930 help='DEBUG: Don\'t perform syncs.')
931 parser.add_option('--debug_ignore_perf_test',
932 action="store_true",
933 help='DEBUG: Don\'t perform performance tests.')
934 (opts, args) = parser.parse_args()
935
936 if not opts.command:
937 print 'Error: missing required parameter: --command'
938 print
939 parser.print_help()
940 return 1
941
942 if not opts.good_revision:
943 print 'Error: missing required parameter: --good_revision'
944 print
945 parser.print_help()
946 return 1
947
948 if not opts.bad_revision:
949 print 'Error: missing required parameter: --bad_revision'
950 print
951 parser.print_help()
952 return 1
953
954 if not opts.metric:
955 print 'Error: missing required parameter: --metric'
956 print
957 parser.print_help()
958 return 1
959
960 # Haven't tested the script out on any other platforms yet.
961 if os.name not in ['posix']:
962 print "Sorry, this platform isn't supported yet."
963 print
964 return 1
965
966
967 # Check what source control method they're using. Only the git workflow
968 # is supported at the moment.
969 source_control = DetermineAndCreateSourceControl()
970
971 if not source_control:
972 print "Sorry, only the git workflow is supported at the moment."
973 print
974 return 1
975
976 # gclient sync seems to fail if you're not on the master branch.
977 if not source_control.IsInProperBranch():
978 print "You must switch to master branch to run bisection."
979 print
980 return 1
981
982 metric_values = opts.metric.split('/')
983 if len(metric_values) < 2:
984 print "Invalid metric specified: [%s]" % (opts.metric,)
985 print
986 return 1
987
988
989 bisect_test = BisectPerformanceMetrics(source_control, opts)
990 bisect_results = bisect_test.Run(opts.command,
991 opts.bad_revision,
992 opts.good_revision,
993 metric_values)
994
995 if not bisect_results['error']:
996 bisect_test.FormatAndPrintResults(bisect_results)
997 return 0
998 else:
999 print 'Error: ' + bisect_results['error']
1000 print
1001 return 1
1002
1003 if __name__ == '__main__':
1004 sys.exit(main())