Chromium Code Reviews

Unified Diff: scripts/slave/recipe_modules/auto_bisect/bisector_test.py

Issue 940123005: Adding ability to the bisect recipe to bisect into dependency repos. (Closed) Base URL: https://chromium.googlesource.com/chromium/tools/build.git@hax
Patch Set: Addressing feedback. Created 5 years, 9 months ago
Index: scripts/slave/recipe_modules/auto_bisect/bisector_test.py
diff --git a/scripts/slave/recipe_modules/auto_bisect/bisector_test.py b/scripts/slave/recipe_modules/auto_bisect/bisector_test.py
index ddfc2690fcf65b46db139088e2a0be37e33be9b6..dcc20a3145b2cbc045148012947bda0f7c8a0443 100644
--- a/scripts/slave/recipe_modules/auto_bisect/bisector_test.py
+++ b/scripts/slave/recipe_modules/auto_bisect/bisector_test.py
@@ -19,6 +19,22 @@ import mock
from auto_bisect.bisector import Bisector
+
+class MockRevisionClass(object):  # pragma: no cover
+  def __init__(self, rev_string, bisector):
+    self.commit_pos = int(rev_string)
+    self.revision_string = rev_string
+    self.bisector = bisector
+    self.previous_revision = None
+    self.next_revision = None
+    self.values = []
+  def get_next_url(self):
+    if self.in_progress:
+      return 'mockurl'
+    return None
+
+
class BisectorTest(unittest.TestCase):  # pragma: no cover
  def setUp(self):
    self.bisect_config = {
@@ -39,26 +55,13 @@ class BisectorTest(unittest.TestCase): # pragma: no cover
    }
    self.dummy_api = mock.Mock()
-    class MockRevisionClass(object):
-      def __init__(self, rev_string, bisector):
-        self.commit_pos = int(rev_string)
-        self.revision_string = rev_string
-        self.bisector = bisector
-        self.previous_revision = None
-        self.next_revision = None
-        self.values = []
-      def get_next_url(self):
-        if self.in_progress:
-          return 'mockurl'
-        return None

  def test_create_bisector(self):
-    new_bisector = Bisector(self.dummy_api, self.bisect_config,
-                            self.MockRevisionClass)
+    bisector = Bisector(self.dummy_api, self.bisect_config, MockRevisionClass)
    # Check the proper revision range is initialized
-    self.assertEqual(4, len(new_bisector.revisions))
-    a, b, c, d = new_bisector.revisions
+    self.assertEqual(4, len(bisector.revisions))
+    a, b, c, d = bisector.revisions
    # Check that revisions are properly chained
    self.assertEqual(a, b.previous_revision)
    self.assertEqual(b, c.previous_revision)
@@ -77,72 +80,64 @@ class BisectorTest(unittest.TestCase): # pragma: no cover
  def test_improvement_direction_default(self):
    # By default, no improvement direction should be set
-    new_bisector = Bisector(self.dummy_api, self.bisect_config,
-                            self.MockRevisionClass)
-    self.assertIsNone(new_bisector.improvement_direction)
+    bisector = Bisector(self.dummy_api, self.bisect_config, MockRevisionClass)
+    self.assertIsNone(bisector.improvement_direction)

  def test_improvement_direction_greater_is_better(self):
    # Improvement up, bad > good: should fail
    self.bisect_config['improvement_direction'] = 1
-    new_bisector = Bisector(self.dummy_api, self.bisect_config,
-                            self.MockRevisionClass)
-    new_bisector.good_rev.mean_value = 10
-    new_bisector.bad_rev.mean_value = 100
-    self.assertFalse(new_bisector.check_improvement_direction())
-    self.assertIn('direction of improvement', ''.join(new_bisector.warnings))
+    bisector = Bisector(self.dummy_api, self.bisect_config, MockRevisionClass)
+    bisector.good_rev.mean_value = 10
+    bisector.bad_rev.mean_value = 100
+    self.assertFalse(bisector.check_improvement_direction())
+    self.assertIn('direction of improvement', ''.join(bisector.warnings))
    # Improvement up, bad < good: should not fail
    self.bisect_config['improvement_direction'] = 1
-    new_bisector = Bisector(self.dummy_api, self.bisect_config,
-                            self.MockRevisionClass)
-    new_bisector.good_rev.mean_value = 100
-    new_bisector.bad_rev.mean_value = 10
-    self.assertTrue(new_bisector.check_improvement_direction())
-    self.assertNotIn('direction of improvement', ''.join(new_bisector.warnings))
+    bisector = Bisector(self.dummy_api, self.bisect_config, MockRevisionClass)
+    bisector.good_rev.mean_value = 100
+    bisector.bad_rev.mean_value = 10
+    self.assertTrue(bisector.check_improvement_direction())
+    self.assertNotIn('direction of improvement', ''.join(bisector.warnings))

  def test_improvement_direction_lower_is_better(self):
    # Improvement down, bad < good: should fail
    self.bisect_config['improvement_direction'] = -1
-    new_bisector = Bisector(self.dummy_api, self.bisect_config,
-                            self.MockRevisionClass)
-    new_bisector.good_rev.mean_value = 100
-    new_bisector.bad_rev.mean_value = 10
-    self.assertFalse(new_bisector.check_improvement_direction())
-    self.assertIn('direction of improvement', ''.join(new_bisector.warnings))
+    bisector = Bisector(self.dummy_api, self.bisect_config, MockRevisionClass)
+    bisector.good_rev.mean_value = 100
+    bisector.bad_rev.mean_value = 10
+    self.assertFalse(bisector.check_improvement_direction())
+    self.assertIn('direction of improvement', ''.join(bisector.warnings))
    # Improvement down, bad > good: should not fail
    self.bisect_config['improvement_direction'] = -1
-    new_bisector = Bisector(self.dummy_api, self.bisect_config,
-                            self.MockRevisionClass)
-    new_bisector.good_rev.mean_value = 10
-    new_bisector.bad_rev.mean_value = 100
-    self.assertTrue(new_bisector.check_improvement_direction())
-    self.assertNotIn('direction of improvement', ''.join(new_bisector.warnings))
+    bisector = Bisector(self.dummy_api, self.bisect_config, MockRevisionClass)
+    bisector.good_rev.mean_value = 10
+    bisector.bad_rev.mean_value = 100
+    self.assertTrue(bisector.check_improvement_direction())
+    self.assertNotIn('direction of improvement', ''.join(bisector.warnings))

  def test_check_regression_confidence_default(self):
    # Test default required confidence (default may change)
    mock_score = self.dummy_api.m.math_utils.confidence_score
    # A confidence score of 0 should not satisfy any default
    mock_score.return_value = 0
-    new_bisector = Bisector(self.dummy_api, self.bisect_config,
-                            self.MockRevisionClass)
-    self.assertFalse(new_bisector.check_regression_confidence())
-    self.assertTrue(new_bisector.failed_confidence)
+    bisector = Bisector(self.dummy_api, self.bisect_config, MockRevisionClass)
+    self.assertFalse(bisector.check_regression_confidence())
+    self.assertTrue(bisector.failed_confidence)
    # A confidence score of 100 should satisfy any default
    mock_score.return_value = 100
-    new_bisector = Bisector(self.dummy_api, self.bisect_config,
-                            self.MockRevisionClass)
-    self.assertTrue(new_bisector.check_regression_confidence())
-    self.assertFalse(new_bisector.failed_confidence)
+    bisector = Bisector(self.dummy_api, self.bisect_config, MockRevisionClass)
+    self.assertTrue(bisector.check_regression_confidence())
+    self.assertFalse(bisector.failed_confidence)

  def test_check_regression_confidence_not_required(self):
    # When confidence is not required, confidence_score should not be called
    mock_score = self.dummy_api.m.math_utils.confidence_score
    self.bisect_config['required_regression_confidence'] = None
-    new_bisector = Bisector(self.dummy_api, self.bisect_config,
-                            self.MockRevisionClass)
-    self.assertTrue(new_bisector.check_regression_confidence())
+    bisector = Bisector(self.dummy_api, self.bisect_config, MockRevisionClass)
+    self.assertTrue(bisector.check_regression_confidence())
    self.assertFalse(mock_score.called)

  def test_check_regression_confidence_arbitrary(self):
@@ -150,17 +145,15 @@ class BisectorTest(unittest.TestCase): # pragma: no cover
    self.bisect_config['required_regression_confidence'] = 99
    # A confidence score of 98.5 should not satisfy the required 99
    mock_score.return_value = 98.5
-    new_bisector = Bisector(self.dummy_api, self.bisect_config,
-                            self.MockRevisionClass)
-    self.assertFalse(new_bisector.check_regression_confidence())
-    self.assertTrue(new_bisector.failed_confidence)
+    bisector = Bisector(self.dummy_api, self.bisect_config, MockRevisionClass)
+    self.assertFalse(bisector.check_regression_confidence())
+    self.assertTrue(bisector.failed_confidence)
    # A confidence score of 99.5 should satisfy the required 99
    mock_score.return_value = 99.5
-    new_bisector = Bisector(self.dummy_api, self.bisect_config,
-                            self.MockRevisionClass)
-    self.assertTrue(new_bisector.check_regression_confidence())
-    self.assertFalse(new_bisector.failed_confidence)
+    bisector = Bisector(self.dummy_api, self.bisect_config, MockRevisionClass)
+    self.assertTrue(bisector.check_regression_confidence())
+    self.assertFalse(bisector.failed_confidence)

  def test_wait_for_all(self):
    def mock_update_status(s):
@@ -172,51 +165,67 @@ class BisectorTest(unittest.TestCase): # pragma: no cover
    # Plug in mock update_status method
    with mock.patch(
-        'bisector_test.BisectorTest.MockRevisionClass.update_status',
+        'bisector_test.MockRevisionClass.update_status',
        mock_update_status):
-      new_bisector = Bisector(self.dummy_api, self.bisect_config,
-                              self.MockRevisionClass)
-      for r in new_bisector.revisions:
+      bisector = Bisector(self.dummy_api, self.bisect_config, MockRevisionClass)
+      for r in bisector.revisions:
        r.in_progress = True
-      new_bisector.wait_for_all(new_bisector.revisions)
+      bisector.wait_for_all(bisector.revisions)
      # Verify that all revisions in the list were verified by mock_update_status
-      self.assertTrue(all([r.mock_verified for r in new_bisector.revisions]))
+      self.assertTrue(all([r.mock_verified for r in bisector.revisions]))

  def test_wait_for_any(self):
    # Creating placeholder for the patch
-    self.MockRevisionClass.update_status = None
+    MockRevisionClass.update_status = None
    with mock.patch(
-        'bisector_test.BisectorTest.MockRevisionClass.update_status'):
-      new_bisector = Bisector(self.dummy_api, self.bisect_config,
-                              self.MockRevisionClass)
-      for r in new_bisector.revisions:
+        'bisector_test.MockRevisionClass.update_status'):
+      bisector = Bisector(self.dummy_api, self.bisect_config, MockRevisionClass)
+      for r in bisector.revisions:
        r.tested = False
        r.in_progress = True
-      new_bisector.revisions[0].tested = True
-      finished_revision = new_bisector.wait_for_any(new_bisector.revisions)
-      self.assertEqual(new_bisector.revisions[0], finished_revision)
+      bisector.revisions[0].tested = True
+      finished_revision = bisector.wait_for_any(bisector.revisions)
+      self.assertEqual(bisector.revisions[0], finished_revision)
+
+
+class BisectorAbortTest(unittest.TestCase):  # pragma: no cover
+  def setUp(self):
+    self.bisect_config = {
+        'test_type': 'perf',
+        'command': 'tools/perf/run_benchmark -v '
+                   '--browser=release page_cycler.intl_ar_fa_he',
+        'good_revision': '306475',
+        'bad_revision': '306478',
+        'metric': 'warm_times/page_load_time',
+        'repeat_count': '2',
+        'max_time_minutes': '5',
+        'truncate_percent': '25',
+        'bug_id': '425582',
+        'gs_bucket': 'chrome-perf',
+        'builder_host': 'master4.golo.chromium.org',
+        'builder_port': '8341',
+        'dummy_builds': True,
+    }
+    self.dummy_api = mock.Mock()
+    self.called_abort = False
+    self.aborted_once = False

  def test_abort_unnecessary_jobs(self):
-    global aborted_once, called_abort
-    called_abort = False
-    aborted_once = False
-
-    def mock_abort(s):
-      global aborted_once, called_abort
-      called_abort = True
-      if aborted_once:
-        raise Exception('Only one abort expected')
-      aborted_once = True
-
-    self.MockRevisionClass.abort = None
-    self.MockRevisionClass.update_status = None
+    def mock_abort(_):
+      self.called_abort = True
+      if self.aborted_once:
+        raise RuntimeError('Only one abort expected')
+      self.aborted_once = True
+
+    MockRevisionClass.abort = None
+    MockRevisionClass.update_status = None
    with mock.patch(
-        'bisector_test.BisectorTest.MockRevisionClass.update_status'):
-      with mock.patch('bisector_test.BisectorTest.MockRevisionClass.abort',
-                      mock_abort) as abort_patch:
-        new_bisector = Bisector(self.dummy_api, self.bisect_config,
-                                self.MockRevisionClass)
-        r = new_bisector.revisions
+        'bisector_test.MockRevisionClass.update_status'):
+      with mock.patch('bisector_test.MockRevisionClass.abort',
+                      mock_abort):
+        bisector = Bisector(self.dummy_api, self.bisect_config,
+                            MockRevisionClass)
+        r = bisector.revisions
        r[0].good = True
        r[0].bad = False
        r[0].tested = True
@@ -236,17 +245,16 @@ class BisectorTest(unittest.TestCase): # pragma: no cover
        r[3].in_progress = False
        try:
-          new_bisector.abort_unnecessary_jobs()
-        except:
+          bisector.abort_unnecessary_jobs()
+        except RuntimeError:
          self.fail('Expected to call abort only once')
-        self.assertTrue(called_abort)
+        self.assertTrue(self.called_abort)
        # Verifying the side effects of updating the candidate range
-        self.assertEqual(r[2], new_bisector.lkgr)
-        self.assertEqual(r[3], new_bisector.fkbr)
-
-# TODO: Test check_bisect_finished
+        self.assertEqual(r[2], bisector.lkgr)
+        self.assertEqual(r[3], bisector.fkbr)
+# TODO(robertocn): Add test for bisector.check_bisect_finished.

if __name__ == '__main__':
  unittest.main()  # pragma: no cover
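The shrunken patch targets above are the point of hoisting MockRevisionClass to module scope: mock.patch resolves its target string by importing a dotted path, and a class created inside setUp() has no importable path, so 'bisector_test.BisectorTest.MockRevisionClass.update_status' can become 'bisector_test.MockRevisionClass.update_status'. Below is a minimal standalone sketch of that mechanism, assuming the standalone mock package these scripts import; ModuleLevelRevision and PatchTargetTest are illustrative names, not part of this CL.

import unittest

import mock


class ModuleLevelRevision(object):
  # A module-scoped class is reachable by dotted path, so it is patchable.
  def update_status(self):
    return 'real'


class PatchTargetTest(unittest.TestCase):
  def test_patch_by_dotted_path(self):
    # mock.patch imports '__main__' and swaps the named attribute for the
    # duration of the with block; extra kwargs configure the MagicMock.
    with mock.patch('__main__.ModuleLevelRevision.update_status',
                    return_value='mocked'):
      self.assertEqual('mocked', ModuleLevelRevision().update_status())
    self.assertEqual('real', ModuleLevelRevision().update_status())


if __name__ == '__main__':
  unittest.main()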

Powered by Google App Engine
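The tests also lean on mock.Mock() manufacturing child mocks on attribute access, which is how self.dummy_api.m.math_utils.confidence_score can be configured without any real recipe API behind it. A short illustration of that pattern; dummy_api here is a local stand-in, not the recipe object:

import mock

dummy_api = mock.Mock()
# Each attribute access creates a child mock on demand, so the whole
# api.m.math_utils.confidence_score chain exists without any real code.
dummy_api.m.math_utils.confidence_score.return_value = 99.5

assert dummy_api.m.math_utils.confidence_score() == 99.5
assert dummy_api.m.math_utils.confidence_score.called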
This is Rietveld 408576698
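Finally, moving the abort bookkeeping from module globals to instance attributes (self.called_abort, self.aborted_once) keeps that call-tracking state scoped to a single test run. A hypothetical standalone version of the same pattern, not taken from the CL:

import unittest


class AbortTrackingTest(unittest.TestCase):
  def setUp(self):
    # State lives on the test instance rather than in module globals, so
    # repeated or reordered runs cannot contaminate each other.
    self.called_abort = False
    self.aborted_once = False

  def test_abort_called_exactly_once(self):
    def mock_abort(_):
      self.called_abort = True
      if self.aborted_once:
        raise RuntimeError('Only one abort expected')
      self.aborted_once = True

    mock_abort(None)  # A first call is expected.
    self.assertTrue(self.called_abort)
    with self.assertRaises(RuntimeError):
      mock_abort(None)  # A second call violates the expectation.


if __name__ == '__main__':
  unittest.main()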