Chromium Code Reviews

Unified Diff: tools/isolate/run_test_cases_smoke_test.py

Issue 10831330: Repeat Failed Tests in Serial (Closed)
Base URL: http://git.chromium.org/chromium/src.git@master
Patch Set: Created 8 years, 4 months ago
 #!/usr/bin/env python
 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+import json
 import logging
 import os
 import re
 import subprocess
 import sys
 import unittest
 
 ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
 
 sys.path.append(os.path.join(ROOT_DIR, 'data', 'gtest_fake'))
 import gtest_fake_base
 
 
-def RunTest(test_file):
+def RunTest(test_file, dump_file=None):
   target = os.path.join(ROOT_DIR, 'data', 'gtest_fake', test_file)
   cmd = [
       sys.executable,
       os.path.join(ROOT_DIR, 'run_test_cases.py'),
-      '--no-dump',
-      target,
   ]
+
+  if dump_file:
+    cmd.extend(['--result', dump_file])
+  else:
+    cmd.append('--no-dump')
+
+  cmd.append(target)
   logging.debug(' '.join(cmd))
   proc = subprocess.Popen(
       cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
   # pylint is confused.
   out, err = proc.communicate() or ('', '')
 
   return (out, err, proc.returncode)
 
 
 class TraceTestCases(unittest.TestCase):
   def setUp(self):
     # Make sure there's no environment variable that could do side effects.
     os.environ.pop('GTEST_SHARD_INDEX', '')
     os.environ.pop('GTEST_TOTAL_SHARDS', '')
 
+    self.filename = 'test.results'
+
+  def tearDown(self):
+    if os.path.exists(self.filename):
+      os.remove(self.filename)
+
   def _check_results(self, expected_out_re, out, err):
     if sys.platform == 'win32':
       out = out.replace('\r\n', '\n')
     lines = out.splitlines()
 
     for index in range(len(expected_out_re)):
       line = lines.pop(0)
       self.assertTrue(
           re.match('^%s$' % expected_out_re[index], line),
           (index, expected_out_re[index], repr(line)))
     self.assertEquals([], lines)
     self.assertEquals('', err)
 
+  def _check_results_file(self, expected_file_contents_entries):
+    self.assertTrue(os.path.exists(self.filename))
+
+    with open(self.filename) as f:
+      file_contents = json.load(f)
+
+    self.assertEqual(len(expected_file_contents_entries), len(file_contents))
+    for (entry_name, entry_count) in expected_file_contents_entries:
+      self.assertTrue(entry_name in file_contents)
+      self.assertEqual(entry_count, len(file_contents[entry_name]))
+
   def test_simple_pass(self):
-    out, err, return_code = RunTest('gtest_fake_pass.py')
+    out, err, return_code = RunTest('gtest_fake_pass.py',
+                                    dump_file=self.filename)
 
     self.assertEquals(0, return_code)
 
     expected_out_re = [
       r'\[\d/\d\] \d\.\d\ds .+',
       r'\[\d/\d\] \d\.\d\ds .+',
       r'\[\d/\d\] \d\.\d\ds .+',
+      re.escape('Summary:'),
       re.escape('Success: 3 100.00%'),
       re.escape('Flaky: 0 0.00%'),
       re.escape('Fail: 0 0.00%'),
       r'\d+\.\ds Done running 3 tests with 3 executions. \d+\.\d test/s',
     ]
-
     self._check_results(expected_out_re, out, err)
 
+    expected_result_file_entries = [
+      ('Foo.Bar1', 1),
+      ('Foo.Bar2', 1),
+      ('Foo.Bar3', 1)
+    ]
+    self._check_results_file(expected_result_file_entries)
+
   def test_simple_fail(self):
-    out, err, return_code = RunTest('gtest_fake_fail.py')
+    out, err, return_code = RunTest('gtest_fake_fail.py', self.filename)
 
     self.assertEquals(1, return_code)
 
+    test_fail_output = [
+      re.escape('Note: Google Test filter = Baz.Fail'),
+      r'',
+    ] + [
+      re.escape(l) for l in
+      gtest_fake_base.get_test_output('Baz.Fail').splitlines()
+    ] + [
+      '',
+    ] + [
+      re.escape(l) for l in gtest_fake_base.get_footer(1, 1).splitlines()
+    ] + [
+      ''
+    ]
+
     expected_out_re = [
       r'\[\d/\d\] \d\.\d\ds .+',
       r'\[\d/\d\] \d\.\d\ds .+',
       r'\[\d/\d\] \d\.\d\ds .+',
       r'\[\d/\d\] \d\.\d\ds .+',
       r'\[\d/\d\] \d\.\d\ds .+',
       r'\[\d/\d\] \d\.\d\ds .+',
-      re.escape('Note: Google Test filter = Baz.Fail'),
-      r'',
-    ] + [
-      re.escape(l) for l in
-      gtest_fake_base.get_test_output('Baz.Fail').splitlines()
-    ] + [
-      '',
-    ] + [
-      re.escape(l) for l in gtest_fake_base.get_footer(1, 1).splitlines()
-    ] + [
-      '',
+    ] + test_fail_output + [
+      re.escape('Retrying failed tests serially.'),
+      r'\[\d/\d\] \d\.\d\ds .+',
+    ] + test_fail_output + [
+      re.escape('Summary:'),
+      re.escape('Baz.Fail failed'),
       re.escape('Success: 3 75.00%'),
       re.escape('Flaky: 0 0.00%'),
       re.escape('Fail: 1 25.00%'),
       r'\d+\.\ds Done running 4 tests with 6 executions. \d+\.\d test/s',
     ]
     self._check_results(expected_out_re, out, err)
 
+    expected_result_file_entries = [
+      ('Foo.Bar1', 1),
+      ('Foo.Bar2', 1),
+      ('Foo.Bar3', 1),
+      ('Baz.Fail', 4)
+    ]
+    self._check_results_file(expected_result_file_entries)
+
   def test_simple_gtest_list_error(self):
     out, err, return_code = RunTest('gtest_fake_error.py')
 
     expected_out_re = [
       'Failed to run .+gtest_fake_error.py',
       'Unable to list tests'
     ]
 
     self.assertEqual(1, return_code)
     self._check_results(expected_out_re, out, err)
 
 
 if __name__ == '__main__':
   VERBOSE = '-v' in sys.argv
   logging.basicConfig(level=logging.DEBUG if VERBOSE else logging.ERROR)
   unittest.main()
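A note on what the new _check_results_file helper implies about the --result dump: the file is JSON, keyed by test case name, with one list entry per execution of that test. Below is a minimal sketch of a file that would satisfy test_simple_fail's expectations. The 'returncode' field inside each entry is an illustrative assumption; the assertions above only check which keys exist and how many executions each key records.

    import json

    # Hypothetical results file matching test_simple_fail's expectations:
    # one top-level key per test case, one list entry per execution.
    # The 'returncode' field is an assumption; the test only verifies
    # key presence and per-key execution counts.
    results = {
        'Foo.Bar1': [{'returncode': 0}],      # passed on its only run
        'Foo.Bar2': [{'returncode': 0}],
        'Foo.Bar3': [{'returncode': 0}],
        'Baz.Fail': [{'returncode': 1}] * 4,  # 3 normal tries + 1 serial retry
    }

    with open('test.results', 'w') as f:
      json.dump(results, f)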
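The 'Retrying failed tests serially.' expectation and the fourth Baz.Fail execution encode the behavior this CL adds to run_test_cases.py: a test that fails all of its normal attempts gets one final run with nothing else executing, to rule out failures caused by parallelism. The sketch below shows that control flow under stated assumptions: run_one and run_with_serial_retry are hypothetical helpers, the retry count of three is inferred from the expected output, and the first phase is kept sequential for brevity where the real tool uses a thread pool.

    import subprocess
    import sys

    def run_one(executable, test_case):
      # Run a single gtest case in its own process; True means it passed.
      # --gtest_filter is the standard gtest flag for selecting one case.
      proc = subprocess.Popen(
          [sys.executable, executable, '--gtest_filter=%s' % test_case],
          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      proc.communicate()
      return proc.returncode == 0

    def run_with_serial_retry(executable, test_cases, retries=3):
      # Phase 1: up to `retries` attempts per test, stopping at the first
      # pass. (Sequential here; the real tool runs this phase in parallel.)
      failed = [
          t for t in test_cases
          if not any(run_one(executable, t) for _ in range(retries))
      ]
      # Phase 2 (this CL): rerun the remaining failures one at a time.
      if failed:
        print('Retrying failed tests serially.')
        failed = [t for t in failed if not run_one(executable, t)]
      return failed  # tests that failed every attempt, including the serial one

With this flow, Baz.Fail accounts for four executions (three parallel-phase tries plus one serial retry), which is exactly the ('Baz.Fail', 4) entry the results file check expects.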
