OLD | NEW |
1 # Copyright 2014 The LUCI Authors. All rights reserved. | 1 # Copyright 2014 The LUCI Authors. All rights reserved. |
2 # Use of this source code is governed under the Apache License, Version 2.0 | 2 # Use of this source code is governed under the Apache License, Version 2.0 |
3 # that can be found in the LICENSE file. | 3 # that can be found in the LICENSE file. |
4 | 4 |
5 """Provides simulator test coverage for individual recipes.""" | 5 """Provides simulator test coverage for individual recipes.""" |
6 | 6 |
7 import StringIO | 7 import StringIO |
| 8 import ast |
8 import contextlib | 9 import contextlib |
| 10 import copy |
9 import json | 11 import json |
10 import logging | 12 import logging |
11 import os | 13 import os |
12 import re | 14 import re |
13 import sys | 15 import sys |
| 16 import textwrap |
| 17 import traceback |
| 18 import inspect |
| 19 |
| 20 from collections import OrderedDict, namedtuple |
14 | 21 |
15 from . import env | 22 from . import env |
16 from . import stream | 23 from . import stream |
17 import expect_tests | 24 import expect_tests |
| 25 from .checker import Checker, VerifySubset |
18 | 26 |
19 # This variable must be set in the dynamic scope of the functions in this file. | 27 # This variable must be set in the dynamic scope of the functions in this file. |
20 # We do this instead of passing it as a parameter because the threading | 28 # We do this instead of passing it as a parameter because the threading |
21 # system of expect tests doesn't know how to serialize it. | 29 # system of expect tests doesn't know how to serialize it. |
22 _UNIVERSE = None | 30 _UNIVERSE = None |
23 | 31 |
24 | 32 |
25 def RenderExpectation(test_data, raw_expectations): | 33 class PostProcessError(ValueError): |
26 """Applies the step filters (e.g. whitelists, etc.) to the raw_expectations, | 34 pass |
27 if the TestData actually contains any filters. | 35 |
| 36 |
| 37 def _renderExpectation(test_data, step_odict): |
| 38 """Applies the step post_process actions to the step_odict, if the |
| 39 TestData actually contains any. |
28 | 40 |
29 Returns the final expect_tests.Result.""" | 41 Returns the final expect_tests.Result.""" |
30 if test_data.whitelist_data: | |
31 whitelist_data = dict(test_data.whitelist_data) # copy so we can mutate it | |
32 def filter_expectation(step): | |
33 whitelist = whitelist_data.pop(step['name'], None) | |
34 if whitelist is None: | |
35 return | |
36 | 42 |
37 whitelist = set(whitelist) # copy so we can mutate it | 43 failed_checks = [] |
38 if len(whitelist) > 0: | |
39 whitelist.add('name') | |
40 step = {k: v for k, v in step.iteritems() if k in whitelist} | |
41 whitelist.difference_update(step.keys()) | |
42 if whitelist: | |
43 raise ValueError( | |
44 "The whitelist includes fields %r in step %r, but those fields" | |
45 " don't exist." | |
46 % (whitelist, step['name'])) | |
47 return step | |
48 raw_expectations = filter(filter_expectation, raw_expectations) | |
49 | 44 |
50 if whitelist_data: | 45 for hook, args, kwargs, filename, lineno in test_data.post_process_hooks: |
51 raise ValueError( | 46 input_odict = copy.deepcopy(step_odict) |
52 "The step names %r were included in the whitelist, but were never run." | 47 # we ignore the input_odict so that it never gets printed in full. Usually |
53 % [s['name'] for s in whitelist_data]) | 48 # the check invocation itself will index the input_odict or will use it only |
| 49 # for a key membership comparison, which provides enough debugging context. |
| 50 checker = Checker(filename, lineno, hook, args, kwargs, input_odict) |
| 51 rslt = hook(checker, input_odict, *args, **kwargs) |
| 52 failed_checks += checker.failed_checks |
| 53 if rslt is not None: |
| 54 msg = VerifySubset(rslt, step_odict) |
| 57 if msg: |
| 58 raise PostProcessError('post_process: steps'+msg) |
| 59 # restore 'name' |
| 60 for k, v in rslt.iteritems(): |
| 61 if 'name' not in v: |
| 62 v['name'] = k |
| 63 step_odict = rslt |
54 | 64 |
55 return expect_tests.Result(raw_expectations) | 65 # empty means drop expectation |
| 66 result_data = step_odict.values() if step_odict else None |
| 67 return expect_tests.Result(result_data, failed_checks) |
56 | 68 |
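For reference, a minimal sketch of the kind of hook the post_process loop above consumes. The recipe test below is hypothetical: the 'compile' step name and the api.post_process test-API call are assumptions, not part of this change. The hook contract follows the loop above: each hook receives the (callable) Checker plus a deep copy of the step ordered-dict, returning None keeps the expectations as-is, and returning a dict replaces them once VerifySubset confirms it is a subset of the original:

    def GenTests(api):
      def compile_ran(check, step_odict, step_name):
        # A falsy check() records a failed_check on the Checker; returning
        # None leaves the expectations untouched.
        check(step_name in step_odict)

      def keep_only_compile(check, step_odict):
        # Returning a dict replaces the expectations; it must be a subset of
        # the original, and 'name' keys are restored by _renderExpectation.
        return {k: v for k, v in step_odict.iteritems() if k == 'compile'}

      yield (api.test('basic')
             + api.post_process(compile_ran, 'compile')
             + api.post_process(keep_only_compile))

If the second hook instead returned an empty dict, step_odict would end up empty and the test would produce no expectation file at all ("empty means drop expectation" above).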
57 | 69 |
58 class SimulationAnnotatorStreamEngine(stream.AnnotatorStreamEngine): | 70 class SimulationAnnotatorStreamEngine(stream.AnnotatorStreamEngine): |
59 | 71 |
60 def __init__(self): | 72 def __init__(self): |
61 self._step_buffer_map = {} | 73 self._step_buffer_map = {} |
62 super(SimulationAnnotatorStreamEngine, self).__init__( | 74 super(SimulationAnnotatorStreamEngine, self).__init__( |
63 self.step_buffer(None)) | 75 self.step_buffer(None)) |
64 | 76 |
65 def step_buffer(self, step_name): | 77 def step_buffer(self, step_name): |
66 return self._step_buffer_map.setdefault(step_name, StringIO.StringIO()) | 78 return self._step_buffer_map.setdefault(step_name, StringIO.StringIO()) |
67 | 79 |
68 def new_step_stream(self, step_config): | 80 def new_step_stream(self, step_config): |
69 return self._create_step_stream(step_config, | 81 return self._create_step_stream(step_config, |
70 self.step_buffer(step_config.name)) | 82 self.step_buffer(step_config.name)) |
71 | 83 |
72 | 84 |
73 def RunRecipe(test_data): | 85 # This maps from (recipe_name,test_name) -> yielded test_data. It's outside of |
| 86 # RunRecipe so that it can persist between RunRecipe calls in the same process. |
| 87 _GEN_TEST_CACHE = {} |
| 88 |
| 89 def RunRecipe(recipe_name, test_name): |
74 """Actually runs the recipe given the GenTests-supplied test_data.""" | 90 """Actually runs the recipe given the GenTests-supplied test_data.""" |
75 from . import config_types | 91 from . import config_types |
76 from . import loader | 92 from . import loader |
77 from . import run | 93 from . import run |
78 from . import step_runner | 94 from . import step_runner |
79 from . import stream | 95 |
| 96 if (recipe_name, test_name) not in _GEN_TEST_CACHE: |
| 97 recipe_script = _UNIVERSE.load_recipe(recipe_name) |
| 98 test_api = loader.create_test_api(recipe_script.LOADED_DEPS, _UNIVERSE) |
| 99 for test_data in recipe_script.gen_tests(test_api): |
| 100 _GEN_TEST_CACHE[(recipe_name, test_data.name)] = test_data |
| 101 |
| 102 test_data = _GEN_TEST_CACHE[(recipe_name, test_name)] |
80 | 103 |
81 config_types.ResetTostringFns() | 104 config_types.ResetTostringFns() |
82 | 105 |
83 annotator = SimulationAnnotatorStreamEngine() | 106 annotator = SimulationAnnotatorStreamEngine() |
84 with stream.StreamEngineInvariants.wrap(annotator) as stream_engine: | 107 with stream.StreamEngineInvariants.wrap(annotator) as stream_engine: |
85 step_runner = step_runner.SimulationStepRunner(stream_engine, test_data, | 108 step_runner = step_runner.SimulationStepRunner(stream_engine, test_data, |
86 annotator) | 109 annotator) |
87 | 110 |
88 engine = run.RecipeEngine(step_runner, test_data.properties, _UNIVERSE) | 111 props = test_data.properties.copy() |
89 recipe_script = _UNIVERSE.load_recipe(test_data.properties['recipe'], | 112 props['recipe'] = recipe_name |
90 engine=engine) | 113 engine = run.RecipeEngine(step_runner, props, _UNIVERSE) |
| 114 recipe_script = _UNIVERSE.load_recipe(recipe_name, engine=engine) |
91 api = loader.create_recipe_api(recipe_script.LOADED_DEPS, engine, test_data) | 115 api = loader.create_recipe_api(recipe_script.LOADED_DEPS, engine, test_data) |
92 result = engine.run(recipe_script, api, test_data.properties) | 116 result = engine.run(recipe_script, api, props) |
93 | 117 |
94 # Don't include tracebacks in expectations because they are too sensitive to | 118 # Don't include tracebacks in expectations because they are too sensitive to |
95 # change. | 119 # change. |
96 result.result.pop('traceback', None) | 120 result.result.pop('traceback', None) |
97 raw_expectations = step_runner.steps_ran + [result.result] | 121 raw_expectations = step_runner.steps_ran.copy() |
| 122 raw_expectations[result.result['name']] = result.result |
98 | 123 |
99 try: | 124 try: |
100 return RenderExpectation(test_data, raw_expectations) | 125 return _renderExpectation(test_data, raw_expectations) |
101 except: | 126 except: |
102 print | 127 print |
103 print "The expectations would have been:" | 128 print "The expectations would have been:" |
104 json.dump(raw_expectations, sys.stdout, indent=2) | 129 json.dump(raw_expectations, sys.stdout, indent=2) |
105 raise | 130 raise |
106 | 131 |
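As a side note on the cache logic above: a recipe's gen_tests() runs at most once per process, and every one of its test cases is then served out of _GEN_TEST_CACHE. A standalone sketch of the same (recipe_name, test_name) memoization pattern, where TestCase and gen_tests_for() are made-up stand-ins for TestData and recipe_script.gen_tests(test_api):

    from collections import namedtuple

    TestCase = namedtuple('TestCase', 'name data')  # stand-in for TestData

    _CACHE = {}  # (recipe_name, test_name) -> TestCase

    def cached_test_data(recipe_name, test_name, gen_tests_for):
      # On a miss, generate and cache every test for the recipe in one pass,
      # mirroring what RunRecipe does above.
      if (recipe_name, test_name) not in _CACHE:
        for test in gen_tests_for(recipe_name):
          _CACHE[(recipe_name, test.name)] = test
      return _CACHE[(recipe_name, test_name)]

    def gen_tests_for(recipe_name):  # hypothetical test generator
      yield TestCase('basic', {})
      yield TestCase('failure', {})

    print cached_test_data('hello_world', 'basic', gen_tests_for).name  # basic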
107 | 132 |
108 def test_gen_coverage(): | 133 def test_gen_coverage(): |
109 cover = [] | 134 cover = [] |
110 | 135 |
(...skipping 10 matching lines...) |
121 def cover_omit(): | 146 def cover_omit(): |
122 omit = [ ] | 147 omit = [ ] |
123 | 148 |
124 for mod_dir_base in _UNIVERSE.module_dirs: | 149 for mod_dir_base in _UNIVERSE.module_dirs: |
125 if os.path.isdir(mod_dir_base): | 150 if os.path.isdir(mod_dir_base): |
126 omit.append(os.path.join(mod_dir_base, '*', 'resources', '*')) | 151 omit.append(os.path.join(mod_dir_base, '*', 'resources', '*')) |
127 | 152 |
128 return omit | 153 return omit |
129 | 154 |
130 | 155 |
131 class InsufficientTestCoverage(Exception): pass | 156 class InsufficientTestCoverage(Exception): |
| 157 pass |
132 | 158 |
133 | 159 |
134 @expect_tests.covers(test_gen_coverage) | 160 @expect_tests.covers(test_gen_coverage) |
135 def GenerateTests(): | 161 def GenerateTests(): |
136 from . import loader | 162 from . import loader |
137 | 163 |
138 cover_mods = [ ] | 164 cover_mods = [ ] |
139 for mod_dir_base in _UNIVERSE.module_dirs: | 165 for mod_dir_base in _UNIVERSE.module_dirs: |
140 if os.path.isdir(mod_dir_base): | 166 if os.path.isdir(mod_dir_base): |
141 cover_mods.append(os.path.join(mod_dir_base, '*.py')) | 167 cover_mods.append(os.path.join(mod_dir_base, '*.py')) |
142 | 168 |
143 for recipe_path, recipe_name in _UNIVERSE.loop_over_recipes(): | 169 for recipe_path, recipe_name in _UNIVERSE.loop_over_recipes(): |
144 try: | 170 try: |
145 recipe = _UNIVERSE.load_recipe(recipe_name) | 171 recipe = _UNIVERSE.load_recipe(recipe_name) |
146 test_api = loader.create_test_api(recipe.LOADED_DEPS, _UNIVERSE) | 172 test_api = loader.create_test_api(recipe.LOADED_DEPS, _UNIVERSE) |
147 | 173 |
148 covers = cover_mods + [recipe_path] | 174 covers = cover_mods + [recipe_path] |
149 | 175 |
150 full_expectation_count = 0 | |
151 for test_data in recipe.gen_tests(test_api): | 176 for test_data in recipe.gen_tests(test_api): |
152 if not test_data.whitelist_data: | |
153 full_expectation_count += 1 | |
154 root, name = os.path.split(recipe_path) | 177 root, name = os.path.split(recipe_path) |
155 name = os.path.splitext(name)[0] | 178 name = os.path.splitext(name)[0] |
156 expect_path = os.path.join(root, '%s.expected' % name) | 179 expect_path = os.path.join(root, '%s.expected' % name) |
157 | |
158 test_data.properties['recipe'] = recipe_name.replace('\\', '/') | |
159 yield expect_tests.Test( | 180 yield expect_tests.Test( |
160 '%s.%s' % (recipe_name, test_data.name), | 181 '%s.%s' % (recipe_name, test_data.name), |
161 expect_tests.FuncCall(RunRecipe, test_data), | 182 expect_tests.FuncCall(RunRecipe, recipe_name, test_data.name), |
162 expect_dir=expect_path, | 183 expect_dir=expect_path, |
163 expect_base=test_data.name, | 184 expect_base=test_data.name, |
164 covers=covers, | 185 covers=covers, |
165 break_funcs=(recipe.run_steps,) | 186 break_funcs=(recipe.run_steps,) |
166 ) | 187 ) |
167 | |
168 if full_expectation_count < 1: | |
169 raise InsufficientTestCoverage( | |
170 'Must have at least 1 test without a whitelist!') | |
171 except: | 188 except: |
172 info = sys.exc_info() | 189 info = sys.exc_info() |
173 new_exec = Exception('While generating results for %r: %s: %s' % ( | 190 new_exec = Exception('While generating results for %r: %s: %s' % ( |
174 recipe_name, info[0].__name__, str(info[1]))) | 191 recipe_name, info[0].__name__, str(info[1]))) |
175 raise new_exec.__class__, new_exec, info[2] | 192 raise new_exec.__class__, new_exec, info[2] |
176 | 193 |
177 | 194 |
178 def main(universe, args=None): | 195 def main(universe, args=None): |
179 """Runs simulation tests on a given repo of recipes. | 196 """Runs simulation tests on a given repo of recipes. |
180 | 197 |
(...skipping 10 matching lines...) |
191 'TESTING_SLAVENAME']: | 208 'TESTING_SLAVENAME']: |
192 if env_var in os.environ: | 209 if env_var in os.environ: |
193 logging.warn("Ignoring %s environment variable." % env_var) | 210 logging.warn("Ignoring %s environment variable." % env_var) |
194 os.environ.pop(env_var) | 211 os.environ.pop(env_var) |
195 | 212 |
196 global _UNIVERSE | 213 global _UNIVERSE |
197 _UNIVERSE = universe | 214 _UNIVERSE = universe |
198 | 215 |
199 expect_tests.main('recipe_simulation_test', GenerateTests, | 216 expect_tests.main('recipe_simulation_test', GenerateTests, |
200 cover_omit=cover_omit(), args=args) | 217 cover_omit=cover_omit(), args=args) |