Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(468)

Unified Diff: recipe_engine/simulation_test.py

Issue 2387763003: Add initial postprocess unit test thingy. (Closed)
Patch Set: rewrite parser code Created 4 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: recipe_engine/simulation_test.py
diff --git a/recipe_engine/simulation_test.py b/recipe_engine/simulation_test.py
index 17c1d1e511689cdb38340f1af3a4585b640e46df..9efc6c613aa1d10a51105cab444042b9fe0394a2 100644
--- a/recipe_engine/simulation_test.py
+++ b/recipe_engine/simulation_test.py
@@ -5,16 +5,24 @@
"""Provides simulator test coverage for individual recipes."""
import StringIO
+import ast
import contextlib
+import copy
import json
import logging
import os
import re
import sys
+import textwrap
+import traceback
+import inspect
+
+from collections import OrderedDict, namedtuple
from . import env
from . import stream
import expect_tests
+from .checker import Checker, VerifySubset
# This variable must be set in the dynamic scope of the functions in this file.
# We do this instead of passing because the threading system of expect tests
@@ -22,37 +30,39 @@ import expect_tests
_UNIVERSE = None
-def RenderExpectation(test_data, raw_expectations):
-  """Applies the step filters (e.g. whitelists, etc.) to the raw_expectations,
-  if the TestData actually contains any filters.
+# Raised when a post_process hook returns a dict that is not a valid subset of
+# the steps that actually ran (see VerifySubset call below).
+class PostProcessError(ValueError):
+  pass
+
+
+def _renderExpectation(test_data, step_odict):
+  """Applies the step post_process actions to the step_odict, if the
+  TestData actually contains any.
   Returns the final expect_tests.Result."""
-  if test_data.whitelist_data:
-    whitelist_data = dict(test_data.whitelist_data) # copy so we can mutate it
-    def filter_expectation(step):
-      whitelist = whitelist_data.pop(step['name'], None)
-      if whitelist is None:
-        return
-
-      whitelist = set(whitelist) # copy so we can mutate it
-      if len(whitelist) > 0:
-        whitelist.add('name')
-        step = {k: v for k, v in step.iteritems() if k in whitelist}
-        whitelist.difference_update(step.keys())
-        if whitelist:
-          raise ValueError(
-            "The whitelist includes fields %r in step %r, but those fields"
-            " don't exist."
-            % (whitelist, step['name']))
-      return step
-    raw_expectations = filter(filter_expectation, raw_expectations)
-
-    if whitelist_data:
-      raise ValueError(
-        "The step names %r were included in the whitelist, but were never run."
-        % [s['name'] for s in whitelist_data])
-
-  return expect_tests.Result(raw_expectations)
+
+  failed_checks = []
+
+  # Hooks run in registration order; each receives a deep copy of the current
+  # expectations so user hook code can't mutate the real step_odict in place.
+  for hook, args, kwargs, filename, lineno in test_data.post_process_hooks:
+    input_odict = copy.deepcopy(step_odict)
+    # we ignore the input_odict so that it never gets printed in full. Usually
+    # the check invocation itself will index the input_odict or will use it only
+    # for a key membership comparison, which provides enough debugging context.
+    checker = Checker(filename, lineno, hook, args, kwargs, input_odict)
+    rslt = hook(checker, input_odict, *args, **kwargs)
+    failed_checks += checker.failed_checks
+    # A hook may return a (possibly filtered) dict; when it does, that dict
+    # must be a subset of the steps that ran, and it replaces step_odict for
+    # subsequent hooks.
+    if rslt is not None:
+      msg = VerifySubset(rslt, step_odict)
+      if msg:
+        raise PostProcessError('post_process: steps'+msg)
+      # restore 'name'
+      # (filtering may have dropped the 'name' field; re-derive it from the
+      # odict key so the rendered expectation stays self-describing)
+      for k, v in rslt.iteritems():
+        if 'name' not in v:
+          v['name'] = k
+      step_odict = rslt
+
+  # empty means drop expectation
+  result_data = step_odict.values() if step_odict else None
+  return expect_tests.Result(result_data, failed_checks)
class SimulationAnnotatorStreamEngine(stream.AnnotatorStreamEngine):
@@ -70,13 +80,24 @@ class SimulationAnnotatorStreamEngine(stream.AnnotatorStreamEngine):
self.step_buffer(step_config.name))
-def RunRecipe(test_data):
+# This maps from (recipe_name,test_name) -> yielded test_data. It's outside of
+# RunRecipe so that it can persist between RunRecipe calls in the same process.
+_GEN_TEST_CACHE = {}
martiniss 2016/10/13 22:54:13 I don't see how this would do anything, is what I
iannucci 2016/10/13 23:05:05 We always run recipes many many times... one per t
+
+def RunRecipe(recipe_name, test_name):
"""Actually runs the recipe given the GenTests-supplied test_data."""
from . import config_types
from . import loader
from . import run
from . import step_runner
-  from . import stream
+
+  # Populate the cache once per recipe: GenTests yields every test's data, so
+  # later RunRecipe calls for the same recipe skip straight to the lookup.
+  if recipe_name not in _GEN_TEST_CACHE:
+    recipe_script = _UNIVERSE.load_recipe(recipe_name)
+    test_api = loader.create_test_api(recipe_script.LOADED_DEPS, _UNIVERSE)
+    for test_data in recipe_script.GenTests(test_api):
+      _GEN_TEST_CACHE[(recipe_name, test_data.name)] = test_data
+
+  test_data = _GEN_TEST_CACHE[(recipe_name, test_name)]
config_types.ResetTostringFns()
@@ -88,18 +109,21 @@ def RunRecipe(test_data):
step_runner = step_runner.SimulationStepRunner(stream_engine, test_data,
annotator)
-  engine = run.RecipeEngine(step_runner, test_data.properties, _UNIVERSE)
-  recipe_script = _UNIVERSE.load_recipe(test_data.properties['recipe'])
+  # Copy before injecting 'recipe' so the cached test_data.properties is never
+  # mutated across runs.
+  props = test_data.properties.copy()
+  props['recipe'] = recipe_name
+  engine = run.RecipeEngine(step_runner, props, _UNIVERSE)
+  recipe_script = _UNIVERSE.load_recipe(recipe_name)
api = loader.create_recipe_api(recipe_script.LOADED_DEPS, engine, test_data)
result = engine.run(recipe_script, api)
# Don't include tracebacks in expectations because they are too sensitive to
# change.
result.result.pop('traceback', None)
-  raw_expectations = step_runner.steps_ran + [result.result]
+  # NOTE(review): steps_ran appears to now be a dict keyed by step name (it
+  # was a list) -- confirm against SimulationStepRunner; the engine result is
+  # appended under its own 'name' key.
+  raw_expectations = step_runner.steps_ran.copy()
+  raw_expectations[result.result['name']] = result.result
try:
-    return RenderExpectation(test_data, raw_expectations)
+    return _renderExpectation(test_data, raw_expectations)
except:
print
print "The expectations would have been:"
@@ -130,7 +154,8 @@ def cover_omit():
return omit
-class InsufficientTestCoverage(Exception): pass
+class InsufficientTestCoverage(Exception):
+  # NOTE(review): the only visible raise site (the whitelist count check in
+  # GenerateTests) is deleted by this patch -- confirm this exception is still
+  # raised somewhere, or remove it.
+  pass
@expect_tests.covers(test_gen_coverage)
@@ -149,27 +174,18 @@ def GenerateTests():
covers = cover_mods + [recipe_path]
-    full_expectation_count = 0
for test_data in recipe.GenTests(test_api):
-      if not test_data.whitelist_data:
-        full_expectation_count += 1
root, name = os.path.split(recipe_path)
name = os.path.splitext(name)[0]
expect_path = os.path.join(root, '%s.expected' % name)
-
-      test_data.properties['recipe'] = recipe_name.replace('\\', '/')
yield expect_tests.Test(
'%s.%s' % (recipe_name, test_data.name),
-          expect_tests.FuncCall(RunRecipe, test_data),
+          # Pass names instead of the test_data object; RunRecipe rebuilds it
+          # via _GEN_TEST_CACHE, so nothing heavyweight crosses the
+          # expect_tests boundary.
+          expect_tests.FuncCall(RunRecipe, recipe_name, test_data.name),
expect_dir=expect_path,
expect_base=test_data.name,
covers=covers,
break_funcs=(recipe.RunSteps,)
)
-
+      # NOTE(review): the "at least one un-whitelisted test" enforcement below
+      # is removed along with whitelist_data; InsufficientTestCoverage may now
+      # be unused.
-      if full_expectation_count < 1:
-        raise InsufficientTestCoverage(
-          'Must have at least 1 test without a whitelist!')
except:
info = sys.exc_info()
new_exec = Exception('While generating results for %r: %s: %s' % (

Powered by Google App Engine
This is Rietveld 408576698