| Index: telemetry/telemetry/benchmark.py
|
| diff --git a/telemetry/telemetry/benchmark.py b/telemetry/telemetry/benchmark.py
|
| index 3cea7caa5cd21ff0108665eaf34c33414e432ff4..04dad73967b8b6427b78541a12971bbd5ee0b99c 100644
|
| --- a/telemetry/telemetry/benchmark.py
|
| +++ b/telemetry/telemetry/benchmark.py
|
| @@ -63,6 +63,7 @@ class Benchmark(command_line.Command):
|
| options = {}
|
| page_set = None
|
| test = timeline_based_measurement.TimelineBasedMeasurement
|
| + SUPPORTED_PLATFORMS = [expectations.ALL]
|
|
|
| def __init__(self, max_failures=None):
|
| """Creates a new Benchmark.
|
| @@ -82,6 +83,16 @@ class Benchmark(command_line.Command):
|
| # See https://github.com/catapult-project/catapult/issues/3708
|
|
|
|
|
| + def _CanRunOnPlatform(self, platform, finder_options):
|
| + for p in self.SUPPORTED_PLATFORMS:
|
| + # This is reusing StoryExpectations code, so it is a bit unintuitive. We
|
| + # are trying to detect the opposite of the usual case in StoryExpectations,
|
| + # so we want to return True when ShouldDisable returns True, even though
|
| + # we do not want to disable.
|
| + if p.ShouldDisable(platform, finder_options):
|
| + return True
|
| + return False
|
| +
|
| # pylint: disable=unused-argument
|
| @classmethod
|
| def ShouldDisable(cls, possible_browser):
|
|
|