Index: tools/telemetry/telemetry/page/page_set.py
diff --git a/tools/telemetry/telemetry/page/page_set.py b/tools/telemetry/telemetry/page/page_set.py
index a01b80a73b61e463af5c5b7b3b12075d03b8729a..6a0e165e511ea42e0105fe3a01c5d9dc933093ec 100644
--- a/tools/telemetry/telemetry/page/page_set.py
+++ b/tools/telemetry/telemetry/page/page_set.py
@@ -1,6 +1,7 @@
 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
+
 import csv
 import json
 import os
@@ -9,11 +10,14 @@ from telemetry.page import cloud_storage
 from telemetry.page import page as page_module
 from telemetry.page import page_set_archive_info
 
+
 class PageSet(object):
   def __init__(self, file_path='', attributes=None):
+    self.file_path = file_path
+
+    # These attributes can be set dynamically by the page set.
     self.description = ''
     self.archive_data_file = ''
-    self.file_path = file_path
     self.credentials_path = None
     self.user_agent_type = None
     self.make_javascript_deterministic = True
@@ -40,24 +44,33 @@ class PageSet(object):
             url, self, attributes=page_attributes, base_dir=self._base_dir)
         self.pages.append(page)
 
+    # Prepend _base_dir to our serving dirs.
+    # Always use realpath to ensure no duplicates in set.
+    self.serving_dirs = set()
+    if attributes and 'serving_dirs' in attributes:
+      if not isinstance(attributes['serving_dirs'], list):
+        raise ValueError('serving_dirs must be a list.')
+      for serving_dir in attributes['serving_dirs']:
+        self.serving_dirs.add(
+            os.path.realpath(os.path.join(self._base_dir, serving_dir)))
+
     # Attempt to download the credentials file.
     if self.credentials_path:
       cloud_storage.GetIfChanged(
           cloud_storage.INTERNAL_BUCKET,
           os.path.join(self._base_dir, self.credentials_path))
 
-    # For every file:// URL, scan that directory for .sha1 files,
+    # Scan every serving directory for .sha1 files
     # and download them from Cloud Storage. Assume all data is public.
-    all_serving_dirs = set()
+    all_serving_dirs = self.serving_dirs.copy()
+    # Add individual page dirs to all serving dirs.
     for page in self:
       if page.is_file:
-        serving_dirs, _ = page.serving_dirs_and_file
-        if isinstance(serving_dirs, list):
-          all_serving_dirs |= set(serving_dirs)
-        else:
-          all_serving_dirs.add(serving_dirs)
-
+        all_serving_dirs.add(page.serving_dir)
+    # Scan all serving dirs.
     for serving_dir in all_serving_dirs:
+      if serving_dir == '/':
+        raise ValueError('Trying to serve "/" from HTTP server.')
       for dirpath, _, filenames in os.walk(serving_dir):
         for filename in filenames:
           path, extension = os.path.splitext(
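
The sketch below is illustration only, not part of the patch: it shows how the new 'serving_dirs' attribute handled in PageSet.__init__ above could be exercised. The constructor signature, the list requirement, and the join()/realpath() normalization against _base_dir come from the patch; the concrete file path, the '../data' entry, and the idea of building a PageSet directly from an attributes dict in a standalone script are assumptions.

  # Assumes tools/telemetry from a Chromium checkout is on sys.path.
  from telemetry.page import page_set

  attributes = {
      'description': 'Example page set',
      # Must be a list, or __init__ raises ValueError('serving_dirs must be a list.').
      # Each entry is joined with the page set's _base_dir and passed through
      # os.path.realpath(), so duplicate or symlinked paths collapse in the set.
      'serving_dirs': ['../data'],
  }
  ps = page_set.PageSet(
      file_path='/src/tools/perf/page_sets/example_page_set.json',
      attributes=attributes)
  print(ps.serving_dirs)  # a set of absolute, realpath-normalized directories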
|