OLD | NEW |
| (Empty) |
1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
2 # Use of this source code is governed by a BSD-style license that can be | |
3 # found in the LICENSE file. | |
4 import os | |
5 import re | |
6 import time | |
7 import urlparse | |
8 | |
9 from telemetry import util | |
10 | |
class Page(object):
  """A single page within a page set, plus per-page test configuration.

  Arbitrary keys from |attributes| are copied onto the instance, so page set
  files can attach wait conditions, credentials, etc. without subclassing.
  """

  def __init__(self, url, page_set, attributes=None, base_dir=None):
    """Initializes the page.

    Args:
      url: A fully qualified URL, or a path relative to |base_dir| which is
          converted to a file:// URL if it names an existing file.
      page_set: The page set this page belongs to.
      attributes: Optional dict; each (key, value) is set as an attribute on
          this instance.
      base_dir: Directory used to resolve scheme-less (relative file) URLs.

    Raises:
      Exception: If |url| has no scheme and does not name an existing file.
    """
    parsed_url = urlparse.urlparse(url)
    if not parsed_url.scheme:
      abspath = os.path.abspath(os.path.join(base_dir, parsed_url.path))
      if os.path.exists(abspath):
        # Join with the original |url| (not parsed_url.path) so any query or
        # fragment text stays in the resulting file:// URL.
        url = 'file://%s' % os.path.abspath(os.path.join(base_dir, url))
      else:
        raise Exception('URLs must be fully qualified: %s' % url)
    self.url = url
    self.page_set = page_set
    self.base_dir = base_dir
    self.credentials = None  # Optional credentials entry name.
    self.disabled = False
    self.wait_time_after_navigate = 2

    if attributes:
      # items() rather than iteritems(): identical behavior on Python 2 and
      # also valid on Python 3.
      for k, v in attributes.items():
        setattr(self, k, v)

  # NOTE: This assumes the page_set file uses 'file:///' instead of 'file://',
  # otherwise the '/' will be missing between page_set.base_dir and
  # parsed_url.path.
  @property
  def url_base_dir_and_file(self):
    """Returns (base directory, file part) of this page's file URL."""
    parsed_url = urlparse.urlparse(self.url)

    # Don't use os.path.join otherwise netloc and path can't point to relative
    # directories.
    # startswith() also handles an empty path, which would make path[0] raise
    # IndexError instead of AssertionError.
    assert parsed_url.path.startswith('/')

    path = self.base_dir + parsed_url.netloc + parsed_url.path

    if hasattr(self, 'url_base_dir'):
      parsed_url = urlparse.urlparse(self.url_base_dir)
      base_path = self.base_dir + parsed_url.netloc + parsed_url.path
      # Strip only the leading base_path. str.replace(base_path, '') would
      # also delete any later occurrence of base_path inside the path.
      if path.startswith(base_path):
        return (base_path, path[len(base_path):])
      return (base_path, path)

    return os.path.split(path)

  # A version of this page's URL that's safe to use as a filename.
  @property
  def url_as_file_safe_name(self):
    # Just replace all special characters in the url with underscore.
    return re.sub(r'[^a-zA-Z0-9]', '_', self.url)

  @property
  def display_url(self):
    """Returns a short human-readable form of the URL for display/logging."""
    if self.url.startswith('file://'):
      return os.path.split(self.url)[1]
    # Anchor the pattern so only the leading scheme is stripped; an unanchored
    # sub would also delete 'http://' occurring later in the URL (e.g. inside
    # a query parameter).
    return re.sub(r'^https?://', '', self.url)

  @property
  def archive_path(self):
    """Path of the WPR archive used when replaying this page."""
    return self.page_set.WprFilePathForPage(self)

  def __str__(self):
    return self.url

  def WaitToLoad(self, tab, timeout, poll_interval=0.1):
    """Waits for this page's own wait conditions (see WaitForPageToLoad)."""
    Page.WaitForPageToLoad(self, tab, timeout, poll_interval)

  # TODO(dtu): Remove this method when no page sets use a click interaction
  # with a wait condition. crbug.com/168431
  @staticmethod
  def WaitForPageToLoad(obj, tab, timeout, poll_interval=0.1):
    """Waits for various wait conditions present in obj.

    Honors, if present on obj: post_navigate_javascript_to_execute,
    wait_seconds, wait_for_element_with_text, wait_for_element_with_selector,
    wait_for_javascript_expression. Absent attributes are skipped.
    """
    if hasattr(obj, 'post_navigate_javascript_to_execute'):
      tab.EvaluateJavaScript(obj.post_navigate_javascript_to_execute)

    if hasattr(obj, 'wait_seconds'):
      time.sleep(obj.wait_seconds)
    if hasattr(obj, 'wait_for_element_with_text'):
      callback_code = 'function(element) { return element != null; }'
      util.WaitFor(
          lambda: util.FindElementAndPerformAction(
              tab, obj.wait_for_element_with_text, callback_code),
          timeout, poll_interval)
    if hasattr(obj, 'wait_for_element_with_selector'):
      util.WaitFor(lambda: tab.EvaluateJavaScript(
          'document.querySelector(\'' + obj.wait_for_element_with_selector +
          '\') != null'), timeout, poll_interval)
    if hasattr(obj, 'wait_for_javascript_expression'):
      util.WaitFor(
          lambda: tab.EvaluateJavaScript(obj.wait_for_javascript_expression),
          timeout, poll_interval)
OLD | NEW |