OLD | NEW |
| (Empty) |
1 #!/usr/bin/env python | |
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
3 # Use of this source code is governed by a BSD-style license that can be | |
4 # found in the LICENSE file. | |
5 | |
6 import codecs | |
7 import os | |
8 import unittest | |
9 | |
10 import pyauto_functional | |
11 import pyauto | |
12 import pyauto_errors | |
13 import test_utils | |
14 | |
15 | |
class FindMatchTests(pyauto.PyUITest):
  """Tests for the browser's find-in-page feature.

  Covers match counting, case sensitivity, searching inside form controls,
  special chrome:// pages, find-next/previous navigation, unicode/special
  characters, large pages, and PDF documents.
  """

  # Directory (relative to the pyauto data dir) where all find-in-page
  # test data files are kept.
  find_test_data_dir = 'find_in_page'

  def testCanFindMatchCount(self):
    """Verify Find match count for valid search."""
    url = self.GetFileURLForDataPath('title1.html')
    self.NavigateToURL(url)
    self.assertEqual(1, self.FindInPage('title')['match_count'])

  def testCanFindMatchCountFail(self):
    """Verify Find match count is zero for a query with no matches."""
    url = self.GetFileURLForDataPath('title1.html')
    self.NavigateToURL(url)
    self.assertEqual(0, self.FindInPage('blah')['match_count'])

  def testFindIsNotCaseSensitive(self):
    """Verify that find is not case sensitive.

    Manually Find is case insensitive. But since FindInPage is
    case-sensitive by default we are confirming that we get a
    different result when we turn off case matching.
    """
    url = self.GetFileURLForDataPath(self.find_test_data_dir,
                                     'largepage.html')
    self.NavigateToURL(url)
    case_sensitive_result = self.FindInPage('The')['match_count']
    case_insensitive_result = (self.FindInPage('The', match_case=False)
                               ['match_count'])
    # Turning case matching off can only add matches, never remove them.
    self.assertTrue(case_insensitive_result >= case_sensitive_result)

  def testSearchInTextAreas(self):
    """Verify search for text within various forms and text areas."""
    urls = [
        self.GetFileURLForDataPath(self.find_test_data_dir,
                                   'textintextarea.html'),
        self.GetFileURLForDataPath(self.find_test_data_dir,
                                   'smalltextarea.html'),
        self.GetFileURLForDataPath(self.find_test_data_dir,
                                   'populatedform.html'),
    ]
    for url in urls:
      self.NavigateToURL(url)
      # Each page contains exactly one occurrence of 'cat' and none of 'bat'.
      self.assertEqual(1, self.FindInPage('cat')['match_count'])
      self.assertEqual(0, self.FindInPage('bat')['match_count'])

  def testSearchWithinSpecialURL(self):
    """Verify search for text within special URLs such as chrome:history.

    chrome://history, chrome://downloads, pyAuto Data directory.
    """
    zip_file = 'a_zip_file.zip'
    self.NavigateToURL(self.GetFileURLForPath(self.DataDir()))
    # Search in the Data directory listing (tab 0).
    self.assertEqual(1,
        self.FindInPage('downloads', tab_index=0)['match_count'])
    # Search in the History page.
    self.AppendTab(pyauto.GURL('chrome://history'))
    # The contents of the history page load asynchronously after the tab
    # loads, so poll until the expected match count shows up.
    search_query = os.path.join('chrome', 'test', 'data')
    self.WaitUntil(
        lambda: self.FindInPage(search_query, tab_index=1)['match_count'],
        expect_retval=1)
    self.assertEqual(
        1, self.FindInPage(search_query, tab_index=1)['match_count'])
    # Search in the Downloads page.
    test_utils.DownloadFileFromDownloadsDataDir(self, zip_file)
    self.AppendTab(pyauto.GURL('chrome://downloads'))
    # The contents of the downloads page also load asynchronously.
    self.WaitUntil(
        lambda: self.FindInPage(zip_file, tab_index=2)['match_count'],
        expect_retval=2)
    self.assertEqual(2,
        self.FindInPage(zip_file, tab_index=2)['match_count'])
    test_utils.RemoveDownloadedTestFile(self, zip_file)

  def testFindNextAndPrevious(self):
    """Verify search selection coordinates.

    The data file used is set-up such that the text occurs on the same line,
    and we verify their positions by verifying their relative positions.
    """
    search_string = u'\u5728\u897f\u660c\u536b\u661f\u53d1'
    url = self.GetFileURLForDataPath(
        self.find_test_data_dir, 'specialchar.html')
    self.NavigateToURL(url)
    first_find = self.FindInPage(search_string)
    second_find = self.FindInPage(search_string, find_next=True)
    # Both occurrences are on the same row, so the top/bottom coordinates
    # must match between the first and second find.
    self.assertEqual(first_find['match_top'], second_find['match_top'],
                     'Words\' top coordinates should be same')
    self.assertEqual(first_find['match_bottom'], second_find['match_bottom'],
                     'Words\' bottom coordinates should be same')
    # And left-right coordinates should be in order.
    self.assertTrue(first_find['match_left'] < second_find['match_left'],
                    'Second find left coordinate should be greater than '
                    'the first find left coordinate')
    self.assertTrue(first_find['match_right'] < second_find['match_right'],
                    'Second find right coordinate should be greater than '
                    'the first find right coordinate')
    first_find_reverse = self.FindInPage(
        search_string, find_next=True, forward=False)
    # Stepping forward then backward should land on the original match, so
    # the coordinates must be identical to the first find.
    self.assertEqual(first_find, first_find_reverse,
                     'First occurrence must be selected, since we went back')

  def testSpecialChars(self):
    """Test find in page with unicode and special characters.

    Finds from page content, comments and meta data and verifies that comments
    and meta data are not searchable.
    """
    search_string = u'\u5728\u897f\u660c\u536b\u661f\u53d1'
    url = self.GetFileURLForDataPath(
        self.find_test_data_dir, 'specialchar.html')
    self.NavigateToURL(url)
    self.assertEqual(4, self.FindInPage(search_string)['match_count'])
    search_string = u'240^*&%!#~!*&\u518d\u5c31\u8077\u624b\u5f53'
    self.assertEqual(2, self.FindInPage(search_string)['match_count'])
    # Search for the special chars that only appear inside an HTML comment
    # and a meta tag; they must not be found.
    search_string = u'\u4e2d\u65b0\u793e\u8bb0\u8005\u5b8b\u5409'\
                    u'\u6cb3\u6444\u4e2d\u65b0\u7f51'
    self.assertEqual(0, self.FindInPage(search_string)['match_count'],
                     'Chrome should not find chars from comment or meta tags')

  def testFindInLargePage(self):
    """Find in a very large page."""
    url = self.GetFileURLForDataPath(self.find_test_data_dir, 'largepage.html')
    self.NavigateToURL(url)
    self.assertEqual(373, self.FindInPage('daughter of Prince')['match_count'])

  def testFindLongString(self):
    """Find a very long string in a large page."""
    url = self.GetFileURLForDataPath(
        self.find_test_data_dir, 'largepage.html')
    self.NavigateToURL(url)
    # Use a 'with' block so the file handle is closed even on failure;
    # the search text is UTF-8 encoded on disk.
    with codecs.open(os.path.join(self.DataDir(), self.find_test_data_dir,
                                  'LongFind.txt'), 'r', 'utf-8') as f:
      search = f.read()
    self.assertEqual(1, self.FindInPage(search)['match_count'])

  def testFindBigString(self):
    """Find a big font string in a page."""
    url = self.GetFileURLForDataPath(
        self.find_test_data_dir, 'BigText.html')
    self.NavigateToURL(url)
    self.assertEqual(1, self.FindInPage('SomeLargeString')['match_count'])

  def testVariousFindTests(self):
    """Test find in page for <span> style text, lists, html comments, etc."""
    url = self.GetFileURLForDataPath(
        self.find_test_data_dir, 'FindRandomTests.html')
    self.NavigateToURL(url)
    search = 'has light blue eyes and my father has dark'
    self.assertEqual(1, self.FindInPage(search)['match_count'],
                     'Failed to find text with <span> tag')
    # Find across list items.
    search = 'Google\nApple\nandroid'
    self.assertEqual(1, self.FindInPage(search)['match_count'],
                     'Failed to find the list items')
    # HTML comments must not be searchable.
    self.assertEqual(0, self.FindInPage('example comment')['match_count'],
                     'We should not find HTML comments')

  def testFindWholeFileContent(self):
    """Find the whole text file page and find count should be 1."""
    find_test_file = os.path.join(self.DataDir(), self.find_test_data_dir,
                                  'find_test.txt')
    url = self.GetFileURLForPath(find_test_file)
    self.NavigateToURL(url)
    # Use a 'with' block so the file handle is always closed.
    with open(find_test_file) as f:
      search = f.read()
    self.assertEqual(1, self.FindInPage(search)['match_count'],
                     'Failed to find the whole page')

  def testSingleOccurrence(self):
    """Search Back and Forward on a single occurrence."""
    url = self.GetFileURLForDataPath(
        self.find_test_data_dir, 'FindRandomTests.html')
    self.NavigateToURL(url)
    self.assertEqual(1, self.FindInPage('2010 Pro Bowl')['match_count'])
    # First occurrence find.
    first_occurrence_dict = self.FindInPage('2010 Pro Bowl')
    # Finding next occurrence.
    next_occurrence_dict = self.FindInPage('2010 Pro Bowl', find_next=True)
    self.assertEqual(first_occurrence_dict, next_occurrence_dict,
                     'We have only one occurrence in this page so'
                     'first and next coordinates must be same')
    # Doing a fake find so we have no previous search.
    self.FindInPage('ghgfjgfh201232rere')

    first_occurrence_dict = self.FindInPage('2010 Pro Bowl')
    # Finding previous occurrence.
    back_occurrence_dict = self.FindInPage('2010 Pro Bowl',
                                           find_next=True, forward=False)
    self.assertEqual(first_occurrence_dict, back_occurrence_dict,
                     'We have only one occurrence in this page so '
                     'first and back search coordinates must be same')

  def _VerifySearchInPDFURL(self, url, word, expected_count):
    """Verify that we can find in a pdf file.

    Args:
      url: URL of the pdf document to search in.
      word: Search term.
      expected_count: Expected number of matches.
    """
    self.NavigateToURL(url)
    # FindInPage can throw JSONInterfaceError when called before the page
    # has loaded (crbug.com/107448), so retry a bounded number of times.
    num_loops = 10
    for loop in range(num_loops):
      try:
        search_count = self.FindInPage(word, timeout=1000)['match_count']
        break
      except pyauto_errors.JSONInterfaceError:
        if loop == num_loops - 1:
          # Out of retries; surface the underlying error.
          raise
    self.assertEqual(expected_count, search_count,
                     'Failed to find in the %s pdf file' % url)

  def testSearchInPDF(self):
    """Verify that we can find in a pdf file.

    Only for Google Chrome builds (Chromium builds do not have internal pdf).
    """
    # Bail out if not a branded build.
    properties = self.GetBrowserInfo()['properties']
    if properties['branding'] != 'Google Chrome':
      return
    # Search in pdf file over file://.
    file_url = self.GetFileURLForContentDataPath('plugin', 'Embed.pdf')
    self._VerifySearchInPDFURL(file_url, 'adobe', 8)

    # Search in pdf file over http://.
    http_url = 'http://www.irs.gov/pub/irs-pdf/fw4.pdf'
    self._VerifySearchInPDFURL(http_url, 'Allowances', 16)

if __name__ == '__main__':
  # Launch the pyauto functional-test harness, which discovers and runs the
  # test methods defined in this module.
  pyauto_functional.Main()
OLD | NEW |