OLD | NEW |
---|---|
(Empty) | |
1 # Copyright 2015 The Chromium Authors. All rights reserved. | |
2 # Use of this source code is governed by a BSD-style license that can be | |
3 # found in the LICENSE file. | |
4 | |
5 import argparse | |
6 import collections | |
7 import copy | |
8 import datetime | |
9 import itertools | |
10 import logging | |
11 import subprocess | |
12 import time | |
13 import unittest | |
14 import urllib2 | |
15 | |
16 import dateutil | |
17 | |
18 from infra.tools.cq_stats import cq_stats | |
19 | |
20 | |
21 class Args(object): | |
22 def __init__(self, **kwargs): | |
23 self.project = 'test_project' | |
24 self.list_rejections = False | |
25 self.list_false_rejections = False | |
26 self.use_logs = False | |
27 self.date = datetime.datetime(2014, 1, 1) | |
28 self.range = 'week' | |
29 self.verbose = 'error' | |
30 self.seq = 'false' | |
31 self.thread_pool = 3 | |
32 for name, val in kwargs.iteritems(): | |
33 self.__dict__[name] = val | |
34 | |
35 | |
36 class ResponseMock(object): | |
37 """Mock out Response class for urllib2.urlopen().""" | |
38 def __init__(self, lines, retries): | |
39 self.lines = lines | |
40 self.retries = retries | |
41 | |
42 def read(self): | |
43 return '\n'.join(self.lines) | |
44 | |
45 def __iter__(self): | |
46 return self.lines.__iter__() | |
47 | |
48 | |
49 def urlopen_mock(lines, retries=0): | |
50 obj = ResponseMock(lines, retries) | |
51 def func(_): | |
52 if obj.retries: | |
53 obj.retries -= 1 | |
54 raise IOError('mock error') | |
55 return obj | |
56 return func | |
57 | |
58 | |
59 class TestCQStats(unittest.TestCase): | |
60 _saved = None | |
61 | |
62 def mock(self, obj, member, mock): | |
Sergey Berezin
2015/05/05 17:06:18
nit: infra.git now depends on testing_support, whi
Paweł Hajdan Jr.
2015/05/06 11:09:15
Done.
| |
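A minimal sketch of the switch Sergey suggests, assuming testing_support exposes an auto_stub.TestCase whose mock() helper records the original member and restores it automatically on tearDown (as the depot_tools version does); the hand-rolled _saved bookkeeping in mock()/tearDown below would then be unnecessary. Args and cq_stats are the helpers already defined and imported in this file:

    import argparse

    from testing_support import auto_stub

    class TestCQStats(auto_stub.TestCase):
      def test_parse_args(self):
        # auto_stub.TestCase.mock() saves the original attribute and its
        # tearDown() restores it, so no manual _saved dict is needed.
        self.mock(argparse.ArgumentParser, 'parse_args',
                  lambda *_: Args(date='2014-01-01'))
        self.assertIsNotNone(cq_stats.parse_args())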
63 self._saved = self._saved or collections.OrderedDict() | |
64 old_value = self._saved.setdefault( | |
65 obj, collections.OrderedDict()).setdefault(member, getattr(obj, member)) | |
66 setattr(obj, member, mock) | |
67 return old_value | |
68 | |
69 def setUp(self): | |
70 super(TestCQStats, self).setUp() | |
71 self.expectations = [] | |
72 | |
73 def tearDown(self): | |
74 """Restore all the mocked members.""" | |
75 if self._saved: | |
76 for obj, items in self._saved.iteritems(): | |
77 for member, previous_value in items.iteritems(): | |
78 setattr(obj, member, previous_value) | |
79 self._saved = None | |
80 self.expectations = [] | |
81 super(TestCQStats, self).tearDown() | |
82 | |
83 def print_mock(self, fmt='', *args): | |
84 # Make sure lines are correctly split when \n is in the string. | |
85 # This preserves the expectations when going from | |
86 # print;print('string') to print('\nstring'). | |
87 self.expectations += ((fmt + '\n') % args).splitlines() | |
88 | |
89 def test_output(self): | |
90 cq_stats.output('') | |
91 | |
92 def test_parse_args(self): | |
93 self.mock(argparse.ArgumentParser, 'parse_args', | |
94 lambda *_: Args(date='2014-01-01')) | |
95 self.assertIsNotNone(cq_stats.parse_args()) | |
96 self.mock(argparse.ArgumentParser, 'parse_args', | |
97 lambda *_: Args(date=None)) | |
98 self.assertIsNotNone(cq_stats.parse_args()) | |
99 | |
100 def test_date_from_string(self): | |
101 self.assertRaises(ValueError, cq_stats.date_from_string, 'bad time') | |
102 self.assertEqual(cq_stats.date_from_string('2014-10-15'), | |
103 datetime.datetime(2014, 10, 15)) | |
104 | |
105 def test_date_from_timestamp(self): | |
106 self.assertIs(type(cq_stats.date_from_timestamp(12345678.9)), | |
107 datetime.datetime) | |
108 | |
109 def test_date_from_git(self): | |
110 self.assertIsNone(cq_stats.date_from_git('')) | |
111 self.assertIsNone(cq_stats.date_from_git('bad time')) | |
112 self.assertEqual(cq_stats.date_from_git('Tue Oct 21 22:38:39 2014'), | |
113 datetime.datetime(2014, 10, 21, 22, 38, 39)) | |
114 | |
115 def test_fetch_json(self): | |
116 self.mock(time, 'sleep', lambda n: None) | |
117 | |
118 self.mock(urllib2, 'urlopen', urlopen_mock(['{"a": "b"}'])) | |
119 self.assertEqual(cq_stats.fetch_json('https://'), {'a': 'b'}) | |
120 | |
121 self.mock(urllib2, 'urlopen', urlopen_mock(['{"a": "b"}'], retries=1)) | |
122 self.assertEqual(cq_stats.fetch_json('https://'), {'a': 'b'}) | |
123 | |
124 self.mock(urllib2, 'urlopen', urlopen_mock(['{"a": "b"}'], retries=100)) | |
125 self.assertEqual(cq_stats.fetch_json('https://'), {'error': '404'}) | |
126 | |
127 self.mock(urllib2, 'urlopen', urlopen_mock(['{([bad json'])) | |
128 self.assertEqual(cq_stats.fetch_json('https://'), {'error': '404'}) | |
129 | |
130 def test_fetch_tree_status(self): | |
131 # Invalid result | |
132 self.mock(cq_stats, 'fetch_json', lambda url: {}) | |
133 self.assertEqual([], cq_stats.fetch_tree_status( | |
134 'chromium', datetime.datetime(2014, 10, 15))) | |
135 # Valid result | |
136 res = [{'date': '2014-10-01 14:54:44.553', | |
137 'general_state': 'open'}, | |
138 {'date': '2014-10-14 10:54:44', | |
139 'general_state': 'closed'}, | |
140 {'date': '2014-10-16 10:54:44', | |
141 'general_state': 'closed'}, | |
142 ] | |
143 self.mock(cq_stats, 'fetch_json', lambda url: res) | |
144 | |
145 status = cq_stats.fetch_tree_status( | |
146 'chromium', datetime.datetime(2014, 10, 15)) | |
147 self.assertListEqual([ | |
148 {'date': datetime.datetime(2014, 10, 1, 16, 54, 44), 'open': True}, | |
149 {'date': datetime.datetime(2014, 10, 14, 12, 54, 44), 'open': False} | |
150 ], status) | |
151 | |
152 status = cq_stats.fetch_tree_status( | |
153 'chromium', datetime.datetime(2014, 10, 17), | |
154 start_date=datetime.datetime(2014, 10, 15)) | |
155 self.assertListEqual([ | |
156 {'date': datetime.datetime(2014, 10, 16, 12, 54, 44), 'open': False}, | |
157 ], status) | |
158 | |
159 def test_fetch_git_page(self): | |
160 self.mock(urllib2, 'urlopen', urlopen_mock(['{([bad json'])) | |
161 self.assertEqual({}, cq_stats.fetch_git_page('url')) | |
162 self.mock(urllib2, 'urlopen', urlopen_mock([ | |
163 ")]}'", '{"json": 1}', | |
164 ])) | |
165 self.assertEqual({'json': 1}, cq_stats.fetch_git_page('url')) | |
166 self.assertEqual({'json': 1}, | |
167 cq_stats.fetch_git_page('url', cursor='cursor')) | |
168 | |
169 def test_fetch_git_logs(self): | |
170 pages = [ | |
171 {'log': [ | |
172 {'author': {'email': 'noone@chromium.org'}, | |
173 'committer': {'email': 'commit-bot@chromium.org', | |
174 'time': 'Tue Dec 23 22:38:39 2014'}}, | |
175 {'author': {'email': 'noone@chromium.org'}, | |
176 'committer': {'email': 'commit-bot@chromium.org', | |
177 'time': 'Tue Nov 23 22:38:39 2014'}}, | |
178 {'author': {'email': 'someone@chromium.org'}, | |
179 'committer': {'email': 'anyone@chromium.org', | |
180 'time': 'Tue Oct 22 22:38:39 2014'}}, | |
181 {'author': {'email': 'blink-deps-roller@chromium.org'}, | |
182 'committer': {'email': 'commit-bot@chromium.org', | |
183 'time': 'Tue Oct 21 23:38:39 2014'}}, | |
184 {'author': {'email': 'blink-deps-roller@chromium.org'}, | |
185 'committer': {'email': 'blink-deps-roller@chromium.org', | |
186 'time': 'Tue Oct 21 22:38:39 2014'}} | |
187 ], | |
188 'next': 1, | |
189 }, | |
190 {'log': [ | |
191 {'author': {'email': 'someone@chromium.org'}, | |
192 'committer': {'email': 'anyone@chromium.org'}}, | |
193 {'author': {'email': 'nobody@chromium.org'}, | |
194 'committer': {'email': 'commit-bot@chromium.org', | |
195 'time': 'Tue Sep 21 22:38:39 2014'}}, | |
196 ], | |
197 }, | |
198 ] | |
199 # Unused arguments: pylint: disable=W0613 | |
200 def fetch_mock(repo_url, cursor=None, page_size=2000): | |
201 if not cursor: | |
202 cursor = 0 | |
203 return pages[int(cursor)] | |
204 | |
205 self.mock(cq_stats, 'fetch_git_page', fetch_mock) | |
206 | |
207 data = cq_stats.fetch_git_logs( | |
208 'chromium', | |
209 datetime.datetime(2014, 10, 1), | |
210 datetime.datetime(2014, 12, 1)) | |
211 self.assertListEqual([ | |
Sergey Berezin
2015/05/05 17:06:18
Consider using built-in expectations:
return da
Paweł Hajdan Jr.
2015/05/06 11:09:15
Note there are tricky issues with datetime here. D
pgervais
2015/05/07 00:37:21
I you serialize the datetime here in the test func
| |
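A sketch of pgervais's serialization idea, which would also cover the other dict and list comparisons below that drew "consider using expectations" comments: convert datetime values to ISO strings inside the test so the fetched data becomes plain, JSON-friendly structures that an expect_tests-style runner can record and diff. The helper name is made up for illustration:

    import datetime

    def _with_serialized_dates(entries):
      """Copy git-log entries, turning datetime values into ISO strings."""
      out = []
      for entry in entries:
        entry = dict(entry)
        if isinstance(entry.get('date'), datetime.datetime):
          entry['date'] = entry['date'].isoformat()
        out.append(entry)
      return out

    # The test could then `return _with_serialized_dates(data)` instead of
    # maintaining the hand-written assertListEqual below.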
212 { | |
213 'date': datetime.datetime(2014, 11, 23, 22, 38, 39), | |
214 'revision': None, | |
215 'commit-bot': True, | |
216 'author': 'noone@chromium.org' | |
217 }, | |
218 { | |
219 'date': datetime.datetime(2014, 10, 22, 22, 38, 39), | |
220 'revision': None, | |
221 'commit-bot': False, | |
222 'author': 'someone@chromium.org' | |
223 }, | |
224 { | |
225 'date': datetime.datetime(2014, 10, 21, 23, 38, 39), | |
226 'revision': None, | |
227 'commit-bot': True, | |
228 'author': 'blink-deps-roller@chromium.org' | |
229 }, | |
230 { | |
231 'date': datetime.datetime(2014, 10, 21, 22, 38, 39), | |
232 'revision': None, | |
233 'commit-bot': False, | |
234 'author': 'blink-deps-roller@chromium.org' | |
235 }, | |
236 ], data) | |
237 | |
238 derived_data = cq_stats.derive_git_stats( | |
239 'chromium', | |
240 datetime.datetime(2014, 9, 1), | |
241 datetime.datetime(2014, 12, 1), | |
242 ['blink-deps-roller@chromium.org']) | |
243 self.assertDictEqual({ | |
Sergey Berezin
2015/05/05 17:06:18
Likewise, consider using expectations. For both va
| |
244 'bot_commits': 2, | |
245 'bot_committers': 1, | |
246 'bot_manual_commits': 1, | |
247 'committers': 4, | |
248 'cq_commits': 3, | |
249 'manual_commits': 2, | |
250 'manual_committers': 2, | |
251 'manual_only_committers': {'someone@chromium.org': 1}, | |
252 'total_commits': 5, | |
253 'users': 3, | |
254 }, derived_data) | |
255 | |
256 def test_fetch_svn_logs(self): | |
257 xml = """<?xml version="1.0" encoding="UTF-8"?> | |
258 <log> | |
259 <logentry | |
260 revision="184775"> | |
261 <author>amikhaylova@google.com</author> | |
262 <date>2014-11-01T20:49:20.468030Z</date> | |
263 <msg>Move Promise Tracker out of hidden experiments. | |
264 | |
265 BUG=348919 | |
266 | |
267 Review URL: https://codereview.chromium.org/697833002</msg> | |
268 <revprops> | |
269 <property | |
270 name="commit-bot">commit-bot@chromium.org</property> | |
271 </revprops> | |
272 </logentry> | |
273 <logentry | |
274 revision="184774"> | |
275 <author>amikhaylova@google.com</author> | |
276 <date>2014-11-01T20:49:20.468030Z</date> | |
277 <msg>Move Promise Tracker out of hidden experiments. | |
278 | |
279 BUG=348919 | |
280 | |
281 Review URL: https://codereview.chromium.org/697833002</msg> | |
282 <revprops> | |
283 <property | |
284 name="foo">bar</property> | |
285 </revprops> | |
286 </logentry> | |
287 <logentry | |
288 revision="184773"> | |
289 <author>amikhaylova@google.com</author> | |
290 <date>2014-11-01T20:49:20.468030Z</date> | |
291 <msg>Move Promise Tracker out of hidden experiments. | |
292 | |
293 BUG=348919 | |
294 | |
295 Review URL: https://codereview.chromium.org/697833002</msg> | |
296 </logentry> | |
297 </log> | |
298 """ | |
299 self.mock(subprocess, 'check_output', lambda *_: xml) | |
300 data = cq_stats.fetch_svn_logs( | |
301 'chromium', | |
302 datetime.datetime(2014, 1, 1), | |
303 datetime.datetime(2014, 1, 1)) | |
304 self.assertListEqual([ | |
Sergey Berezin
2015/05/05 17:06:18
Consider using expectations.
| |
305 { | |
306 'date': datetime.datetime( | |
307 2014, 11, 1, 20, 49, 20, 468030, tzinfo=dateutil.tz.tzutc()), | |
308 'revprops': {'commit-bot': 'commit-bot@chromium.org'}, | |
309 'commit-bot': True, | |
310 'author': 'amikhaylova@google.com' | |
311 }, | |
312 { | |
313 'date': datetime.datetime( | |
314 2014, 11, 1, 20, 49, 20, 468030, tzinfo=dateutil.tz.tzutc()), | |
315 'revprops': {'foo': 'bar'}, | |
316 'commit-bot': False, | |
317 'author': 'amikhaylova@google.com' | |
318 }, | |
319 { | |
320 'date': datetime.datetime( | |
321 2014, 11, 1, 20, 49, 20, 468030, tzinfo=dateutil.tz.tzutc()), | |
322 'revprops': {}, | |
323 'commit-bot': False, | |
324 'author': 'amikhaylova@google.com' | |
325 }, | |
326 ], data) | |
327 | |
328 derived_data = cq_stats.derive_svn_stats( | |
329 'chromium', | |
330 datetime.datetime(2014, 1, 1), | |
331 datetime.datetime(2014, 1, 1), | |
332 []) | |
333 self.assertDictEqual({ | |
Sergey Berezin
2015/05/05 17:06:18
Consider using expectations.
| |
334 'bot_commits': 0, | |
335 'bot_committers': 0, | |
336 'bot_manual_commits': 0, | |
337 'committers': 1, | |
338 'cq_commits': 1, | |
339 'manual_commits': 2, | |
340 'manual_committers': 1, | |
341 'manual_only_committers': {}, | |
342 'total_commits': 3, | |
343 'users': 1, | |
344 }, derived_data) | |
345 | |
346 def test_fetch_stats(self): | |
347 self.mock(cq_stats, 'fetch_json', lambda _: 'json') | |
348 self.assertEqual('json', cq_stats.fetch_stats(Args())) | |
349 self.assertEqual('json', cq_stats.fetch_stats(Args(date=None))) | |
350 self.assertEqual('json', cq_stats.fetch_stats( | |
351 Args(), datetime.datetime(2014, 10, 15))) | |
352 self.assertEqual('json', cq_stats.fetch_stats( | |
353 Args(), datetime.datetime(2014, 10, 15), 'day')) | |
354 | |
355 def test_fetch_cq_logs(self): | |
356 def mkresults(series): | |
357 return [{'a': n} for n in series] | |
358 pages_default = [ | |
359 {'more': True, | |
360 'cursor': '!@#$%^', | |
361 'results': mkresults(range(1, 3)), | |
362 }, | |
363 {'more': False, | |
364 'results': mkresults(range(3, 6)), | |
365 }, | |
366 ] | |
367 expected_result = mkresults(range(1, 6)) | |
368 | |
369 start_date = datetime.datetime(2014, 10, 15) | |
370 end_date = datetime.datetime(2014, 10, 20) | |
371 pages = [] | |
372 | |
373 def fetch_json_mock(_): | |
374 return pages.pop(0) | |
375 | |
376 self.mock(cq_stats, 'fetch_json', fetch_json_mock) | |
377 pages[:] = pages_default | |
378 self.assertEqual(cq_stats.fetch_cq_logs(), expected_result) | |
379 pages[:] = pages_default | |
380 self.assertEqual(cq_stats.fetch_cq_logs(start_date=start_date), | |
381 expected_result) | |
382 pages[:] = pages_default | |
383 self.assertEqual(cq_stats.fetch_cq_logs(end_date=end_date), | |
384 expected_result) | |
385 | |
386 def test_organize_stats(self): | |
387 stats = {'results': [ | |
388 {'begin': t, | |
389 'stats': [ | |
390 {'count': 3, 'type': 'count', | |
391 'name': 'attempt-count'}, | |
392 {'count': 2, 'type': 'count', | |
393 'name': 'trybot-bot-false-reject-count'}, | |
394 {'count': 1, 'type': 'count', | |
395 'name': 'trybot-bot-pass-count'}, | |
396 {'description': 'Total time spent per CQ attempt.', | |
397 'max': 9999.99999, | |
398 'percentile_25': 2512.34567, | |
399 'percentile_75': 7512.34567, | |
400 'percentile_10': 1012.34567, | |
401 'unit': 'seconds', | |
402 'name': 'attempt-durations', | |
403 'percentile_50': 5012.34567, | |
404 'min': 0.00001, | |
405 'sample_size': 10000, | |
406 'percentile_90': 9012.34567, | |
407 'percentile_95': 9512.34567, | |
408 'percentile_99': 9912.34567, | |
409 'type': 'list', | |
410 'mean': 5555.555555}, | |
411 ], | |
412 'interval_minutes': 15, | |
413 'project': 'chromium', | |
414 'key': 5976204561612800, | |
415 'end': t + 900} for t in [1415138400, 1415139300]]} | |
416 | |
417 result = cq_stats.organize_stats(stats) | |
418 self.assertDictEqual({ | |
Sergey Berezin
2015/05/05 17:06:19
This definitely needs to go into expectations - it
| |
419 'latest': { | |
420 'attempt-count': 3, | |
421 'attempt-durations': { | |
422 '10': 1012.34567, | |
423 '25': 2512.34567, | |
424 '50': 5012.34567, | |
425 '75': 7512.34567, | |
426 '90': 9012.34567, | |
427 '95': 9512.34567, | |
428 '99': 9912.34567, | |
429 'max': 9999.99999, | |
430 'mean': 5555.555555, | |
431 'min': 1e-05, | |
432 'size': 10000 | |
433 }, | |
434 'attempt-false-reject-count': 0, | |
435 'attempt-reject-count': 0, | |
436 'begin': datetime.datetime(2014, 11, 4, 23, 0), | |
437 'end': datetime.datetime(2014, 11, 4, 23, 15), | |
438 'failed-commit': [], | |
439 'failed-jobs': [], | |
440 'failed-patch': [], | |
441 'failed-presubmit-bot': [], | |
442 'failed-presubmit-check': [], | |
443 'failed-to-trigger': [], | |
444 'failed-unknown': [], | |
445 'false-rejections': [], | |
446 'invalid-delimiter': [], | |
447 'issue-count': 0, | |
448 'jobs': {'bot': {'false-reject-count': 2, 'pass-count': 1}}, | |
449 'manual-cancel': [], | |
450 'missing-lgtm': [], | |
451 'not-lgtm': [], | |
452 'patch_stats': {}, | |
453 'patchset-attempts': { | |
454 '10': 0.0, | |
455 '25': 0.0, | |
456 '50': 0.0, | |
457 '75': 0.0, | |
458 '90': 0.0, | |
459 '95': 0.0, | |
460 '99': 0.0, | |
461 'max': 0, | |
462 'mean': 0.0, | |
463 'min': 0, | |
464 'size': 1 | |
465 }, | |
466 'patchset-commit-count': 0, | |
467 'patchset-committed-attempts': { | |
468 '10': 0.0, | |
469 '25': 0.0, | |
470 '50': 0.0, | |
471 '75': 0.0, | |
472 '90': 0.0, | |
473 '95': 0.0, | |
474 '99': 0.0, | |
475 'max': 0, | |
476 'mean': 0.0, | |
477 'min': 0, | |
478 'size': 1 | |
479 }, | |
480 'patchset-committed-durations': { | |
481 '10': 0.0, | |
482 '25': 0.0, | |
483 '50': 0.0, | |
484 '75': 0.0, | |
485 '90': 0.0, | |
486 '95': 0.0, | |
487 '99': 0.0, | |
488 'max': 0, | |
489 'mean': 0.0, | |
490 'min': 0, | |
491 'size': 1 | |
492 }, | |
493 'patchset-count': 0, | |
494 'patchset-durations': { | |
495 '10': 0.0, | |
496 '25': 0.0, | |
497 '50': 0.0, | |
498 '75': 0.0, | |
499 '90': 0.0, | |
500 '95': 0.0, | |
501 '99': 0.0, | |
502 'max': 0, | |
503 'mean': 0.0, | |
504 'min': 0, | |
505 'size': 1 | |
506 }, | |
507 'patchset-false-reject-count': 0, | |
508 'patchset-total-commit-queue-durations': { | |
509 '10': 0.0, | |
510 '25': 0.0, | |
511 '50': 0.0, | |
512 '75': 0.0, | |
513 '90': 0.0, | |
514 '95': 0.0, | |
515 '99': 0.0, | |
516 'max': 0, | |
517 'mean': 0.0, | |
518 'min': 0, | |
519 'size': 1 | |
520 }, | |
521 'rejected-patches': set([]), | |
522 'rejections': [], | |
523 'tree': {'open': 0.0, 'total': 0.0}, | |
524 'trybot-bot-false-reject-count': 2, | |
525 'trybot-bot-pass-count': 1, | |
526 'usage': {} | |
527 }, | |
528 'previous': { | |
529 'attempt-count': 3, | |
530 'attempt-durations': { | |
531 '10': 1012.34567, | |
532 '25': 2512.34567, | |
533 '50': 5012.34567, | |
534 '75': 7512.34567, | |
535 '90': 9012.34567, | |
536 '95': 9512.34567, | |
537 '99': 9912.34567, | |
538 'max': 9999.99999, | |
539 'mean': 5555.555555, | |
540 'min': 1e-05, | |
541 'size': 10000 | |
542 }, | |
543 'attempt-false-reject-count': 0, | |
544 'attempt-reject-count': 0, | |
545 'begin': datetime.datetime(2014, 11, 4, 23, 15), | |
546 'end': datetime.datetime(2014, 11, 4, 23, 30), | |
547 'failed-commit': [], | |
548 'failed-jobs': [], | |
549 'failed-patch': [], | |
550 'failed-presubmit-bot': [], | |
551 'failed-presubmit-check': [], | |
552 'failed-to-trigger': [], | |
553 'failed-unknown': [], | |
554 'false-rejections': [], | |
555 'invalid-delimiter': [], | |
556 'issue-count': 0, | |
557 'jobs': {'bot': {'false-reject-count': 2, 'pass-count': 1}}, | |
558 'manual-cancel': [], | |
559 'missing-lgtm': [], | |
560 'not-lgtm': [], | |
561 'patch_stats': {}, | |
562 'patchset-attempts': { | |
563 '10': 0.0, | |
564 '25': 0.0, | |
565 '50': 0.0, | |
566 '75': 0.0, | |
567 '90': 0.0, | |
568 '95': 0.0, | |
569 '99': 0.0, | |
570 'max': 0, | |
571 'mean': 0.0, | |
572 'min': 0, | |
573 'size': 1 | |
574 }, | |
575 'patchset-commit-count': 0, | |
576 'patchset-committed-attempts': { | |
577 '10': 0.0, | |
578 '25': 0.0, | |
579 '50': 0.0, | |
580 '75': 0.0, | |
581 '90': 0.0, | |
582 '95': 0.0, | |
583 '99': 0.0, | |
584 'max': 0, | |
585 'mean': 0.0, | |
586 'min': 0, | |
587 'size': 1 | |
588 }, | |
589 'patchset-committed-durations': { | |
590 '10': 0.0, | |
591 '25': 0.0, | |
592 '50': 0.0, | |
593 '75': 0.0, | |
594 '90': 0.0, | |
595 '95': 0.0, | |
596 '99': 0.0, | |
597 'max': 0, | |
598 'mean': 0.0, | |
599 'min': 0, | |
600 'size': 1 | |
601 }, | |
602 'patchset-count': 0, | |
603 'patchset-durations': { | |
604 '10': 0.0, | |
605 '25': 0.0, | |
606 '50': 0.0, | |
607 '75': 0.0, | |
608 '90': 0.0, | |
609 '95': 0.0, | |
610 '99': 0.0, | |
611 'max': 0, | |
612 'mean': 0.0, | |
613 'min': 0, | |
614 'size': 1 | |
615 }, | |
616 'patchset-false-reject-count': 0, | |
617 'patchset-total-commit-queue-durations': { | |
618 '10': 0.0, | |
619 '25': 0.0, | |
620 '50': 0.0, | |
621 '75': 0.0, | |
622 '90': 0.0, | |
623 '95': 0.0, | |
624 '99': 0.0, | |
625 'max': 0, | |
626 'mean': 0.0, | |
627 'min': 0, | |
628 'size': 1 | |
629 }, | |
630 'rejected-patches': set([]), | |
631 'rejections': [], | |
632 'tree': {'open': 0.0, 'total': 0.0}, | |
633 'trybot-bot-false-reject-count': 2, | |
634 'trybot-bot-pass-count': 1, | |
635 'usage': {} | |
636 } | |
637 }, result) | |
638 | |
639 # Test that the result stats have the minimal expected dict keys | |
640 # for print_stats(). | |
pgervais
2015/05/05 16:22:58
This looks like useless after the giant assert abo
| |
641 expected_keys = set(cq_stats.default_stats().keys()) | |
642 self.assertFalse(expected_keys - set(result['latest'].keys())) | |
643 self.assertFalse(expected_keys - set(result['previous'].keys())) | |
644 | |
645 self.assertIsNone(cq_stats.organize_stats({})) | |
646 | |
647 def test_derive_list_stats(self): | |
648 series = range(100) | |
649 stats = cq_stats.derive_list_stats(series) | |
650 self.assertDictEqual({ | |
Sergey Berezin
2015/05/05 17:06:19
Beware of dangers of float arithmetic - it's not e
Paweł Hajdan Jr.
2015/05/06 11:09:15
Done.
| |
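One way to address the float-exactness concern, sketched as a hypothetical helper rather than the fix that actually landed: compare float values with assertAlmostEqual instead of relying on exact equality of literals like 9.9000000000000004.

    def assertDictAlmostEqual(self, expected, actual, places=7):
      """Like assertDictEqual, but compares float values with a tolerance."""
      self.assertItemsEqual(expected.keys(), actual.keys())
      for key, value in expected.iteritems():
        if isinstance(value, float):
          self.assertAlmostEqual(value, actual[key], places=places)
        else:
          self.assertEqual(value, actual[key])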
651 '10': 9.9000000000000004, | |
652 '25': 24.75, | |
653 '50': 49.5, | |
654 '75': 74.25, | |
655 '90': 89.100000000000009, | |
656 '95': 94.049999999999997, | |
657 '99': 98.010000000000005, | |
658 'max': 99, | |
659 'mean': 49.5, | |
660 'min': 0, | |
661 'size': 100 | |
662 }, stats) | |
663 | |
664 self.assertEqual(cq_stats.derive_list_stats([])['size'], 1) | |
665 | |
666 def mock_derive_patch_stats(self, _, patch_id): | |
667 # The original function expects patch_id to be a 2-tuple. | |
668 self.assertIsInstance(patch_id, tuple) | |
669 self.assertEqual(len(patch_id), 2) | |
670 # Note: these fields are required by derive_stats(). Make sure | |
671 # they are present in the unit tests for derive_patch_stats(). | |
672 stats = { | |
673 'attempts': 3, | |
674 'false-rejections': 1, | |
675 'rejections': 2, | |
676 'committed': True, | |
677 'patchset-duration-wallclock': 1234.56, | |
678 'patchset-duration': 999.99, | |
679 'failed-jobs-details': {'tester': 2}, | |
680 } | |
681 return patch_id, stats | |
682 | |
683 def test_derive_stats(self): | |
684 # Unused args: pylint: disable=W0613 | |
685 def mock_fetch_cq_logs_0(begin_date=None, end_date=None, filters=None): | |
686 return [] | |
687 # Unused args: pylint: disable=W0613 | |
688 def mock_fetch_cq_logs(begin_date=None, end_date=None, filters=None): | |
689 return [ | |
690 {'fields': {'issue': 12345, 'patchset': 1}, | |
691 'timestamp': 1415150483.18568, | |
692 }, | |
693 ] | |
694 | |
695 self.mock(cq_stats, 'derive_patch_stats', self.mock_derive_patch_stats) | |
696 # Test empty logs. | |
697 self.mock(cq_stats, 'fetch_cq_logs', mock_fetch_cq_logs_0) | |
698 self.assertEqual(dict, type(cq_stats.derive_stats( | |
699 Args(), datetime.datetime(2014, 10, 15)))) | |
700 # Non-empty logs. | |
701 self.mock(cq_stats, 'fetch_cq_logs', mock_fetch_cq_logs) | |
702 self.assertEqual(dict, type(cq_stats.derive_stats( | |
703 Args(seq=False), datetime.datetime(2014, 10, 15)))) | |
704 self.assertEqual(dict, type(cq_stats.derive_stats( | |
705 Args(seq=True), datetime.datetime(2014, 10, 15)))) | |
706 | |
707 def test_stats_by_count_entry(self): | |
708 common = {'failed-jobs-details': 'jobs', 'reason1': 2, 'reason2': 3} | |
709 patch_stats = {'some-count': 5} | |
710 patch_stats.update(common) | |
711 expected = {'count': 5, 'patch_id': 'patch'} | |
712 expected.update(common) | |
713 self.assertEqual(expected, cq_stats.stats_by_count_entry( | |
714 patch_stats, 'some-count', 'patch', ['reason1', 'reason2'])) | |
715 | |
716 def test_parse_json(self): | |
717 self.assertEqual({'a': 5}, cq_stats.parse_json('{"a": 5}')) | |
718 self.assertEqual({'a': 5}, cq_stats.parse_json({'a': 5})) | |
719 self.assertEqual('bad json)}', cq_stats.parse_json('bad json)}')) | |
720 self.assertEqual({}, cq_stats.parse_json('bad json)}', return_type=dict)) | |
721 | |
722 def test_parse_failing_tryjobs(self): | |
723 message = ( | |
724 'Try jobs failed on following builders:\n' | |
725 ' try_rel on tryserver.fake (http://url.com/8633)\n' | |
726 ' dont_try_rel on tryserver.fake (http://url.com/8634)') | |
727 self.assertEqual(['try_rel', 'dont_try_rel'], | |
728 cq_stats.parse_failing_tryjobs(message)) | |
729 self.assertEqual([], cq_stats.parse_failing_tryjobs('')) | |
730 self.assertEqual([], cq_stats.parse_failing_tryjobs('single line')) | |
731 self.assertEqual([], cq_stats.parse_failing_tryjobs('empty line\n\n')) | |
732 | |
733 def test_derive_patch_stats(self): | |
734 time_obj = {'time': 1415150492.4} | |
735 def attempt(message, commit=False, reason=''): | |
736 time_obj['time'] += 1.37  # Dict lets the closure mutate it; Python 2 has no nonlocal. | |
737 entries = [] | |
738 entries.append({'fields': {'action': 'patch_start'}, | |
739 'timestamp': time_obj['time']}) | |
740 time_obj['time'] += 1.37 | |
741 if commit: | |
742 entries.append({'fields': {'action': 'patch_committed'}, | |
743 'timestamp': time_obj['time']}) | |
744 else: | |
745 entries.append({'fields': {'action': 'patch_failed', | |
746 'reason': {'fail_type': reason}}, | |
747 'timestamp': time_obj['time']}) | |
748 time_obj['time'] += 1.37 | |
749 entries.append({'fields': {'action': 'patch_stop', 'message': message}, | |
750 'timestamp': time_obj['time']}) | |
751 return entries | |
752 | |
753 attempts = [ | |
754 attempt('CQ bit was unchecked on CL'), | |
755 attempt('No LGTM from valid reviewers', reason='reviewer_lgtm'), | |
756 attempt('A disapproval has been posted'), | |
757 attempt('Transient error: Invalid delimiter'), | |
758 attempt('Failed to commit', reason='commit'), | |
759 attempt('Failed to apply patch'), | |
760 attempt('Presubmit check'), | |
761 attempt('Try jobs failed:\n test_dbg', reason='simple try job'), | |
762 attempt('Try jobs failed:\n chromium_presubmit'), | |
763 attempt('Exceeded time limit waiting for builds to trigger'), | |
764 attempt('Some totally random unknown reason') + [ | |
765 {'fields': {'action': 'random garbage'}, | |
766 'timestamp': time_obj['time'] + 0.5}], | |
767 attempt('', commit=True), | |
768 ] | |
769 | |
770 # Dangerous default value, unused args: pylint: disable=W0102,W0613 | |
771 def mock_fetch_cq_logs(begin_date=None, end_date=None, filters=[]): | |
772 entries = list(itertools.chain(*attempts)) | |
773 entries.reverse() | |
774 return entries | |
775 | |
776 # Dangerous default value, unused args: pylint: disable=W0102,W0613 | |
777 def mock_fetch_cq_logs_0(begin_date=None, end_date=None, filters=[]): | |
778 return [] | |
779 | |
780 # Dangerous default value, unused args: pylint: disable=W0102,W0613 | |
781 def mock_fetch_cq_logs_junk(begin_date=None, end_date=None, filters=[]): | |
782 return [{'fields': {'action': 'cq_start'}, 'timestamp': 1415150662.3}] | |
783 | |
784 self.mock(cq_stats, 'fetch_cq_logs', mock_fetch_cq_logs) | |
785 | |
786 patch_id = ('pid', 5) | |
787 pid, stats = cq_stats.derive_patch_stats( | |
788 datetime.datetime(2014, 10, 15), patch_id) | |
789 self.assertEqual(patch_id, pid) | |
790 # Check required fields in the result. | |
791 for k in self.mock_derive_patch_stats(None, patch_id)[1]: | |
792 self.assertIsNotNone(stats.get(k)) | |
793 # A few sanity checks. | |
794 self.assertEqual(stats['attempts'], len(attempts)) | |
795 self.assertEqual(stats['committed'], True) | |
796 self.assertGreater(stats['false-rejections'], 0) | |
797 | |
798 self.mock(cq_stats, 'fetch_cq_logs', mock_fetch_cq_logs_0) | |
799 pid, stats = cq_stats.derive_patch_stats( | |
800 datetime.datetime(2014, 10, 15), patch_id) | |
801 # Cover the case when there are actions, but no CQ attempts. | |
802 self.mock(cq_stats, 'fetch_cq_logs', mock_fetch_cq_logs_junk) | |
803 pid, stats = cq_stats.derive_patch_stats( | |
804 datetime.datetime(2014, 10, 15), patch_id) | |
805 | |
806 | |
807 def test_derive_tree_stats(self): | |
808 def makeDate(days=0, hours=0, minutes=0, seconds=0): | |
809 start_date = datetime.datetime(2014, 10, 1, 15, 20, 12, 345) | |
810 return start_date + datetime.timedelta( | |
811 days=days, seconds=hours*3600+minutes*60+seconds) | |
812 | |
813 events = [ | |
814 {'date': makeDate(-1), | |
815 'open': True}, | |
816 {'date': makeDate(0, 12, 35, 11), | |
817 'open': False}, | |
818 {'date': makeDate(0, 12, 45, 53), | |
819 'open': True}, | |
820 {'date': makeDate(0, 23, 59, 51), | |
821 'open': False}, | |
822 {'date': makeDate(0, 23, 59, 55), | |
823 'open': True}, | |
824 {'date': makeDate(1, 3, 43, 32), | |
825 'open': False}, | |
826 ] | |
827 # pylint: disable=unused-argument | |
828 def mock_fetch(_project, end_date, _start_date=None, limit=1000): | |
829 return [e for e in events if e['date'] <= end_date] | |
830 | |
831 self.mock(cq_stats, 'fetch_tree_status', mock_fetch) | |
832 self.assertEqual( | |
833 cq_stats.derive_tree_stats('project', makeDate(0), makeDate(1)), | |
834 {'open': 85754.0, 'total': 3600.0 * 24}) | |
835 self.assertEqual( | |
836 cq_stats.derive_tree_stats('project', makeDate(0), makeDate(2)), | |
837 {'open': 99166.0, 'total': 3600.0 * 24 * 2}) | |
838 | |
839 def empty_fetch(_project, end_date, _start_date=None, limit=1000): | |
840 return [] | |
841 self.mock(cq_stats, 'fetch_tree_status', empty_fetch) | |
842 self.assertEqual( | |
843 cq_stats.derive_tree_stats('project', makeDate(0), makeDate(1)), | |
844 {'open': 0.0, 'total': 3600.0 * 24}) | |
845 | |
846 def test_print_attempt_counts(self): | |
847 self.mock(cq_stats, 'output', self.print_mock) | |
848 | |
849 stats = cq_stats.default_stats() | |
850 stats['patch_stats'] = { | |
851 (123, 1): { | |
852 'attempts': 1, | |
853 'false-rejections': 0, | |
854 'rejections': 1, | |
855 'committed': False, | |
856 'patchset-duration': 3600, | |
857 'patchset-duration-wallclock': 3600, | |
858 'failed-jobs-details': { | |
859 'builder_a': 1, | |
860 }, | |
861 }, | |
862 } | |
863 cq_stats._derive_stats_from_patch_stats(stats) | |
864 | |
865 cq_stats.print_attempt_counts( | |
866 stats, 'rejections', 'were unsuccessful', | |
867 item_name=None, committed=False, details=True) | |
868 | |
869 cq_stats.print_attempt_counts( | |
870 stats, 'rejections', 'failed jobs', | |
871 item_name=None, committed=False) | |
Sergey Berezin
2015/05/05 17:06:18
return self.expectations
Otherwise you're not tes
Paweł Hajdan Jr.
2015/05/06 11:09:15
Done.
| |
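The "return self.expectations" suggestion recurs for the remaining print_* tests and test_main below; the pattern, assuming an expect_tests-style runner that records whatever a test method returns, is simply to hand back the captured output (shown on test_print_duration for brevity):

    def test_print_duration(self):
      self.mock(cq_stats, 'output', self.print_mock)
      cq_stats.print_duration('mean', Args(), cq_stats.default_stats(), None)
      # Returning the captured lines lets the runner diff them against a
      # recorded expectation file instead of only checking the call succeeds.
      return self.expectations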
872 | |
873 def test_print_duration(self): | |
874 self.mock(cq_stats, 'output', self.print_mock) | |
875 | |
Sergey Berezin
2015/05/05 17:06:19
nit: stray spaces on empty line.
Paweł Hajdan Jr.
2015/05/06 11:09:15
Done.
| |
876 cq_stats.print_duration('mean', Args(), cq_stats.default_stats(), None) | |
Sergey Berezin
2015/05/05 17:06:18
return self.expectations
Paweł Hajdan Jr.
2015/05/06 11:09:15
Done.
| |
877 | |
878 def test_print_usage(self): | |
879 self.mock(cq_stats, 'output', self.print_mock) | |
880 | |
881 stats = cq_stats.default_stats() | |
882 stats['usage'] = cq_stats.derive_log_stats([], []) | |
883 cq_stats.print_usage(Args(), stats, stats) | |
884 | |
885 stats['usage']['bot_manual_commits'] += 1 | |
886 cq_stats.print_usage(Args(), stats, stats) | |
Sergey Berezin
2015/05/05 17:06:18
return self.expectations
Paweł Hajdan Jr.
2015/05/06 11:09:15
Done.
| |
887 | |
888 # Expectation: must print stats in a certain format. | |
889 # Assumption: input stats at minimum have the keys from | |
890 # default_stats(). This is verified in test_organize_stats(). | |
891 def test_print_stats(self): | |
892 self.mock(cq_stats, 'output', self.print_mock) | |
893 args = Args() | |
894 stats_set = cq_stats.default_stats() | |
895 stats_set['begin'] = args.date | |
896 stats_set['end'] = args.date + datetime.timedelta(days=7) | |
897 | |
898 stats_set['jobs'].update({ | |
899 'foo_builder': { | |
900 'pass-count': 100, | |
901 'false-reject-count': 1, | |
902 }, | |
903 }) | |
904 | |
905 swapped_stats = copy.deepcopy(stats_set) | |
906 swapped_stats['begin'], swapped_stats['end'] = ( | |
907 swapped_stats['end'], swapped_stats['begin']) | |
908 | |
909 cq_stats.print_stats(args, {'latest': None, 'previous': stats_set}) | |
910 cq_stats.print_stats(args, {'latest': stats_set, 'previous': None}) | |
911 cq_stats.print_stats(args, {'latest': swapped_stats, 'previous': stats_set}) | |
912 cq_stats.print_stats(args, {'latest': stats_set, 'previous': stats_set}) | |
913 return self.expectations | |
914 | |
915 def test_print_log_stats(self): | |
916 self.mock(cq_stats, 'output', self.print_mock) | |
917 args = Args(use_logs=True) | |
918 stats_set = cq_stats.default_stats() | |
919 stats_set['begin'] = args.date | |
920 stats_set['end'] = args.date + datetime.timedelta(days=7) | |
921 | |
922 cq_stats.print_stats(args, {'latest': stats_set, 'previous': stats_set}) | |
Sergey Berezin
2015/05/05 17:06:18
return self.expectations
Paweł Hajdan Jr.
2015/05/06 11:09:15
Done.
| |
923 | |
924 def test_acquire_stats(self): | |
pgervais
2015/05/05 16:22:58
Without looking at the content of cq_stats.acquire
Sergey Berezin
2015/05/05 17:06:19
I disagree with #pragma: no cover - a smoke test t
pgervais
2015/05/05 21:34:04
What I meant is that with that amount of mocking,
Paweł Hajdan Jr.
2015/05/06 11:09:15
I agree the usefulness of this test is limited. I
| |
925 self.mock(cq_stats, 'fetch_json', lambda _: 'json') | |
926 self.mock(cq_stats, 'organize_stats', | |
927 lambda *_args, **_kwargs: { | |
928 'latest': cq_stats.default_stats(), | |
929 'previous': cq_stats.default_stats()}) | |
930 self.mock(cq_stats, 'derive_stats', lambda *_args, **_kwargs: {}) | |
931 self.mock(cq_stats, 'derive_tree_stats', | |
932 lambda *_: {'open': 0.0, 'total': 3600.0}) | |
933 self.mock(cq_stats, 'derive_git_stats', lambda *_: {}) | |
934 self.mock(cq_stats, 'derive_svn_stats', lambda *_: {}) | |
935 | |
936 cq_stats.acquire_stats(Args(project='blink', bots=[])) | |
937 cq_stats.acquire_stats(Args(project='chromium', bots=[])) | |
938 cq_stats.acquire_stats(Args( | |
939 project='chromium', bots=[], use_logs=True, range='week')) | |
940 cq_stats.acquire_stats(Args( | |
941 project='chromium', bots=[], use_logs=True, range='day')) | |
942 cq_stats.acquire_stats(Args( | |
943 project='chromium', bots=[], use_logs=True, range='hour')) | |
944 | |
945 def test_main(self): | |
946 self.mock(cq_stats, 'output', self.print_mock) | |
947 self.mock(cq_stats, 'parse_args', lambda: Args( | |
948 project='chromium', log_level=logging.CRITICAL, logs_black_list=None)) | |
949 self.mock(cq_stats, 'acquire_stats', lambda _: cq_stats.default_stats()) | |
950 cq_stats.main() | |
Sergey Berezin
2015/05/05 17:06:18
return self.expectations
Paweł Hajdan Jr.
2015/05/06 11:09:15
Done.
| |