OLD | NEW |
(Empty) | |
| 1 # Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. |
| 4 |
| 5 import argparse |
| 6 import collections |
| 7 import copy |
| 8 import datetime |
| 9 import itertools |
| 10 import logging |
| 11 import subprocess |
| 12 import time |
| 13 import unittest |
| 14 import urllib2 |
| 15 |
| 16 import dateutil |
| 17 |
| 18 from testing_support import auto_stub |
| 19 |
| 20 from infra.tools.cq_stats import cq_stats |
| 21 |
| 22 |
class Args(object):
  """Fake of the argparse namespace that cq_stats.parse_args() returns.

  Attributes default to a typical weekly-stats invocation; any keyword
  argument overrides (or adds) the corresponding attribute, e.g.
  Args(project='blink', bots=[]).
  """

  def __init__(self, **kwargs):
    self.project = 'test_project'
    self.list_rejections = False
    self.list_false_rejections = False
    self.use_logs = False
    self.date = datetime.datetime(2014, 1, 1)
    self.range = 'week'
    self.verbose = 'error'
    self.seq = 'false'
    self.thread_pool = 3
    # items() instead of the Python-2-only iteritems(), and setattr
    # instead of poking __dict__ directly; behavior is identical but
    # this also works under Python 3. Overrides may introduce attributes
    # not listed above (e.g. bots, log_level) that individual tests need.
    for name, val in kwargs.items():
      setattr(self, name, val)
| 36 |
| 37 |
class ResponseMock(object):
  """Mock out Response class for urllib2.urlopen().

  Exposes read() (the lines joined with newlines) and per-line
  iteration. `retries` is bookkeeping consumed by urlopen_mock, not by
  the response object itself.
  """

  def __init__(self, lines, retries):
    self.lines = lines
    self.retries = retries

  def read(self):
    # Join with newlines, mirroring a raw response body.
    return '\n'.join(self.lines)

  def __iter__(self):
    return iter(self.lines)
| 49 |
| 50 |
def urlopen_mock(lines, retries=0):
  """Return a callable standing in for urllib2.urlopen.

  The callable raises IOError for the first `retries` invocations and
  returns a ResponseMock wrapping `lines` on every invocation after
  that. All calls share one ResponseMock instance.
  """
  response = ResponseMock(lines, retries)

  def func(_):
    if not response.retries:
      return response
    # Still in the simulated-failure phase: burn one retry and fail.
    response.retries -= 1
    raise IOError('mock error')

  return func
| 59 |
| 60 |
def ensure_serializable(obj):
  """Recursively convert `obj` into plain, comparison-stable primitives.

  dicts recurse over keys and values, lists/sets become lists of
  converted items, datetimes become ISO-format strings, floats are
  rounded to 2 places, and everything else passes through unchanged.
  """
  if isinstance(obj, dict):
    # items() (not the Python-2-only iteritems()) keeps this helper
    # usable under both Python 2 and 3; behavior is identical.
    return {ensure_serializable(k): ensure_serializable(v)
            for k, v in obj.items()}
  elif isinstance(obj, (list, set)):
    return [ensure_serializable(i) for i in obj]
  elif isinstance(obj, datetime.datetime):
    return obj.isoformat()
  elif isinstance(obj, float):
    # Ensure consistent float results - generally float arithmetic
    # can be slightly different between CPUs and implementations.
    return round(obj, 2)
  else:
    return obj
| 75 |
| 76 |
| 77 class TestCQStats(auto_stub.TestCase): |
  def setUp(self):
    # Fresh buffer of captured output lines (filled by print_mock) for
    # each test.
    super(TestCQStats, self).setUp()
    self.expectations = []
| 81 |
  def tearDown(self):
    # Drop captured output before auto_stub's teardown unmocks things.
    self.expectations = []
    super(TestCQStats, self).tearDown()
| 85 |
| 86 def print_mock(self, fmt='', *args): |
| 87 # Make sure lines are correctly split when \n is in the string. |
| 88 # This preserves the expectations when going from |
| 89 # print;print('string') to print('\nstring'). |
| 90 self.expectations += ((fmt + '\n') % args).splitlines() |
| 91 |
  def test_output(self):
    # Smoke test: output() must accept an empty string without raising.
    cq_stats.output('')
| 94 |
  def test_parse_args(self):
    """parse_args() must work with an explicit date and with none."""
    self.mock(argparse.ArgumentParser, 'parse_args',
              lambda *_: Args(date='2014-01-01'))
    self.assertIsNotNone(cq_stats.parse_args())
    # No date given: parse_args() still produces a usable namespace.
    self.mock(argparse.ArgumentParser, 'parse_args',
              lambda *_: Args(date=None))
    self.assertIsNotNone(cq_stats.parse_args())
| 102 |
| 103 def test_date_from_string(self): |
| 104 self.assertRaises(ValueError, cq_stats.date_from_string, 'bad time') |
| 105 self.assertEqual(cq_stats.date_from_string('2014-10-15'), |
| 106 datetime.datetime(2014, 10, 15)) |
| 107 |
  def test_date_from_timestamp(self):
    # A POSIX float timestamp must come back as a datetime instance.
    self.assertIs(type(cq_stats.date_from_timestamp(12345678.9)),
                  datetime.datetime)
| 111 |
  def test_date_from_git(self):
    # Empty and unparsable inputs yield None rather than raising.
    self.assertIsNone(cq_stats.date_from_git(''))
    self.assertIsNone(cq_stats.date_from_git('bad time'))
    # A git-style date string parses to a naive datetime.
    self.assertEqual(cq_stats.date_from_git('Tue Oct 21 22:38:39 2014'),
                     datetime.datetime(2014, 10, 21, 22, 38, 39))
| 117 |
  def test_fetch_json(self):
    """fetch_json() retries transient errors and degrades gracefully."""
    # Neutralize any retry backoff so the test runs instantly.
    self.mock(time, 'sleep', lambda n: None)

    # Clean fetch.
    self.mock(urllib2, 'urlopen', urlopen_mock(['{"a": "b"}']))
    self.assertEqual(cq_stats.fetch_json('https://'), {'a': 'b'})

    # One transient failure, then success.
    self.mock(urllib2, 'urlopen', urlopen_mock(['{"a": "b"}'], retries=1))
    self.assertEqual(cq_stats.fetch_json('https://'), {'a': 'b'})

    # Persistent failures exhaust the retries: error marker returned.
    self.mock(urllib2, 'urlopen', urlopen_mock(['{"a": "b"}'], retries=100))
    self.assertEqual(cq_stats.fetch_json('https://'), {'error': '404'})

    # Malformed JSON also maps to the error marker.
    self.mock(urllib2, 'urlopen', urlopen_mock(['{([bad json']))
    self.assertEqual(cq_stats.fetch_json('https://'), {'error': '404'})
| 132 |
  def test_fetch_tree_status(self):
    """Exercise fetch_tree_status() on invalid and valid service replies.

    Returns serialized results for expectation-based comparison.
    """
    # Invalid result
    self.mock(cq_stats, 'fetch_json', lambda url: {})
    self.assertEqual([], cq_stats.fetch_tree_status(
        'chromium', datetime.datetime(2014, 10, 15)))
    # Valid result
    res = [{'date': '2014-10-01 14:54:44.553',
            'general_state': 'open'},
           {'date': '2014-10-14 10:54:44',
            'general_state': 'closed'},
           {'date': '2014-10-16 10:54:44',
            'general_state': 'closed'},
          ]
    self.mock(cq_stats, 'fetch_json', lambda url: res)

    # End date only.
    status1 = cq_stats.fetch_tree_status(
        'chromium', datetime.datetime(2014, 10, 15))

    # Explicit start and end dates.
    status2 = cq_stats.fetch_tree_status(
        'chromium', datetime.datetime(2014, 10, 17),
        start_date= datetime.datetime(2014, 10, 15))

    return map(ensure_serializable, [status1, status2])
| 156 |
  def test_fetch_git_page(self):
    # Malformed JSON yields an empty dict.
    self.mock(urllib2, 'urlopen', urlopen_mock(['{([bad json']))
    self.assertEqual({}, cq_stats.fetch_git_page('url'))
    # Gitiles-style responses start with a ")]}'"-guard line that
    # fetch_git_page must skip before decoding (verified by the
    # assertions below).
    self.mock(urllib2, 'urlopen', urlopen_mock([
        ")]}'", '{"json": 1}',
    ]))
    self.assertEqual({'json': 1}, cq_stats.fetch_git_page('url'))
    # A pagination cursor argument must be accepted too.
    self.assertEqual({'json': 1},
                     cq_stats.fetch_git_page('url', cursor='cursor'))
| 166 |
  def test_fetch_git_logs(self):
    """Exercise fetch_git_logs()/derive_git_stats() on canned log pages.

    Returns serialized results for expectation-based comparison.
    """
    # Two Gitiles-style log pages; the first links to the second via
    # 'next'. Commits mix commit-bot and human committers, plus a bot
    # author (blink-deps-roller) and one entry with no committer time.
    pages = [
        {'log': [
            {'author': {'email': 'noone@chromium.org'},
             'committer': {'email': 'commit-bot@chromium.org',
                           'time': 'Tue Dec 23 22:38:39 2014'}},
            {'author': {'email': 'noone@chromium.org'},
             'committer': {'email': 'commit-bot@chromium.org',
                           'time': 'Tue Nov 23 22:38:39 2014'}},
            {'author': {'email': 'someone@chromium.org'},
             'committer': {'email': 'anyone@chromium.org',
                           'time': 'Tue Oct 22 22:38:39 2014'}},
            {'author': {'email': 'blink-deps-roller@chromium.org'},
             'committer': {'email': 'commit-bot@chromium.org',
                           'time': 'Tue Oct 21 23:38:39 2014'}},
            {'author': {'email': 'blink-deps-roller@chromium.org'},
             'committer': {'email': 'blink-deps-roller@chromium.org',
                           'time': 'Tue Oct 21 22:38:39 2014'}}
        ],
         'next': 1,
        },
        {'log': [
            {'author': {'email': 'someone@chromium.org'},
             'committer': {'email': 'anyone@chromium.org'}},
            {'author': {'email': 'nobody@chromium.org'},
             'committer': {'email': 'commit-bot@chromium.org',
                           'time': 'Tue Sep 21 22:38:39 2014'}},
        ],
        },
    ]
    # Unused arguments: pylint: disable=W0613
    def fetch_mock(repo_url, cursor=None, page_size=2000):
      # The cursor doubles as a page index; no cursor means page 0.
      if not cursor:
        cursor = 0
      return pages[int(cursor)]

    self.mock(cq_stats, 'fetch_git_page', fetch_mock)

    data = cq_stats.fetch_git_logs(
        'chromium',
        datetime.datetime(2014, 10, 1),
        datetime.datetime(2014, 12, 1))

    derived_data = cq_stats.derive_git_stats(
        'chromium',
        datetime.datetime(2014, 9, 1),
        datetime.datetime(2014, 12, 1),
        ['blink-deps-roller@chromium.org'])

    return map(ensure_serializable, [data, derived_data])
| 217 |
  def test_fetch_svn_logs(self):
    """Exercise fetch_svn_logs()/derive_svn_stats() on canned svn XML.

    Returns serialized results for expectation-based comparison.
    """
    # Three revisions: one with a commit-bot revprop, one with an
    # unrelated revprop, and one with no revprops at all.
    xml = """<?xml version="1.0" encoding="UTF-8"?>
<log>
<logentry
revision="184775">
<author>amikhaylova@google.com</author>
<date>2014-11-01T20:49:20.468030Z</date>
<msg>Move Promise Tracker out of hidden experiments.

BUG=348919

Review URL: https://codereview.chromium.org/697833002</msg>
<revprops>
<property
name="commit-bot">commit-bot@chromium.org</property>
</revprops>
</logentry>
<logentry
revision="184774">
<author>amikhaylova@google.com</author>
<date>2014-11-01T20:49:20.468030Z</date>
<msg>Move Promise Tracker out of hidden experiments.

BUG=348919

Review URL: https://codereview.chromium.org/697833002</msg>
<revprops>
<property
name="foo">bar</property>
</revprops>
</logentry>
<logentry
revision="184773">
<author>amikhaylova@google.com</author>
<date>2014-11-01T20:49:20.468030Z</date>
<msg>Move Promise Tracker out of hidden experiments.

BUG=348919

Review URL: https://codereview.chromium.org/697833002</msg>
</logentry>
</log>
"""
    # Replace the svn subprocess invocation with the canned output.
    self.mock(subprocess, 'check_output', lambda *_: xml)
    data = cq_stats.fetch_svn_logs(
        'chromium',
        datetime.datetime(2014, 1, 1),
        datetime.datetime(2014, 1, 1))

    derived_data = cq_stats.derive_svn_stats(
        'chromium',
        datetime.datetime(2014, 1, 1),
        datetime.datetime(2014, 1, 1),
        [])

    return map(ensure_serializable, [data, derived_data])
| 274 |
| 275 def test_fetch_stats(self): |
| 276 self.mock(cq_stats, 'fetch_json', lambda _: 'json') |
| 277 self.assertEqual('json', cq_stats.fetch_stats(Args())) |
| 278 self.assertEqual('json', cq_stats.fetch_stats(Args(date=None))) |
| 279 self.assertEqual('json', cq_stats.fetch_stats( |
| 280 Args(), datetime.datetime(2014, 10, 15))) |
| 281 self.assertEqual('json', cq_stats.fetch_stats( |
| 282 Args(), datetime.datetime(2014, 10, 15), 'day')) |
| 283 |
  def test_fetch_cq_logs(self):
    """fetch_cq_logs() follows 'more'/'cursor' pagination to the end."""
    def mkresults(series):
      return [{'a': n} for n in series]
    # First page advertises more data via a cursor; second is final.
    pages_default = [
        {'more': True,
         'cursor': '!@#$%^',
         'results': mkresults(range(1, 3)),
        },
        {'more': False,
         'results': mkresults(range(3, 6)),
        },
    ]
    # Both pages concatenated, in order.
    expected_result = mkresults(range(1, 6))

    start_date = datetime.datetime(2014, 10, 15)
    end_date = datetime.datetime(2014, 10, 20)
    pages = []

    # Serve the queued pages one per call, regardless of the URL.
    def fetch_json_mock(_):
      return pages.pop(0)

    self.mock(cq_stats, 'fetch_json', fetch_json_mock)
    # Re-fill the queue before each call since fetch_json_mock drains it.
    pages[:] = pages_default
    self.assertEqual(cq_stats.fetch_cq_logs(), expected_result)
    pages[:] = pages_default
    self.assertEqual(cq_stats.fetch_cq_logs(start_date=start_date),
                     expected_result)
    pages[:] = pages_default
    self.assertEqual(cq_stats.fetch_cq_logs(end_date=end_date),
                     expected_result)
| 314 |
  def test_organize_stats(self):
    """organize_stats() turns raw interval stats into latest/previous.

    Returns the serialized result for expectation-based comparison.
    """
    # Two 15-minute intervals with identical count and list stats.
    stats = {'results': [
        {'begin': t,
         'stats': [
             {'count': 3, 'type': 'count',
              'name': 'attempt-count'},
             {'count': 2, 'type': 'count',
              'name': 'trybot-bot-false-reject-count'},
             {'count': 1, 'type': 'count',
              'name': 'trybot-bot-pass-count'},
             {'description': 'Total time spent per CQ attempt.',
              'max': 9999.99999,
              'percentile_25': 2512.34567,
              'percentile_75': 7512.34567,
              'percentile_10': 1012.34567,
              'unit': 'seconds',
              'name': 'attempt-durations',
              'percentile_50': 5012.34567,
              'min': 0.00001,
              'sample_size': 10000,
              'percentile_90': 9012.34567,
              'percentile_95': 9512.34567,
              'percentile_99': 9912.34567,
              'type': 'list',
              'mean': 5555.555555},
         ],
         'interval_minutes': 15,
         'project': 'chromium',
         'key': 5976204561612800,
         'end': t + 900} for t in [1415138400, 1415139300]]}

    result = cq_stats.organize_stats(stats)

    # Test that the result stats have the minimal expected dict keys
    # for print_stats().
    expected_keys = set(cq_stats.default_stats().keys())
    self.assertFalse(expected_keys - set(result['latest'].keys()))
    self.assertFalse(expected_keys - set(result['previous'].keys()))

    # Empty input: nothing to organize.
    self.assertIsNone(cq_stats.organize_stats({}))

    return ensure_serializable(result)
| 357 |
| 358 def test_derive_list_stats(self): |
| 359 series = range(100) |
| 360 stats = cq_stats.derive_list_stats(series) |
| 361 # Ensure consistent float results - generally float arithmetic |
| 362 # can be slightly different between CPUs and implementations. |
| 363 stats = {k: round(v, 2) for k, v in stats.iteritems()} |
| 364 self.assertDictEqual({ |
| 365 '10': 9.9, |
| 366 '25': 24.75, |
| 367 '50': 49.5, |
| 368 '75': 74.25, |
| 369 '90': 89.1, |
| 370 '95': 94.05, |
| 371 '99': 98.01, |
| 372 'max': 99.0, |
| 373 'mean': 49.5, |
| 374 'min': 0.0, |
| 375 'size': 100.0, |
| 376 }, stats) |
| 377 |
| 378 self.assertEqual(cq_stats.derive_list_stats([])['size'], 1) |
| 379 |
  def mock_derive_patch_stats(self, _, patch_id):
    """Stand-in for cq_stats.derive_patch_stats used by derive_stats
    tests; also reused as the list of required result keys."""
    # The original function expects patch_id to be a 2-tuple.
    self.assertIsInstance(patch_id, tuple)
    self.assertEqual(len(patch_id), 2)
    # Note: these fields are required by derive_stats(). Make sure
    # they are present in the unit tests for derive_patch_stats().
    stats = {
        'attempts': 3,
        'false-rejections': 1,
        'rejections': 2,
        'committed': True,
        'patchset-duration-wallclock': 1234.56,
        'patchset-duration': 999.99,
        'failed-jobs-details': {'tester': 2},
    }
    return patch_id, stats
| 396 |
  def test_derive_stats(self):
    """derive_stats() returns a dict for empty and non-empty CQ logs."""
    # Unused args: pylint: disable=W0613
    def mock_fetch_cq_logs_0(begin_date=None, end_date=None, filters=None):
      return []
    # Unused args: pylint: disable=W0613
    def mock_fetch_cq_logs(begin_date=None, end_date=None, filters=None):
      return [
          {'fields': {'issue': 12345, 'patchset': 1},
           'timestamp': 1415150483.18568,
          },
      ]

    self.mock(cq_stats, 'derive_patch_stats', self.mock_derive_patch_stats)
    # Test empty logs.
    self.mock(cq_stats, 'fetch_cq_logs', mock_fetch_cq_logs_0)
    self.assertEqual(dict, type(cq_stats.derive_stats(
        Args(), datetime.datetime(2014, 10, 15))))
    # Non-empty logs, covering both values of the 'seq' flag.
    self.mock(cq_stats, 'fetch_cq_logs', mock_fetch_cq_logs)
    self.assertEqual(dict, type(cq_stats.derive_stats(
        Args(seq=False), datetime.datetime(2014, 10, 15))))
    self.assertEqual(dict, type(cq_stats.derive_stats(
        Args(seq=True), datetime.datetime(2014, 10, 15))))
| 420 |
| 421 def test_stats_by_count_entry(self): |
| 422 common = {'failed-jobs-details': 'jobs', 'reason1': 2, 'reason2': 3} |
| 423 patch_stats = {'some-count': 5} |
| 424 patch_stats.update(common) |
| 425 expected = {'count': 5, 'patch_id': 'patch'} |
| 426 expected.update(common) |
| 427 self.assertEqual(expected, cq_stats.stats_by_count_entry( |
| 428 patch_stats, 'some-count', 'patch', ['reason1', 'reason2'])) |
| 429 |
  def test_parse_json(self):
    """parse_json() decodes JSON strings and degrades on bad input."""
    self.assertEqual({'a': 5}, cq_stats.parse_json('{"a": 5}'))
    # Non-string input is passed through unchanged.
    self.assertEqual({'a': 5}, cq_stats.parse_json({'a': 5}))
    # Invalid JSON comes back verbatim by default...
    self.assertEqual('bad json)}', cq_stats.parse_json('bad json)}'))
    # ...or as an empty instance of the requested return_type.
    self.assertEqual({}, cq_stats.parse_json('bad json)}', return_type=dict))
| 435 |
  def test_parse_failing_tryjobs(self):
    """Builder names are extracted from lines after the header."""
    message = (
        'Try jobs failed on following builders:\n'
        ' try_rel on tryserver.fake (http://url.com/8633)\n'
        ' dont_try_rel on tryserver.fake (http://url.com/8634)')
    self.assertEqual(['try_rel', 'dont_try_rel'],
                     cq_stats.parse_failing_tryjobs(message))
    # Messages without builder lines yield an empty list.
    self.assertEqual([], cq_stats.parse_failing_tryjobs(''))
    self.assertEqual([], cq_stats.parse_failing_tryjobs('single line'))
    self.assertEqual([], cq_stats.parse_failing_tryjobs('empty line\n\n'))
| 446 |
  def test_derive_patch_stats(self):
    """derive_patch_stats() classifies a patch's full CQ attempt history."""
    # Monotonically increasing fake clock shared by every attempt; a
    # dict so the nested function can mutate it under Python 2.
    time_obj = {'time': 1415150492.4}
    def attempt(message, commit=False, reason=''):
      # Build a patch_start / patch_{committed,failed} / patch_stop
      # action triple, each 1.37s apart on the fake clock.
      time_obj['time'] += 1.37  # Trick python to use global var.
      entries = []
      entries.append({'fields': {'action': 'patch_start'},
                      'timestamp': time_obj['time']})
      time_obj['time'] += 1.37
      if commit:
        entries.append({'fields': {'action': 'patch_committed'},
                        'timestamp': time_obj['time']})
      else:
        entries.append({'fields': {'action': 'patch_failed',
                                   'reason': {'fail_type': reason}},
                        'timestamp': time_obj['time']})
      time_obj['time'] += 1.37
      entries.append({'fields': {'action': 'patch_stop', 'message': message},
                      'timestamp': time_obj['time']})
      return entries

    # One attempt per patch_stop message category, plus an unknown
    # message followed by a stray unrecognized action, and a final
    # committed attempt.
    attempts = [
        attempt('CQ bit was unchecked on CL'),
        attempt('No LGTM from valid reviewers', reason='reviewer_lgtm'),
        attempt('A disapproval has been posted'),
        attempt('Transient error: Invalid delimiter'),
        attempt('Failed to commit', reason='commit'),
        attempt('Failed to apply patch'),
        attempt('Presubmit check'),
        attempt('Try jobs failed:\n test_dbg', reason='simple try job'),
        attempt('Try jobs failed:\n chromium_presubmit'),
        attempt('Exceeded time limit waiting for builds to trigger'),
        attempt('Some totally random unknown reason') + [
            {'fields': {'action': 'random garbage'},
             'timestamp': time_obj['time'] + 0.5}],
        attempt('', commit=True),
    ]

    # Dangerous default value, unused args: pylint: disable=W0102,W0613
    def mock_fetch_cq_logs(begin_date=None, end_date=None, filters=[]):
      # Flatten the attempts into one reversed action stream.
      entries = list(itertools.chain(*attempts))
      entries.reverse()
      return entries

    # Dangerous default value, unused args: pylint: disable=W0102,W0613
    def mock_fetch_cq_logs_0(begin_date=None, end_date=None, filters=[]):
      return []

    # Dangerous default value, unused args: pylint: disable=W0102,W0613
    def mock_fetch_cq_logs_junk(begin_date=None, end_date=None, filters=[]):
      return [{'fields': {'action': 'cq_start'}, 'timestamp': 1415150662.3}]

    self.mock(cq_stats, 'fetch_cq_logs', mock_fetch_cq_logs)

    patch_id = ('pid', 5)
    pid, stats = cq_stats.derive_patch_stats(
        datetime.datetime(2014, 10, 15), patch_id)
    self.assertEqual(patch_id, pid)
    # Check required fields in the result.
    for k in self.mock_derive_patch_stats(None, patch_id)[1]:
      self.assertIsNotNone(stats.get(k))
    # A few sanity checks.
    self.assertEqual(stats['attempts'], len(attempts))
    self.assertEqual(stats['committed'], True)
    self.assertGreater(stats['false-rejections'], 0)

    # No actions at all: must not raise.
    self.mock(cq_stats, 'fetch_cq_logs', mock_fetch_cq_logs_0)
    pid, stats = cq_stats.derive_patch_stats(
        datetime.datetime(2014, 10, 15), patch_id)
    # Cover the case when there are actions, but no CQ attempts.
    self.mock(cq_stats, 'fetch_cq_logs', mock_fetch_cq_logs_junk)
    pid, stats = cq_stats.derive_patch_stats(
        datetime.datetime(2014, 10, 15), patch_id)
| 519 |
| 520 |
  def test_derive_tree_stats(self):
    """derive_tree_stats() reports open-tree seconds within a window."""
    def makeDate(days=0, hours=0, minutes=0, seconds=0):
      # Offsets relative to a fixed anchor inside the test window.
      start_date = datetime.datetime(2014, 10, 1, 15, 20, 12, 345)
      return start_date + datetime.timedelta(
          days=days, seconds=hours*3600+minutes*60+seconds)

    # Tree open/close transitions, including one before the window.
    events = [
        {'date': makeDate(-1),
         'open': True},
        {'date': makeDate(0, 12, 35, 11),
         'open': False},
        {'date': makeDate(0, 12, 45, 53),
         'open': True},
        {'date': makeDate(0, 23, 59, 51),
         'open': False},
        {'date': makeDate(0, 23, 59, 55),
         'open': True},
        {'date': makeDate(1, 3, 43, 32),
         'open': False},
    ]
    # pylint: disable=unused-argument
    def mock_fetch(_project, end_date, _start_date=None, limit=1000):
      return [e for e in events if e['date'] <= end_date]

    self.mock(cq_stats, 'fetch_tree_status', mock_fetch)
    self.assertEqual(
        cq_stats.derive_tree_stats('project', makeDate(0), makeDate(1)),
        {'open': 85754.0, 'total': 3600.0 * 24})
    self.assertEqual(
        cq_stats.derive_tree_stats('project', makeDate(0), makeDate(2)),
        {'open': 99166.0, 'total': 3600.0 * 24 * 2})

    # No status events at all: zero open time reported.
    def empty_fetch(_project, end_date, _start_date=None, limit=1000):
      return []
    self.mock(cq_stats, 'fetch_tree_status', empty_fetch)
    self.assertEqual(
        cq_stats.derive_tree_stats('project', makeDate(0), makeDate(1)),
        {'open': 0.0, 'total': 3600.0 * 24})
| 559 |
  def test_print_attempt_counts(self):
    """Exercise print_attempt_counts() with and without details.

    Returns captured output lines for expectation-based comparison.
    """
    self.mock(cq_stats, 'output', self.print_mock)

    # One uncommitted patch with a single rejected attempt.
    stats = cq_stats.default_stats()
    stats['patch_stats'] = {
        (123, 1): {
            'attempts': 1,
            'false-rejections': 0,
            'rejections': 1,
            'committed': False,
            'patchset-duration': 3600,
            'patchset-duration-wallclock': 3600,
            'failed-jobs-details': {
                'builder_a': 1,
            },
        },
    }
    cq_stats._derive_stats_from_patch_stats(stats)

    cq_stats.print_attempt_counts(
        stats, 'rejections', 'were unsuccessful',
        item_name=None, committed=False, details=True)

    cq_stats.print_attempt_counts(
        stats, 'rejections', 'failed jobs',
        item_name=None, committed=False)

    return self.expectations
| 588 |
  def test_print_duration(self):
    """Smoke-test print_duration() against empty default stats.

    Returns captured output lines for expectation-based comparison.
    """
    self.mock(cq_stats, 'output', self.print_mock)

    cq_stats.print_duration('mean', Args(), cq_stats.default_stats(), None)
    return self.expectations
| 594 |
  def test_print_usage(self):
    """Exercise print_usage() with and without manual bot commits.

    Returns captured output lines for expectation-based comparison.
    """
    self.mock(cq_stats, 'output', self.print_mock)

    stats = cq_stats.default_stats()
    stats['usage'] = cq_stats.derive_log_stats([], [])
    cq_stats.print_usage(Args(), stats, stats)

    # Cover the branch where bots landed commits manually.
    stats['usage']['bot_manual_commits'] += 1
    cq_stats.print_usage(Args(), stats, stats)

    return self.expectations
| 606 |
  # Expectation: must print stats in a certain format.
  # Assumption: input stats at minimum have the keys from
  # default_stats(). This is verified in test_organize_stats().
  def test_print_stats(self):
    """Exercise print_stats() across missing/swapped/normal stats sets.

    Returns captured output lines for expectation-based comparison.
    """
    self.mock(cq_stats, 'output', self.print_mock)
    args = Args()
    stats_set = cq_stats.default_stats()
    stats_set['begin'] = args.date
    stats_set['end'] = args.date + datetime.timedelta(days=7)

    stats_set['jobs'].update({
        'foo_builder': {
            'pass-count': 100,
            'false-reject-count': 1,
        },
    })

    # A stats set whose begin/end dates are reversed.
    swapped_stats = copy.deepcopy(stats_set)
    swapped_stats['begin'], swapped_stats['end'] = (
        swapped_stats['end'], swapped_stats['begin'])

    cq_stats.print_stats(args, {'latest': None, 'previous': stats_set})
    cq_stats.print_stats(args, {'latest': stats_set, 'previous': None})
    cq_stats.print_stats(args, {'latest': swapped_stats, 'previous': stats_set})
    cq_stats.print_stats(args, {'latest': stats_set, 'previous': stats_set})
    return self.expectations
| 633 |
  def test_print_log_stats(self):
    """Exercise print_stats() on the use_logs=True path.

    Returns captured output lines for expectation-based comparison.
    """
    self.mock(cq_stats, 'output', self.print_mock)
    args = Args(use_logs=True)
    stats_set = cq_stats.default_stats()
    stats_set['begin'] = args.date
    stats_set['end'] = args.date + datetime.timedelta(days=7)

    cq_stats.print_stats(args, {'latest': stats_set, 'previous': stats_set})
    return self.expectations
| 643 |
  def test_acquire_stats(self):
    """acquire_stats() runs for both projects and every log-based range."""
    # Stub out all network/derivation helpers with cheap fakes.
    self.mock(cq_stats, 'fetch_json', lambda _: 'json')
    self.mock(cq_stats, 'organize_stats',
              lambda *_args, **_kwargs: {
                  'latest': cq_stats.default_stats(),
                  'previous': cq_stats.default_stats()})
    self.mock(cq_stats, 'derive_stats', lambda *_args, **_kwargs: {})
    self.mock(cq_stats, 'derive_tree_stats',
              lambda *_: {'open': 0.0, 'total': 3600.0})
    self.mock(cq_stats, 'derive_git_stats', lambda *_: {})
    self.mock(cq_stats, 'derive_svn_stats', lambda *_: {})

    cq_stats.acquire_stats(Args(project='blink', bots=[]))
    cq_stats.acquire_stats(Args(project='chromium', bots=[]))
    # Log-based stats for every supported range value.
    cq_stats.acquire_stats(Args(
        project='chromium', bots=[], use_logs=True, range='week'))
    cq_stats.acquire_stats(Args(
        project='chromium', bots=[], use_logs=True, range='day'))
    cq_stats.acquire_stats(Args(
        project='chromium', bots=[], use_logs=True, range='hour'))
| 664 |
  def test_main(self):
    """End-to-end smoke test of main() with all collaborators mocked.

    Returns captured output lines for expectation-based comparison.
    """
    self.mock(cq_stats, 'output', self.print_mock)
    self.mock(cq_stats, 'parse_args', lambda: Args(
        project='chromium', log_level=logging.CRITICAL, logs_black_list=None))
    self.mock(cq_stats, 'acquire_stats', lambda _: cq_stats.default_stats())
    cq_stats.main()
    return self.expectations
OLD | NEW |