OLD | NEW |
---|---|
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 from common import chromium_utils | |
6 import json | 7 import json |
7 import os | 8 import os |
8 import re | 9 import re |
9 import tempfile | 10 import tempfile |
10 | 11 |
11 | 12 |
12 def CompressList(lines, max_length, middle_replacement): | 13 def CompressList(lines, max_length, middle_replacement): |
13 """Ensures that |lines| is no longer than |max_length|. If |lines| need to | 14 """Ensures that |lines| is no longer than |max_length|. If |lines| need to |
14 be compressed then the middle items are replaced by |middle_replacement|. | 15 be compressed then the middle items are replaced by |middle_replacement|. |
15 """ | 16 """ |
(...skipping 396 matching lines...) | |
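The body of CompressList falls inside the skipped region above. A minimal sketch of behavior consistent with the docstring, assuming the list is trimmed to a head and a tail around a single replacement marker (the exact split is an assumption, not taken from this CL):

    def CompressList(lines, max_length, middle_replacement):
      if len(lines) <= max_length:
        return lines
      # Reserve one slot for the marker, then keep roughly half of the
      # remaining budget from the head and the rest from the tail.
      remaining = max_length - 1
      head = remaining // 2
      tail = remaining - head
      return lines[:head] + [middle_replacement] + lines[len(lines) - tail:]

    # Under this sketch, CompressList(['1', '2', '3', '4', '5', '6'], 4, '<snip>')
    # returns ['1', '<snip>', '5', '6'].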
412 | 413 |
413 def __init__(self): | 414 def __init__(self): |
414 self.json_file_path = None | 415 self.json_file_path = None |
415 self.delete_json_file = False | 416 self.delete_json_file = False |
416 | 417 |
417 self.disabled_tests = set() | 418 self.disabled_tests = set() |
418 self.passed_tests = set() | 419 self.passed_tests = set() |
419 self.failed_tests = set() | 420 self.failed_tests = set() |
420 self.flaky_tests = set() | 421 self.flaky_tests = set() |
421 self.test_logs = {} | 422 self.test_logs = {} |
423 self.ignored_failed_tests = set() | |
422 | 424 |
423 self.parsing_errors = [] | 425 self.parsing_errors = [] |
424 | 426 |
425 self.master_name = None | 427 self.master_name = None |
426 | 428 |
427 def ProcessLine(self, line): | 429 def ProcessLine(self, line): |
428 # Deliberately do nothing - we parse out-of-band JSON summary | 430 # Deliberately do nothing - we parse out-of-band JSON summary |
429 # instead of in-band stdout. | 431 # instead of in-band stdout. |
430 pass | 432 pass |
431 | 433 |
432 def PassedTests(self): | 434 def PassedTests(self): |
433 return sorted(self.passed_tests) | 435 return sorted(self.passed_tests) |
434 | 436 |
435 def FailedTests(self, include_fails=False, include_flaky=False): | 437 def FailedTests(self, include_fails=False, include_flaky=False): |
436 return sorted(self.failed_tests) | 438 return sorted(self.failed_tests - self.ignored_failed_tests) |
437 | 439 |
438 def FailureDescription(self, test): | 440 def FailureDescription(self, test): |
439 return self.test_logs.get(test, []) | 441 return self.test_logs.get(test, []) |
440 | 442 |
443 def IgnoredFailedTests(self): | |
444 return sorted(self.ignored_failed_tests) | |
445 | |
441 @staticmethod | 446 @staticmethod |
442 def SuppressionHashes(): | 447 def SuppressionHashes(): |
443 return [] | 448 return [] |
444 | 449 |
445 def ParsingErrors(self): | 450 def ParsingErrors(self): |
446 return self.parsing_errors | 451 return self.parsing_errors |
447 | 452 |
448 def ClearParsingErrors(self): | 453 def ClearParsingErrors(self): |
449 self.parsing_errors = ['Cleared.'] | 454 self.parsing_errors = ['Cleared.'] |
450 | 455 |
(...skipping 12 matching lines...) | |
463 self.json_file_path = cmdline_path | 468 self.json_file_path = cmdline_path |
464 # If the caller requested JSON summary, do not delete it. | 469 # If the caller requested JSON summary, do not delete it. |
465 self.delete_json_file = False | 470 self.delete_json_file = False |
466 else: | 471 else: |
467 fd, self.json_file_path = tempfile.mkstemp() | 472 fd, self.json_file_path = tempfile.mkstemp() |
468 os.close(fd) | 473 os.close(fd) |
469 # When we create the file ourselves, delete it to avoid littering. | 474 # When we create the file ourselves, delete it to avoid littering. |
470 self.delete_json_file = True | 475 self.delete_json_file = True |
471 return self.json_file_path | 476 return self.json_file_path |
472 | 477 |
473 def ProcessJSONFile(self): | 478 def ProcessJSONFile(self, build_dir): |
474 if not self.json_file_path: | 479 if not self.json_file_path: |
475 return | 480 return |
476 | 481 |
477 with open(self.json_file_path) as json_file: | 482 with open(self.json_file_path) as json_file: |
478 try: | 483 try: |
479 json_output = json_file.read() | 484 json_output = json_file.read() |
480 json_data = json.loads(json_output) | 485 json_data = json.loads(json_output) |
481 except ValueError: | 486 except ValueError: |
482 # Only signal parsing error if the file is non-empty. Empty file | 487 # Only signal parsing error if the file is non-empty. Empty file |
483 # most likely means the binary doesn't support JSON output. | 488 # most likely means the binary doesn't support JSON output. |
484 if json_output: | 489 if json_output: |
485 self.parsing_errors = json_output.split('\n') | 490 self.parsing_errors = json_output.split('\n') |
486 else: | 491 else: |
487 self.ProcessJSONData(json_data) | 492 self.ProcessJSONData(json_data, build_dir) |
488 | 493 |
489 if self.delete_json_file: | 494 if self.delete_json_file: |
490 os.remove(self.json_file_path) | 495 os.remove(self.json_file_path) |
491 | 496 |
492 def ProcessJSONData(self, json_data): | 497 @staticmethod |
498 def ParseIgnoredFailedTestSpec(chrome_dir): | 
499 """Returns parsed ignored failed test spec. | |
500 | |
501 Args: | |
502 chrome_dir: Any directory within the Chrome checkout, used as a reference | 
503 to find the ignored failed tests spec file. | 
504 | |
505 Returns: | |
506 A list of tuples (test_name, platforms), where platforms is a list of sets | |
507 of platform flags. For example: | |
508 | |
509 [('MyTest.TestOne', [set(['OS_WIN', 'CPU_32_BITS', 'MODE_RELEASE']), | 
510 set(['OS_LINUX', 'CPU_64_BITS', 'MODE_DEBUG'])]), | 
511 ('MyTest.TestTwo', [set(['OS_MACOSX', 'CPU_64_BITS', 'MODE_RELEASE']), | 
512 set(['CPU_32_BITS'])]), | 
513 ('MyTest.TestThree', [set()])] | 
514 """ | |
515 | |
516 try: | |
517 ignored_failed_tests_path = chromium_utils.FindUpward( | |
518 os.path.abspath(chrome_dir), 'tools', 'ignorer_bot', | |
ghost stip (do not use)
2014/07/11 18:24:07
I'm not sure I understand what is going on here. I
Sergiy Byelozyorov
2014/07/16 16:23:25
In the runtest.py we only have a path to the build
| |
519 'ignored_failed_tests.txt') | |
520 except chromium_utils.PathNotFound: | |
521 return | |
522 | |
523 with open(ignored_failed_tests_path) as ignored_failed_tests_file: | |
524 ignored_failed_tests_spec = ignored_failed_tests_file.readlines() | |
525 | |
526 parsed_spec = [] | |
527 for spec_line in ignored_failed_tests_spec: | |
528 spec_line = spec_line.strip() | |
529 if spec_line.startswith('#') or not spec_line: | |
530 continue | |
531 | |
532 # Any number of platform flag identifiers separated by whitespace. | 
533 platform_spec_regexp = r'[A-Za-z0-9_\s]*' | |
534 | |
535 match = re.match( | |
ghost stip (do not use)
2014/07/11 18:24:07
Again, I strongly recommend using YAML for this. T
Sergiy Byelozyorov
2014/07/16 16:23:25
Let's discuss this today.
| |
536 r'^http://crbug.com/\d+' # Issue URL. | |
ghost stip (do not use)
2014/07/11 18:24:07
https://
Sergiy Byelozyorov
2014/07/16 16:23:25
In fact this should go away altogether as now we j
| |
537 r'\s+' # Some whitespace. | |
538 r'\[(' + # Opening square bracket '['. | |
539 platform_spec_regexp + # At least one platform, and... | |
540 r'(?:,' + # ...separated by commas... | |
541 platform_spec_regexp + # ...any number of additional... | |
542 r')*' # ...platforms. | |
543 r')\]' # Closing square bracket ']'. | |
544 r'\s+' # Some whitespace. | |
545 r'(\S+)$', spec_line) # Test name. | |
546 | |
547 if not match: | |
548 continue | |
549 | |
550 platform_specs = match.group(1).strip() | |
551 test_name = match.group(2).strip() | |
552 | |
553 platforms = [set(platform.split()) | |
554 for platform in platform_specs.split(',')] | |
555 | |
556 parsed_spec.append((test_name, platforms)) | |
557 | |
558 return parsed_spec | |
559 | |
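To illustrate the spec format the regular expression above accepts, consider a hypothetical spec file (bug numbers and test names invented):

    # Comments and blank lines are skipped.
    http://crbug.com/123456 [OS_WIN CPU_32_BITS MODE_RELEASE, OS_LINUX] MyTest.TestOne
    http://crbug.com/123457 [] MyTest.TestThree

It would be parsed into:

    [('MyTest.TestOne', [set(['OS_WIN', 'CPU_32_BITS', 'MODE_RELEASE']),
                         set(['OS_LINUX'])]),
     ('MyTest.TestThree', [set()])]

An empty pair of brackets yields the empty set, i.e. no platform requirement.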
560 | |
561 def _RetrieveIgnoredFailuresForPlatform(self, build_dir, platform_flags): | |
562 """Parses the ignored failed tests spec into self.ignored_failed_tests.""" | |
563 if not build_dir: | |
564 return | |
565 | |
566 platform_flags = set(platform_flags) | |
567 parsed_spec = self.ParseIgnoredFailedTestSpec(build_dir) | 
568 | |
569 if not parsed_spec: | |
570 return | |
571 | |
572 for test_name, platforms in parsed_spec: | |
573 for required_platform_flags in platforms: | |
574 if required_platform_flags.issubset(platform_flags): | |
575 self.ignored_failed_tests.add(test_name) | |
576 break | |
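To make the subset check concrete, assume (hypothetically) that the bot's JSON summary reports global_tags of OS_WIN, CPU_32_BITS and MODE_RELEASE, and that the spec parsed to the example from the docstring above. A test is ignored when any one of its platform specs is fully contained in the bot's flags:

    platform_flags = set(['OS_WIN', 'CPU_32_BITS', 'MODE_RELEASE'])
    parsed_spec = [
        ('MyTest.TestOne', [set(['OS_WIN', 'CPU_32_BITS', 'MODE_RELEASE']),
                            set(['OS_LINUX', 'CPU_64_BITS', 'MODE_DEBUG'])]),
        ('MyTest.TestTwo', [set(['OS_MACOSX', 'CPU_64_BITS', 'MODE_RELEASE']),
                            set(['CPU_32_BITS'])]),
        ('MyTest.TestThree', [set()]),
    ]
    ignored = set(name for name, platforms in parsed_spec
                  if any(spec.issubset(platform_flags) for spec in platforms))
    # MyTest.TestOne:   its first spec equals the bot's flags       -> ignored
    # MyTest.TestTwo:   its second spec {'CPU_32_BITS'} is a subset -> ignored
    # MyTest.TestThree: the empty set is a subset of any flag set   -> ignored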
577 | |
578 def ProcessJSONData(self, json_data, build_dir=None): | |
493 # TODO(phajdan.jr): Require disabled_tests to be present (May 2014). | 579 # TODO(phajdan.jr): Require disabled_tests to be present (May 2014). |
494 self.disabled_tests.update(json_data.get('disabled_tests', [])) | 580 self.disabled_tests.update(json_data.get('disabled_tests', [])) |
581 self._RetrieveIgnoredFailuresForPlatform(build_dir, | |
582 json_data.get('global_tags', [])) | |
495 | 583 |
496 for iteration_data in json_data['per_iteration_data']: | 584 for iteration_data in json_data['per_iteration_data']: |
497 for test_name, test_runs in iteration_data.iteritems(): | 585 for test_name, test_runs in iteration_data.iteritems(): |
498 if test_runs[-1]['status'] == 'SUCCESS': | 586 if test_runs[-1]['status'] == 'SUCCESS': |
499 self.passed_tests.add(test_name) | 587 self.passed_tests.add(test_name) |
500 else: | 588 else: |
501 self.failed_tests.add(test_name) | 589 self.failed_tests.add(test_name) |
502 | 590 |
503 if len(test_runs) > 1: | 591 if len(test_runs) > 1: |
504 self.flaky_tests.add(test_name) | 592 self.flaky_tests.add(test_name) |
505 | 593 |
506 self.test_logs.setdefault(test_name, []) | 594 self.test_logs.setdefault(test_name, []) |
507 for run_index, run_data in enumerate(test_runs, start=1): | 595 for run_index, run_data in enumerate(test_runs, start=1): |
508 run_lines = ['%s (run #%d):' % (test_name, run_index)] | 596 run_lines = ['%s (run #%d):' % (test_name, run_index)] |
509 # Make sure the annotations are ASCII to avoid character set related | 597 # Make sure the annotations are ASCII to avoid character set related |
510 # errors. They are mostly informational anyway, and more detailed | 598 # errors. They are mostly informational anyway, and more detailed |
511 # info can be obtained from the original JSON output. | 599 # info can be obtained from the original JSON output. |
512 ascii_lines = run_data['output_snippet'].encode('ascii', | 600 ascii_lines = run_data['output_snippet'].encode('ascii', |
513 errors='replace') | 601 errors='replace') |
514 decoded_lines = CompressList( | 602 decoded_lines = CompressList( |
515 ascii_lines.decode('string_escape').split('\n'), | 603 ascii_lines.decode('string_escape').split('\n'), |
516 self.OUTPUT_SNIPPET_LINES_LIMIT, | 604 self.OUTPUT_SNIPPET_LINES_LIMIT, |
517 '<truncated, full output is in gzipped JSON ' | 605 '<truncated, full output is in gzipped JSON ' |
518 'output at end of step>') | 606 'output at end of step>') |
519 run_lines.extend(decoded_lines) | 607 run_lines.extend(decoded_lines) |
520 self.test_logs[test_name].extend(run_lines) | 608 self.test_logs[test_name].extend(run_lines) |
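For reference, the JSON summary consumed by ProcessJSONData has roughly the shape below. Only the keys are taken from the code above (disabled_tests, global_tags, per_iteration_data, status, output_snippet); the concrete values are invented:

    {
      "disabled_tests": ["MyTest.DisabledCase"],
      "global_tags": ["OS_WIN", "CPU_32_BITS", "MODE_RELEASE"],
      "per_iteration_data": [
        {
          "MyTest.TestOne": [
            {"status": "FAILURE", "output_snippet": "first run output..."},
            {"status": "SUCCESS", "output_snippet": "retry output..."}
          ]
        }
      ]
    }

With two runs recorded and the last one SUCCESS, the parser marks MyTest.TestOne as both passed and flaky.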
OLD | NEW |