| Index: tools/accessibility/rebase_dump_accessibility_tree_test.py
|
| diff --git a/tools/accessibility/rebase_dump_accessibility_tree_test.py b/tools/accessibility/rebase_dump_accessibility_tree_test.py
|
| index 560114458e9f69e1b3933eaba3d7d1daf6acab5a..2a6ccb08e1e8c42ccdfbba75fbc25daac60f78c0 100755
|
| --- a/tools/accessibility/rebase_dump_accessibility_tree_test.py
|
| +++ b/tools/accessibility/rebase_dump_accessibility_tree_test.py
|
| @@ -16,6 +16,7 @@ expectation files locally. From there you can run 'git diff' to make sure all
|
| of the changes look reasonable, then upload the change for code review.
|
| """
|
|
|
| +import json
|
| import os
|
| import re
|
| import sys
|
| @@ -42,65 +43,84 @@ def GitClIssue():
|
|
|
| def ParseFailure(name, url):
|
| '''Parse given the name of a failing trybot and the url of its build log.'''
|
| + print
|
| + print "Checking trybot: %s" % name
|
| + url = url.replace('/builders/', '/json/builders/')
|
| +  response = urllib.urlopen(url)
|
| +  ok = response.getcode() == 200
|
| +  jsondata = response.read() if ok else None
|
|
|
| - # Figure out the platform.
|
| - if name.find('android') >= 0:
|
| - platform_suffix = '-expected-android.txt'
|
| - elif name.find('mac') >= 0:
|
| - platform_suffix = '-expected-mac.txt'
|
| - elif name.find('win') >= 0:
|
| - platform_suffix = '-expected-win.txt'
|
| - else:
|
| + if not jsondata:
|
| + print "Failed to fetch from: " + url
|
| return
|
|
|
| - # Read the content_browsertests log file.
|
| - data = None
|
| - lines = None
|
| - urls = []
|
| - for url_suffix in [
|
| - '/steps/content_browsertests%20(with%20patch)/logs/stdio/text',
|
| - '/steps/content_browsertests/logs/stdio/text']:
|
| - urls.append(url + url_suffix)
|
| - for url in urls:
|
| - response = urllib.urlopen(url)
|
| - if response.getcode() == 200:
|
| - data = response.read()
|
| - lines = data.splitlines()
|
| - break
|
| -
|
| - if not data:
|
| + try:
|
| + data = json.loads(jsondata)
|
| +  except ValueError:
|
| + print "Failed to parse JSON from: " + url
|
| return
|
|
|
| - # Parse the log file for failing tests and overwrite the expected
|
| - # result file locally with the actual results from the log.
|
| - test_name = None
|
| + for step in data["steps"]:
|
| + name = step["name"]
|
| + if name[:len("content_browsertests")] == "content_browsertests":
|
| + if name.find("without") >= 0:
|
| + continue
|
| + if name.find("retry") >= 0:
|
| + continue
|
| + print "Found content_browsertests logs"
|
| + for log in step["logs"]:
|
| + (log_name, log_url) = log
|
| + if log_name == "stdio":
|
| + continue
|
| + log_url += '/text'
|
| + log_response = urllib.urlopen(log_url)
|
| + if log_response.getcode() == 200:
|
| + logdata = log_response.read()
|
| + ParseLog(logdata)
|
| + else:
|
| +          print "Failed to fetch test log data from: " + log_url
|
| +
|
| +def Fix(line):
|
| + if line[:3] == '@@@':
|
| + try:
|
| + line = re.search('[^@]@([^@]*)@@@', line).group(1)
|
| +    except AttributeError:
|
| + pass
|
| + return line
|
| +
|
| +def ParseLog(logdata):
|
| + '''Parse the log file for failing tests and overwrite the expected
|
| + result file locally with the actual results from the log.'''
|
| + lines = logdata.splitlines()
|
| + test_file = None
|
| + expected_file = None
|
| start = None
|
| - filename = None
|
| for i in range(len(lines)):
|
| - line = lines[i]
|
| - if line[:12] == '[ RUN ]':
|
| - test_name = line[13:]
|
| - if test_name and line[:8] == 'Testing:':
|
| - filename = re.search('content.test.*accessibility.(.*)', line).group(1)
|
| - if test_name and line == 'Actual':
|
| + line = Fix(lines[i])
|
| + if line.find('Testing:') >= 0:
|
| + test_file = re.search(
|
| + 'content.test.*accessibility.([^@]*)', line).group(1)
|
| + expected_file = None
|
| + start = None
|
| + if line.find('Expected output:') >= 0:
|
| + expected_file = re.search(
|
| + 'content.test.*accessibility.([^@]*)', line).group(1)
|
| + if line == 'Actual':
|
| start = i + 2
|
| - if start and test_name and filename and line[:12] == '[ FAILED ]':
|
| - # Get the path to the html file.
|
| - dst_fullpath = os.path.join(TEST_DATA_PATH, filename)
|
| - # Strip off .html and replace it with the platform expected suffix.
|
| - dst_fullpath = dst_fullpath[:-5] + platform_suffix
|
| + if start and test_file and expected_file and line.find('End-of-file') >= 0:
|
| + dst_fullpath = os.path.join(TEST_DATA_PATH, expected_file)
|
| if dst_fullpath in completed_files:
|
| continue
|
|
|
| - actual = [line for line in lines[start : i - 1] if line]
|
| + actual = [Fix(line) for line in lines[start : i] if line]
|
| fp = open(dst_fullpath, 'w')
|
| fp.write('\n'.join(actual))
|
| fp.close()
|
| - print dst_fullpath
|
| + print "* %s" % os.path.relpath(dst_fullpath)
|
| completed_files.add(dst_fullpath)
|
| start = None
|
| - test_name = None
|
| - filename = None
|
| + test_file = None
|
| + expected_file = None
|
|
|
| def ParseTrybots(data):
|
| '''Parse the code review page to find links to try bots.'''
|
| @@ -126,8 +146,20 @@ def Run():
|
| response = urllib.urlopen(url)
|
| if response.getcode() != 200:
|
| print 'Error code %d accessing url: %s' % (response.getcode(), url)
|
| + return
|
| data = response.read()
|
| ParseTrybots(data)
|
|
|
| + print
|
| + if len(completed_files) == 0:
|
| + print "No output from DumpAccessibilityTree test results found."
|
| + return
|
| + else:
|
| + print "Summary: modified the following files:"
|
| + all_files = list(completed_files)
|
| + all_files.sort()
|
| + for f in all_files:
|
| + print "* %s" % os.path.relpath(f)
|
| +
|
| if __name__ == '__main__':
|
| sys.exit(Run())
|
|
|