Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(216)

Side by Side Diff: scripts/slave/runtest.py

Issue 548773004: Rename process_log_utils and related things. (Closed) Base URL: https://chromium.googlesource.com/chromium/tools/build.git@master
Patch Set: Renamed remaining occurrences of "results_tracker". Created 6 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """A tool used to run a Chrome test executable and process the output. 6 """A tool used to run a Chrome test executable and process the output.
7 7
8 This script is used by the buildbot slaves. It must be run from the outer 8 This script is used by the buildbot slaves. It must be run from the outer
9 build directory, e.g. chrome-release/build/. 9 build directory, e.g. chrome-release/build/.
10 10
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
44 from common import chromium_utils 44 from common import chromium_utils
45 from common import gtest_utils 45 from common import gtest_utils
46 46
47 # TODO(crbug.com/403564). We almost certainly shouldn't be importing this. 47 # TODO(crbug.com/403564). We almost certainly shouldn't be importing this.
48 import config 48 import config
49 49
50 from slave import annotation_utils 50 from slave import annotation_utils
51 from slave import build_directory 51 from slave import build_directory
52 from slave import crash_utils 52 from slave import crash_utils
53 from slave import gtest_slave_utils 53 from slave import gtest_slave_utils
54 from slave import process_log_utils 54 from slave import performance_log_processor
55 from slave import results_dashboard 55 from slave import results_dashboard
56 from slave import slave_utils 56 from slave import slave_utils
57 from slave import xvfb 57 from slave import xvfb
58 58
59 USAGE = '%s [options] test.exe [test args]' % os.path.basename(sys.argv[0]) 59 USAGE = '%s [options] test.exe [test args]' % os.path.basename(sys.argv[0])
60 60
61 CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox' 61 CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox'
62 62
63 # Directory to write JSON for test results into. 63 # Directory to write JSON for test results into.
64 DEST_DIR = 'gtest_results' 64 DEST_DIR = 'gtest_results'
65 65
66 # Names of httpd configuration file under different platforms. 66 # Names of httpd configuration file under different platforms.
67 HTTPD_CONF = { 67 HTTPD_CONF = {
68 'linux': 'httpd2_linux.conf', 68 'linux': 'httpd2_linux.conf',
69 'mac': 'httpd2_mac.conf', 69 'mac': 'httpd2_mac.conf',
70 'win': 'httpd.conf' 70 'win': 'httpd.conf'
71 } 71 }
72 # Regex matching git comment lines containing svn revision info. 72 # Regex matching git comment lines containing svn revision info.
73 GIT_SVN_ID_RE = re.compile('^git-svn-id: .*@([0-9]+) .*$') 73 GIT_SVN_ID_RE = re.compile('^git-svn-id: .*@([0-9]+) .*$')
74 # Regex for the master branch commit position. 74 # Regex for the master branch commit position.
75 GIT_CR_POS_RE = re.compile('^Cr-Commit-Position: refs/heads/master@{#(\d+)}$') 75 GIT_CR_POS_RE = re.compile('^Cr-Commit-Position: refs/heads/master@{#(\d+)}$')
76 76
77 # The directory that this script is in. 77 # The directory that this script is in.
78 BASE_DIR = os.path.dirname(os.path.abspath(__file__)) 78 BASE_DIR = os.path.dirname(os.path.abspath(__file__))
79 79
80 LOG_PROCESSOR_CLASSES = {
81 'gtest': gtest_utils.GTestLogParser,
82 'graphing': performance_log_processor.GraphingLogProcessor,
83 'pagecycler': performance_log_processor.GraphingPageCyclerLogProcessor,
84 }
85
80 86
81 def _ShouldEnableSandbox(sandbox_path): 87 def _ShouldEnableSandbox(sandbox_path):
82 """Checks whether the current slave should use the sandbox. 88 """Checks whether the current slave should use the sandbox.
83 89
84 This is based on should_enable_sandbox in src/testing/test_env.py. 90 This is based on should_enable_sandbox in src/testing/test_env.py.
85 91
86 Args: 92 Args:
87 sandbox_path: Path to sandbox file. 93 sandbox_path: Path to sandbox file.
88 94
89 Returns: 95 Returns:
(...skipping 69 matching lines...) Expand 10 before | Expand all | Expand 10 after
159 except OSError as e: 165 except OSError as e:
160 print ' error killing dbus-daemon with PID %s: %s' % (dbus_pid, e) 166 print ' error killing dbus-daemon with PID %s: %s' % (dbus_pid, e)
161 # Try to clean up any stray DBUS_SESSION_BUS_ADDRESS environment 167 # Try to clean up any stray DBUS_SESSION_BUS_ADDRESS environment
162 # variable too. Some of the bots seem to re-invoke runtest.py in a 168 # variable too. Some of the bots seem to re-invoke runtest.py in a
163 # way that this variable sticks around from run to run. 169 # way that this variable sticks around from run to run.
164 if 'DBUS_SESSION_BUS_ADDRESS' in os.environ: 170 if 'DBUS_SESSION_BUS_ADDRESS' in os.environ:
165 del os.environ['DBUS_SESSION_BUS_ADDRESS'] 171 del os.environ['DBUS_SESSION_BUS_ADDRESS']
166 print ' cleared DBUS_SESSION_BUS_ADDRESS environment variable' 172 print ' cleared DBUS_SESSION_BUS_ADDRESS environment variable'
167 173
168 174
169 def _RunGTestCommand(command, extra_env, results_tracker=None, pipes=None): 175 def _RunGTestCommand(command, extra_env, log_processor=None, pipes=None):
170 """Runs a test, printing and possibly processing the output. 176 """Runs a test, printing and possibly processing the output.
171 177
172 Args: 178 Args:
173 command: A list of strings in a command (the command and its arguments). 179 command: A list of strings in a command (the command and its arguments).
174 extra_env: A dictionary of extra environment variables to set. 180 extra_env: A dictionary of extra environment variables to set.
175 results_tracker: A "log processor" class which has the ProcessLine method. 181 log_processor: A log processor instance which has the ProcessLine method.
176 pipes: A list of command string lists which the output will be piped to. 182 pipes: A list of command string lists which the output will be piped to.
177 183
178 Returns: 184 Returns:
179 The process return code. 185 The process return code.
180 """ 186 """
181 env = os.environ.copy() 187 env = os.environ.copy()
182 if extra_env: 188 if extra_env:
183 print 'Additional test environment:' 189 print 'Additional test environment:'
184 for k, v in sorted(extra_env.items()): 190 for k, v in sorted(extra_env.items()):
185 print ' %s=%s' % (k, v) 191 print ' %s=%s' % (k, v)
186 env.update(extra_env or {}) 192 env.update(extra_env or {})
187 193
188 # Trigger bot mode (test retries, redirection of stdio, possibly faster, 194 # Trigger bot mode (test retries, redirection of stdio, possibly faster,
189 # etc.) - using an environment variable instead of command-line flags because 195 # etc.) - using an environment variable instead of command-line flags because
190 # some internal waterfalls run this (_RunGTestCommand) for totally non-gtest 196 # some internal waterfalls run this (_RunGTestCommand) for totally non-gtest
191 # code. 197 # code.
192 # TODO(phajdan.jr): Clean this up when internal waterfalls are fixed. 198 # TODO(phajdan.jr): Clean this up when internal waterfalls are fixed.
193 env.update({'CHROMIUM_TEST_LAUNCHER_BOT_MODE': '1'}) 199 env.update({'CHROMIUM_TEST_LAUNCHER_BOT_MODE': '1'})
194 200
195 if results_tracker: 201 if log_processor:
196 return chromium_utils.RunCommand( 202 return chromium_utils.RunCommand(
197 command, pipes=pipes, parser_func=results_tracker.ProcessLine, env=env) 203 command, pipes=pipes, parser_func=log_processor.ProcessLine, env=env)
198 else: 204 else:
199 return chromium_utils.RunCommand(command, pipes=pipes, env=env) 205 return chromium_utils.RunCommand(command, pipes=pipes, env=env)
200 206
201 207
202 def _GetMaster(): 208 def _GetMaster():
203 """Returns a master name as listed in the slaves.cfg file.""" 209 """Returns a master name as listed in the slaves.cfg file."""
204 return slave_utils.GetActiveMaster() 210 return slave_utils.GetActiveMaster()
205 211
206 212
207 def _GetMasterString(master): 213 def _GetMasterString(master):
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after
293 The git SHA1 hash string. 299 The git SHA1 hash string.
294 """ 300 """
295 git_exe = 'git.bat' if sys.platform.startswith('win') else 'git' 301 git_exe = 'git.bat' if sys.platform.startswith('win') else 'git'
296 p = subprocess.Popen( 302 p = subprocess.Popen(
297 [git_exe, 'rev-parse', 'HEAD'], 303 [git_exe, 'rev-parse', 'HEAD'],
298 cwd=in_directory, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) 304 cwd=in_directory, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
299 (stdout, _) = p.communicate() 305 (stdout, _) = p.communicate()
300 return stdout.strip() 306 return stdout.strip()
301 307
302 308
303 def _GenerateJSONForTestResults(options, results_tracker): 309 def _GenerateJSONForTestResults(options, log_processor):
304 """Generates or updates a JSON file from the gtest results XML and uploads the 310 """Generates or updates a JSON file from the gtest results XML and uploads the
305 file to the archive server. 311 file to the archive server.
306 312
307 The archived JSON file will be placed at: 313 The archived JSON file will be placed at:
308 www-dir/DEST_DIR/buildname/testname/results.json 314 www-dir/DEST_DIR/buildname/testname/results.json
309 on the archive server. NOTE: This will be deprecated. 315 on the archive server. NOTE: This will be deprecated.
310 316
311 Args: 317 Args:
312 options: command-line options that are supposed to have build_dir, 318 options: command-line options that are supposed to have build_dir,
313 results_directory, builder_name, build_name and test_output_xml values. 319 results_directory, builder_name, build_name and test_output_xml values.
320 log_processor: An instance of PerformanceLogProcessor or similar class.
314 321
315 Returns: 322 Returns:
316 True upon success, False upon failure. 323 True upon success, False upon failure.
317 """ 324 """
318 results_map = None 325 results_map = None
319 try: 326 try:
320 if (os.path.exists(options.test_output_xml) and 327 if (os.path.exists(options.test_output_xml) and
321 not _UsingGtestJson(options)): 328 not _UsingGtestJson(options)):
322 results_map = gtest_slave_utils.GetResultsMapFromXML( 329 results_map = gtest_slave_utils.GetResultsMapFromXML(
323 options.test_output_xml) 330 options.test_output_xml)
324 else: 331 else:
325 if _UsingGtestJson(options): 332 if _UsingGtestJson(options):
326 sys.stderr.write('using JSON summary output instead of gtest XML\n') 333 sys.stderr.write('using JSON summary output instead of gtest XML\n')
327 else: 334 else:
328 sys.stderr.write( 335 sys.stderr.write(
329 ('"%s" \ "%s" doesn\'t exist: Unable to generate JSON from XML, ' 336 ('"%s" \ "%s" doesn\'t exist: Unable to generate JSON from XML, '
330 'using log output.\n') % (os.getcwd(), options.test_output_xml)) 337 'using log output.\n') % (os.getcwd(), options.test_output_xml))
331 # The file did not get generated. See if we can generate a results map 338 # The file did not get generated. See if we can generate a results map
332 # from the log output. 339 # from the log output.
333 results_map = gtest_slave_utils.GetResultsMap(results_tracker) 340 results_map = gtest_slave_utils.GetResultsMap(log_processor)
334 except Exception as e: 341 except Exception as e:
335 # This error will be caught by the following 'not results_map' statement. 342 # This error will be caught by the following 'not results_map' statement.
336 print 'Error: ', e 343 print 'Error: ', e
337 344
338 if not results_map: 345 if not results_map:
339 print 'No data was available to update the JSON results' 346 print 'No data was available to update the JSON results'
340 # Consider this non-fatal. 347 # Consider this non-fatal.
341 return True 348 return True
342 349
343 build_dir = os.path.abspath(options.build_dir) 350 build_dir = os.path.abspath(options.build_dir)
344 slave_name = slave_utils.SlaveBuildName(build_dir) 351 slave_name = slave_utils.SlaveBuildName(build_dir)
345 352
346 generate_json_options = copy.copy(options) 353 generate_json_options = copy.copy(options)
347 generate_json_options.build_name = slave_name 354 generate_json_options.build_name = slave_name
348 generate_json_options.input_results_xml = options.test_output_xml 355 generate_json_options.input_results_xml = options.test_output_xml
349 generate_json_options.builder_base_url = '%s/%s/%s/%s' % ( 356 generate_json_options.builder_base_url = '%s/%s/%s/%s' % (
350 config.Master.archive_url, DEST_DIR, slave_name, options.test_type) 357 config.Master.archive_url, DEST_DIR, slave_name, options.test_type)
351 generate_json_options.master_name = options.master_class_name or _GetMaster() 358 generate_json_options.master_name = options.master_class_name or _GetMaster()
352 generate_json_options.test_results_server = config.Master.test_results_server 359 generate_json_options.test_results_server = config.Master.test_results_server
353 360
354 # Print out master name for log_parser
355 print _GetMasterString(generate_json_options.master_name) 361 print _GetMasterString(generate_json_options.master_name)
356 362
357 generator = None 363 generator = None
358 364
359 try: 365 try:
360 if options.revision: 366 if options.revision:
361 generate_json_options.chrome_revision = options.revision 367 generate_json_options.chrome_revision = options.revision
362 else: 368 else:
363 chrome_dir = chromium_utils.FindUpwardParent(build_dir, 'third_party') 369 chrome_dir = chromium_utils.FindUpwardParent(build_dir, 'third_party')
364 generate_json_options.chrome_revision = _GetRevision(chrome_dir) 370 generate_json_options.chrome_revision = _GetRevision(chrome_dir)
(...skipping 151 matching lines...) Expand 10 before | Expand all | Expand 10 after
516 return http_server 522 return http_server
517 523
518 524
519 def _UsingGtestJson(options): 525 def _UsingGtestJson(options):
520 """Returns True if we're using GTest JSON summary.""" 526 """Returns True if we're using GTest JSON summary."""
521 return (options.annotate == 'gtest' and 527 return (options.annotate == 'gtest' and
522 not options.run_python_script and 528 not options.run_python_script and
523 not options.run_shell_script) 529 not options.run_shell_script)
524 530
525 531
526 def _GetParsers(): 532 def _ListLogProcessors(selection):
527 """Returns a dictionary mapping strings to log parser classes.""" 533 """Prints a list of available log processor classes iff the input is 'list'.
528 parsers = {
529 'gtest': gtest_utils.GTestLogParser,
530 'graphing': process_log_utils.GraphingLogProcessor,
531 'pagecycler': process_log_utils.GraphingPageCyclerLogProcessor,
532 }
533 return parsers
534
535
536 def _ListParsers(selection):
537 """Prints a list of available log parser classes iff the input is 'list'.
538 534
539 Args: 535 Args:
540 selection: A log parser name, or the string "list". 536 selection: A log processor name, or the string "list".
541 537
542 Returns: 538 Returns:
543 True if a list was printed, False otherwise. 539 True if a list was printed, False otherwise.
544 """ 540 """
545 parsers = _GetParsers()
546 shouldlist = selection and selection == 'list' 541 shouldlist = selection and selection == 'list'
547 if shouldlist: 542 if shouldlist:
548 print 543 print
549 print 'Available log parsers:' 544 print 'Available log processors:'
550 for p in parsers: 545 for p in LOG_PROCESSOR_CLASSES:
551 print ' ', p, parsers[p].__name__ 546 print ' ', p, LOG_PROCESSOR_CLASSES[p].__name__
552 547
553 return shouldlist 548 return shouldlist
554 549
555 550
556 def _SelectResultsTracker(options): 551 def _SelectLogProcessor(options):
557 """Returns a log parser class (aka results tracker class). 552 """Returns a log processor class based on the command line options.
558 553
559 Args: 554 Args:
560 options: Command-line options (from OptionParser). 555 options: Command-line options (from OptionParser).
561 556
562 Returns: 557 Returns:
563 A log parser class (aka results tracker class), or None. 558 A log processor class, or None.
564 """ 559 """
565 if _UsingGtestJson(options): 560 if _UsingGtestJson(options):
566 return gtest_utils.GTestJSONParser 561 return gtest_utils.GTestJSONParser
567 562
568 parsers = _GetParsers()
569 if options.annotate: 563 if options.annotate:
570 if options.annotate in parsers: 564 if options.annotate in LOG_PROCESSOR_CLASSES:
571 if options.generate_json_file and options.annotate != 'gtest': 565 if options.generate_json_file and options.annotate != 'gtest':
572 raise NotImplementedError('"%s" doesn\'t make sense with ' 566 raise NotImplementedError('"%s" doesn\'t make sense with '
573 'options.generate_json_file.') 567 'options.generate_json_file.')
574 else: 568 else:
575 return parsers[options.annotate] 569 return LOG_PROCESSOR_CLASSES[options.annotate]
576 else: 570 else:
577 raise KeyError('"%s" is not a valid GTest parser!' % options.annotate) 571 raise KeyError('"%s" is not a valid GTest parser!' % options.annotate)
578 elif options.generate_json_file: 572 elif options.generate_json_file:
579 return parsers['gtest'] 573 return LOG_PROCESSOR_CLASSES['gtest']
580 574
581 return None 575 return None
582 576
583 577
584 def _GetCommitPos(build_properties): 578 def _GetCommitPos(build_properties):
585 """Extract the commit position from the build properties, if it's there.""" 579 """Extracts the commit position from the build properties, if it's there."""
586 if 'got_revision_cp' not in build_properties: 580 if 'got_revision_cp' not in build_properties:
587 return None 581 return None
588 commit_pos = build_properties['got_revision_cp'] 582 commit_pos = build_properties['got_revision_cp']
589 return int(re.search(r'{#(\d+)}', commit_pos).group(1)) 583 return int(re.search(r'{#(\d+)}', commit_pos).group(1))
590 584
591 585
592 def _CreateResultsTracker(tracker_class, options): 586 def _CreateLogProcessor(log_processor_class, options):
593 """Instantiate a log parser (aka results tracker). 587 """Creates a log processor instance.
594 588
595 Args: 589 Args:
596 tracker_class: A log parser class. 590 log_processor_class: A subclass of PerformanceLogProcessor or similar class.
597 options: Command-line options (from OptionParser). 591 options: Command-line options (from OptionParser).
598 592
599 Returns: 593 Returns:
600 An instance of a log parser class, or None. 594 An instance of a log processor class, or None.
601 """ 595 """
602 if not tracker_class: 596 if not log_processor_class:
603 return None 597 return None
604 598
605 if tracker_class.__name__ in ('GTestLogParser',): 599 if log_processor_class.__name__ in ('GTestLogParser',):
606 tracker_obj = tracker_class() 600 tracker_obj = log_processor_class()
607 elif tracker_class.__name__ in ('GTestJSONParser',): 601 elif log_processor_class.__name__ in ('GTestJSONParser',):
608 tracker_obj = tracker_class(options.build_properties.get('mastername')) 602 tracker_obj = log_processor_class(
603 options.build_properties.get('mastername'))
609 else: 604 else:
610 build_dir = os.path.abspath(options.build_dir) 605 build_dir = os.path.abspath(options.build_dir)
611 606
612 if options.webkit_revision: 607 if options.webkit_revision:
613 webkit_revision = options.webkit_revision 608 webkit_revision = options.webkit_revision
614 else: 609 else:
615 try: 610 try:
616 webkit_dir = chromium_utils.FindUpward( 611 webkit_dir = chromium_utils.FindUpward(
617 build_dir, 'third_party', 'WebKit', 'Source') 612 build_dir, 'third_party', 'WebKit', 'Source')
618 webkit_revision = _GetRevision(webkit_dir) 613 webkit_revision = _GetRevision(webkit_dir)
619 except Exception: 614 except Exception:
620 webkit_revision = 'undefined' 615 webkit_revision = 'undefined'
621 616
622 commit_pos_num = _GetCommitPos(options.build_properties) 617 commit_pos_num = _GetCommitPos(options.build_properties)
623 if commit_pos_num is not None: 618 if commit_pos_num is not None:
624 revision = commit_pos_num 619 revision = commit_pos_num
625 elif options.revision: 620 elif options.revision:
626 revision = options.revision 621 revision = options.revision
627 else: 622 else:
628 revision = _GetRevision(os.path.dirname(build_dir)) 623 revision = _GetRevision(os.path.dirname(build_dir))
629 624
630 tracker_obj = tracker_class( 625 tracker_obj = log_processor_class(
631 revision=revision, 626 revision=revision,
632 build_properties=options.build_properties, 627 build_properties=options.build_properties,
633 factory_properties=options.factory_properties, 628 factory_properties=options.factory_properties,
634 webkit_revision=webkit_revision) 629 webkit_revision=webkit_revision)
635 630
636 if options.annotate and options.generate_json_file: 631 if options.annotate and options.generate_json_file:
637 tracker_obj.ProcessLine(_GetMasterString(_GetMaster())) 632 tracker_obj.ProcessLine(_GetMasterString(_GetMaster()))
638 633
639 return tracker_obj 634 return tracker_obj
640 635
(...skipping 12 matching lines...) Expand all
653 supplemental_columns = {} 648 supplemental_columns = {}
654 supplemental_columns_file = os.path.join(build_dir, 649 supplemental_columns_file = os.path.join(build_dir,
655 results_dashboard.CACHE_DIR, 650 results_dashboard.CACHE_DIR,
656 supplemental_colummns_file_name) 651 supplemental_colummns_file_name)
657 if os.path.exists(supplemental_columns_file): 652 if os.path.exists(supplemental_columns_file):
658 with file(supplemental_columns_file, 'r') as f: 653 with file(supplemental_columns_file, 'r') as f:
659 supplemental_columns = json.loads(f.read()) 654 supplemental_columns = json.loads(f.read())
660 return supplemental_columns 655 return supplemental_columns
661 656
662 657
663 def _SendResultsToDashboard(results_tracker, system, test, url, build_dir, 658 def _SendResultsToDashboard(log_processor, system, test, url, build_dir,
664 mastername, buildername, buildnumber, 659 mastername, buildername, buildnumber,
665 supplemental_columns_file, extra_columns=None): 660 supplemental_columns_file, extra_columns=None):
666 """Sends results from a results tracker (aka log parser) to the dashboard. 661 """Sends results from a log processor instance to the dashboard.
667 662
668 Args: 663 Args:
669 results_tracker: An instance of a log parser class, which has been used to 664 log_processor: An instance of a log processor class, which has been used to
670 process the test output, so it contains the test results. 665 process the test output, so it contains the test results.
671 system: A string such as 'linux-release', which comes from perf_id. 666 system: A string such as 'linux-release', which comes from perf_id.
672 test: Test "suite" name string. 667 test: Test "suite" name string.
673 url: Dashboard URL. 668 url: Dashboard URL.
674 build_dir: Build dir name (used for cache file by results_dashboard). 669 build_dir: Build dir name (used for cache file by results_dashboard).
675 mastername: Buildbot master name, e.g. 'chromium.perf'. 670 mastername: Buildbot master name, e.g. 'chromium.perf'.
676 WARNING! This is incorrectly called "masterid" in some parts of the 671 WARNING! This is incorrectly called "masterid" in some parts of the
677 dashboard code. 672 dashboard code.
678 buildername: Builder name, e.g. 'Linux QA Perf (1)' 673 buildername: Builder name, e.g. 'Linux QA Perf (1)'
679 buildnumber: Build number (as a string). 674 buildnumber: Build number (as a string).
680 supplemental_columns_file: Filename for JSON supplemental columns file. 675 supplemental_columns_file: Filename for JSON supplemental columns file.
681 extra_columns: A dict of extra values to add to the supplemental columns 676 extra_columns: A dict of extra values to add to the supplemental columns
682 dict. 677 dict.
683 """ 678 """
684 if system is None: 679 if system is None:
685 # perf_id not specified in factory properties. 680 # perf_id not specified in factory properties.
686 print 'Error: No system name (perf_id) specified when sending to dashboard.' 681 print 'Error: No system name (perf_id) specified when sending to dashboard.'
687 return 682 return
688 supplemental_columns = _GetSupplementalColumns( 683 supplemental_columns = _GetSupplementalColumns(
689 build_dir, supplemental_columns_file) 684 build_dir, supplemental_columns_file)
690 if extra_columns: 685 if extra_columns:
691 supplemental_columns.update(extra_columns) 686 supplemental_columns.update(extra_columns)
692 687
693 charts = _GetDataFromLogProcessor(results_tracker) 688 charts = _GetDataFromLogProcessor(log_processor)
694 points = results_dashboard.MakeListOfPoints( 689 points = results_dashboard.MakeListOfPoints(
695 charts, system, test, mastername, buildername, buildnumber, 690 charts, system, test, mastername, buildername, buildnumber,
696 supplemental_columns) 691 supplemental_columns)
697 results_dashboard.SendResults(points, url, build_dir) 692 results_dashboard.SendResults(points, url, build_dir)
698 693
699 694
700 def _GetDataFromLogProcessor(log_processor): 695 def _GetDataFromLogProcessor(log_processor):
701 """Returns a mapping of chart names to chart data. 696 """Returns a mapping of chart names to chart data.
702 697
703 Args: 698 Args:
704 log_processor: A log processor (aka results tracker) object. 699 log_processor: A log processor (aka results tracker) object.
705 700
706 Returns: 701 Returns:
707 A dictionary mapping chart name to lists of chart data. 702 A dictionary mapping chart name to lists of chart data.
708 put together in process_log_utils. Each chart data dictionary contains: 703 put together in log_processor. Each chart data dictionary contains:
709 "traces": A dictionary mapping trace names to value, stddev pairs. 704 "traces": A dictionary mapping trace names to value, stddev pairs.
710 "units": Units for the chart. 705 "units": Units for the chart.
711 "rev": A revision number or git hash. 706 "rev": A revision number or git hash.
712 Plus other revision keys, e.g. webkit_rev, ver, v8_rev. 707 Plus other revision keys, e.g. webkit_rev, ver, v8_rev.
713 """ 708 """
714 charts = {} 709 charts = {}
715 for log_file_name, line_list in log_processor.PerformanceLogs().iteritems(): 710 for log_file_name, line_list in log_processor.PerformanceLogs().iteritems():
716 if not log_file_name.endswith('-summary.dat'): 711 if not log_file_name.endswith('-summary.dat'):
717 # The log processor data also contains "graphs list" file contents, 712 # The log processor data also contains "graphs list" file contents,
718 # which we can ignore. 713 # which we can ignore.
719 continue 714 continue
720 chart_name = log_file_name.replace('-summary.dat', '') 715 chart_name = log_file_name.replace('-summary.dat', '')
721 716
722 # It's assumed that the log lines list has length one, because for each 717 # It's assumed that the log lines list has length one, because for each
723 # graph name only one line is added in process_log_utils in the method 718 # graph name only one line is added in log_processor in the method
724 # GraphingLogProcessor._CreateSummaryOutput. 719 # GraphingLogProcessor._CreateSummaryOutput.
725 if len(line_list) != 1: 720 if len(line_list) != 1:
726 print 'Error: Unexpected log processor line list: %s' % str(line_list) 721 print 'Error: Unexpected log processor line list: %s' % str(line_list)
727 continue 722 continue
728 line = line_list[0].rstrip() 723 line = line_list[0].rstrip()
729 try: 724 try:
730 charts[chart_name] = json.loads(line) 725 charts[chart_name] = json.loads(line)
731 except ValueError: 726 except ValueError:
732 print 'Error: Could not parse JSON: %s' % line 727 print 'Error: Could not parse JSON: %s' % line
733 return charts 728 return charts
(...skipping 211 matching lines...) Expand 10 before | Expand all | Expand 10 after
945 perf_id = options.factory_properties.get('perf_id') 940 perf_id = options.factory_properties.get('perf_id')
946 if options.factory_properties.get('add_perf_id_suffix'): 941 if options.factory_properties.get('add_perf_id_suffix'):
947 perf_id += options.build_properties.get('perf_id_suffix') 942 perf_id += options.build_properties.get('perf_id_suffix')
948 return perf_id 943 return perf_id
949 944
950 945
951 def _MainParse(options, _args): 946 def _MainParse(options, _args):
952 """Run input through annotated test parser. 947 """Run input through annotated test parser.
953 948
954 This doesn't execute a test, but reads test input from a file and runs it 949 This doesn't execute a test, but reads test input from a file and runs it
955 through the specified annotation parser. 950 through the specified annotation parser (aka log processor).
956 """ 951 """
957 if not options.annotate: 952 if not options.annotate:
958 raise chromium_utils.MissingArgument('--parse-input doesn\'t make sense ' 953 raise chromium_utils.MissingArgument('--parse-input doesn\'t make sense '
959 'without --annotate.') 954 'without --annotate.')
960 955
961 # If --annotate=list was passed, list the log parser classes and exit. 956 # If --annotate=list was passed, list the log processor classes and exit.
962 if _ListParsers(options.annotate): 957 if _ListLogProcessors(options.annotate):
963 return 0 958 return 0
964 959
965 tracker_class = _SelectResultsTracker(options) 960 log_processor_class = _SelectLogProcessor(options)
966 results_tracker = _CreateResultsTracker(tracker_class, options) 961 log_processor = _CreateLogProcessor(log_processor_class, options)
967 962
968 if options.generate_json_file: 963 if options.generate_json_file:
969 if os.path.exists(options.test_output_xml): 964 if os.path.exists(options.test_output_xml):
970 # remove the old XML output file. 965 # remove the old XML output file.
971 os.remove(options.test_output_xml) 966 os.remove(options.test_output_xml)
972 967
973 if options.parse_input == '-': 968 if options.parse_input == '-':
974 f = sys.stdin 969 f = sys.stdin
975 else: 970 else:
976 try: 971 try:
977 f = open(options.parse_input, 'rb') 972 f = open(options.parse_input, 'rb')
978 except IOError as e: 973 except IOError as e:
979 print 'Error %d opening \'%s\': %s' % (e.errno, options.parse_input, 974 print 'Error %d opening \'%s\': %s' % (e.errno, options.parse_input,
980 e.strerror) 975 e.strerror)
981 return 1 976 return 1
982 977
983 with f: 978 with f:
984 for line in f: 979 for line in f:
985 results_tracker.ProcessLine(line) 980 log_processor.ProcessLine(line)
986 981
987 if options.generate_json_file: 982 if options.generate_json_file:
988 if not _GenerateJSONForTestResults(options, results_tracker): 983 if not _GenerateJSONForTestResults(options, log_processor):
989 return 1 984 return 1
990 985
991 if options.annotate: 986 if options.annotate:
992 annotation_utils.annotate( 987 annotation_utils.annotate(
993 options.test_type, options.parse_result, results_tracker, 988 options.test_type, options.parse_result, log_processor,
994 options.factory_properties.get('full_test_name'), 989 options.factory_properties.get('full_test_name'),
995 perf_dashboard_id=options.perf_dashboard_id) 990 perf_dashboard_id=options.perf_dashboard_id)
996 991
997 return options.parse_result 992 return options.parse_result
998 993
999 994
1000 def _MainMac(options, args, extra_env): 995 def _MainMac(options, args, extra_env):
1001 """Runs the test on mac.""" 996 """Runs the test on mac."""
1002 if len(args) < 1: 997 if len(args) < 1:
1003 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) 998 raise chromium_utils.MissingArgument('Usage: %s' % USAGE)
(...skipping 15 matching lines...) Expand all
1019 elif options.run_shell_script: 1014 elif options.run_shell_script:
1020 command = ['bash', test_exe_path] 1015 command = ['bash', test_exe_path]
1021 elif options.run_python_script: 1016 elif options.run_python_script:
1022 command = [sys.executable, test_exe] 1017 command = [sys.executable, test_exe]
1023 else: 1018 else:
1024 command = [test_exe_path] 1019 command = [test_exe_path]
1025 if options.annotate == 'gtest': 1020 if options.annotate == 'gtest':
1026 command.extend(['--brave-new-test-launcher', '--test-launcher-bot-mode']) 1021 command.extend(['--brave-new-test-launcher', '--test-launcher-bot-mode'])
1027 command.extend(args[1:]) 1022 command.extend(args[1:])
1028 1023
1029 # If --annotate=list was passed, list the log parser classes and exit. 1024 # If --annotate=list was passed, list the log processor classes and exit.
1030 if _ListParsers(options.annotate): 1025 if _ListLogProcessors(options.annotate):
1031 return 0 1026 return 0
1032 tracker_class = _SelectResultsTracker(options) 1027 log_processor_class = _SelectLogProcessor(options)
1033 results_tracker = _CreateResultsTracker(tracker_class, options) 1028 log_processor = _CreateLogProcessor(log_processor_class, options)
1034 1029
1035 if options.generate_json_file: 1030 if options.generate_json_file:
1036 if os.path.exists(options.test_output_xml): 1031 if os.path.exists(options.test_output_xml):
1037 # remove the old XML output file. 1032 # remove the old XML output file.
1038 os.remove(options.test_output_xml) 1033 os.remove(options.test_output_xml)
1039 1034
1040 try: 1035 try:
1041 http_server = None 1036 http_server = None
1042 if options.document_root: 1037 if options.document_root:
1043 http_server = _StartHttpServer('mac', build_dir=build_dir, 1038 http_server = _StartHttpServer('mac', build_dir=build_dir,
1044 test_exe_path=test_exe_path, 1039 test_exe_path=test_exe_path,
1045 document_root=options.document_root) 1040 document_root=options.document_root)
1046 1041
1047 if _UsingGtestJson(options): 1042 if _UsingGtestJson(options):
1048 json_file_name = results_tracker.PrepareJSONFile( 1043 json_file_name = log_processor.PrepareJSONFile(
1049 options.test_launcher_summary_output) 1044 options.test_launcher_summary_output)
1050 command.append('--test-launcher-summary-output=%s' % json_file_name) 1045 command.append('--test-launcher-summary-output=%s' % json_file_name)
1051 1046
1052 pipes = [] 1047 pipes = []
1053 if options.enable_asan: 1048 if options.enable_asan:
1054 symbolize = os.path.abspath(os.path.join('src', 'tools', 'valgrind', 1049 symbolize = os.path.abspath(os.path.join('src', 'tools', 'valgrind',
1055 'asan', 'asan_symbolize.py')) 1050 'asan', 'asan_symbolize.py'))
1056 pipes = [[sys.executable, symbolize], ['c++filt']] 1051 pipes = [[sys.executable, symbolize], ['c++filt']]
1057 1052
1058 command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options, 1053 command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
1059 command) 1054 command)
1060 result = _RunGTestCommand(command, extra_env, pipes=pipes, 1055 result = _RunGTestCommand(command, extra_env, pipes=pipes,
1061 results_tracker=results_tracker) 1056 log_processor=log_processor)
1062 finally: 1057 finally:
1063 if http_server: 1058 if http_server:
1064 http_server.StopServer() 1059 http_server.StopServer()
1065 if _UsingGtestJson(options): 1060 if _UsingGtestJson(options):
1066 _UploadGtestJsonSummary(json_file_name, 1061 _UploadGtestJsonSummary(json_file_name,
1067 options.build_properties, 1062 options.build_properties,
1068 test_exe) 1063 test_exe)
1069 results_tracker.ProcessJSONFile(options.build_dir) 1064 log_processor.ProcessJSONFile(options.build_dir)
1070 1065
1071 if options.generate_json_file: 1066 if options.generate_json_file:
1072 if not _GenerateJSONForTestResults(options, results_tracker): 1067 if not _GenerateJSONForTestResults(options, log_processor):
1073 return 1 1068 return 1
1074 1069
1075 if options.annotate: 1070 if options.annotate:
1076 annotation_utils.annotate( 1071 annotation_utils.annotate(
1077 options.test_type, result, results_tracker, 1072 options.test_type, result, log_processor,
1078 options.factory_properties.get('full_test_name'), 1073 options.factory_properties.get('full_test_name'),
1079 perf_dashboard_id=options.perf_dashboard_id) 1074 perf_dashboard_id=options.perf_dashboard_id)
1080 1075
1081 if options.results_url: 1076 if options.results_url:
1082 _SendResultsToDashboard( 1077 _SendResultsToDashboard(
1083 results_tracker, _GetPerfID(options), 1078 log_processor, _GetPerfID(options),
1084 options.test_type, options.results_url, options.build_dir, 1079 options.test_type, options.results_url, options.build_dir,
1085 options.build_properties.get('mastername'), 1080 options.build_properties.get('mastername'),
1086 options.build_properties.get('buildername'), 1081 options.build_properties.get('buildername'),
1087 options.build_properties.get('buildnumber'), 1082 options.build_properties.get('buildnumber'),
1088 options.supplemental_columns_file, 1083 options.supplemental_columns_file,
1089 options.perf_config) 1084 options.perf_config)
1090 1085
1091 return result 1086 return result
1092 1087
1093 1088
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after
1139 tmpdir = tempfile.mkdtemp() 1134 tmpdir = tempfile.mkdtemp()
1140 command = [test_exe_path, 1135 command = [test_exe_path,
1141 '-d', device, 1136 '-d', device,
1142 '-s', ios_version, 1137 '-s', ios_version,
1143 '-t', '120', 1138 '-t', '120',
1144 '-u', tmpdir, 1139 '-u', tmpdir,
1145 app_exe_path, '--' 1140 app_exe_path, '--'
1146 ] 1141 ]
1147 command.extend(args[1:]) 1142 command.extend(args[1:])
1148 1143
1149 # If --annotate=list was passed, list the log parser classes and exit. 1144 # If --annotate=list was passed, list the log processor classes and exit.
1150 if _ListParsers(options.annotate): 1145 if _ListLogProcessors(options.annotate):
1151 return 0 1146 return 0
1152 results_tracker = _CreateResultsTracker(_GetParsers()['gtest'], options) 1147 log_processor = _CreateLogProcessor(LOG_PROCESSOR_CLASSES['gtest'], options)
1153 1148
1154 # Make sure the simulator isn't running. 1149 # Make sure the simulator isn't running.
1155 kill_simulator() 1150 kill_simulator()
1156 1151
1157 # Nuke anything that appears to be stale chrome items in the temporary 1152 # Nuke anything that appears to be stale chrome items in the temporary
1158 # directory from previous test runs (i.e.- from crashes or unittest leaks). 1153 # directory from previous test runs (i.e.- from crashes or unittest leaks).
1159 slave_utils.RemoveChromeTemporaryFiles() 1154 slave_utils.RemoveChromeTemporaryFiles()
1160 1155
1161 dirs_to_cleanup = [tmpdir] 1156 dirs_to_cleanup = [tmpdir]
1162 crash_files_before = set([]) 1157 crash_files_before = set([])
1163 crash_files_after = set([]) 1158 crash_files_after = set([])
1164 crash_files_before = set(crash_utils.list_crash_logs()) 1159 crash_files_before = set(crash_utils.list_crash_logs())
1165 1160
1166 result = _RunGTestCommand(command, extra_env, results_tracker) 1161 result = _RunGTestCommand(command, extra_env, log_processor)
1167 1162
1168 # Because test apps kill themselves, iossim sometimes returns non-zero 1163 # Because test apps kill themselves, iossim sometimes returns non-zero
1169 # status even though all tests have passed. Check the results_tracker to 1164 # status even though all tests have passed. Check the log_processor to
1170 # see if the test run was successful. 1165 # see if the test run was successful.
1171 if results_tracker.CompletedWithoutFailure(): 1166 if log_processor.CompletedWithoutFailure():
1172 result = 0 1167 result = 0
1173 else: 1168 else:
1174 result = 1 1169 result = 1
1175 1170
1176 if result != 0: 1171 if result != 0:
1177 crash_utils.wait_for_crash_logs() 1172 crash_utils.wait_for_crash_logs()
1178 crash_files_after = set(crash_utils.list_crash_logs()) 1173 crash_files_after = set(crash_utils.list_crash_logs())
1179 1174
1180 kill_simulator() 1175 kill_simulator()
1181 1176
(...skipping 90 matching lines...) Expand 10 before | Expand all | Expand 10 after
1272 elif options.run_shell_script: 1267 elif options.run_shell_script:
1273 command = ['bash', test_exe_path] 1268 command = ['bash', test_exe_path]
1274 elif options.run_python_script: 1269 elif options.run_python_script:
1275 command = [sys.executable, test_exe] 1270 command = [sys.executable, test_exe]
1276 else: 1271 else:
1277 command = [test_exe_path] 1272 command = [test_exe_path]
1278 if options.annotate == 'gtest': 1273 if options.annotate == 'gtest':
1279 command.extend(['--brave-new-test-launcher', '--test-launcher-bot-mode']) 1274 command.extend(['--brave-new-test-launcher', '--test-launcher-bot-mode'])
1280 command.extend(args[1:]) 1275 command.extend(args[1:])
1281 1276
1282 # If --annotate=list was passed, list the log parser classes and exit. 1277 # If --annotate=list was passed, list the log processor classes and exit.
1283 if _ListParsers(options.annotate): 1278 if _ListLogProcessors(options.annotate):
1284 return 0 1279 return 0
1285 tracker_class = _SelectResultsTracker(options) 1280 log_processor_class = _SelectLogProcessor(options)
1286 results_tracker = _CreateResultsTracker(tracker_class, options) 1281 log_processor = _CreateLogProcessor(log_processor_class, options)
1287 1282
1288 if options.generate_json_file: 1283 if options.generate_json_file:
1289 if os.path.exists(options.test_output_xml): 1284 if os.path.exists(options.test_output_xml):
1290 # remove the old XML output file. 1285 # remove the old XML output file.
1291 os.remove(options.test_output_xml) 1286 os.remove(options.test_output_xml)
1292 1287
1293 try: 1288 try:
1294 start_xvfb = False 1289 start_xvfb = False
1295 http_server = None 1290 http_server = None
1296 json_file_name = None 1291 json_file_name = None
(...skipping 10 matching lines...) Expand all
1307 'layout_test_wrapper' in test_exe or 1302 'layout_test_wrapper' in test_exe or
1308 'devtools_perf_test_wrapper' in test_exe) 1303 'devtools_perf_test_wrapper' in test_exe)
1309 if start_xvfb: 1304 if start_xvfb:
1310 xvfb.StartVirtualX( 1305 xvfb.StartVirtualX(
1311 slave_name, bin_dir, 1306 slave_name, bin_dir,
1312 with_wm=(options.factory_properties.get('window_manager', 'True') == 1307 with_wm=(options.factory_properties.get('window_manager', 'True') ==
1313 'True'), 1308 'True'),
1314 server_dir=special_xvfb_dir) 1309 server_dir=special_xvfb_dir)
1315 1310
1316 if _UsingGtestJson(options): 1311 if _UsingGtestJson(options):
1317 json_file_name = results_tracker.PrepareJSONFile( 1312 json_file_name = log_processor.PrepareJSONFile(
1318 options.test_launcher_summary_output) 1313 options.test_launcher_summary_output)
1319 command.append('--test-launcher-summary-output=%s' % json_file_name) 1314 command.append('--test-launcher-summary-output=%s' % json_file_name)
1320 1315
1321 pipes = [] 1316 pipes = []
1322 # See the comment in main() regarding offline symbolization. 1317 # See the comment in main() regarding offline symbolization.
1323 if (options.enable_asan or options.enable_msan) and not options.enable_lsan: 1318 if (options.enable_asan or options.enable_msan) and not options.enable_lsan:
1324 symbolize = os.path.abspath(os.path.join('src', 'tools', 'valgrind', 1319 symbolize = os.path.abspath(os.path.join('src', 'tools', 'valgrind',
1325 'asan', 'asan_symbolize.py')) 1320 'asan', 'asan_symbolize.py'))
1326 asan_symbolize = [sys.executable, symbolize] 1321 asan_symbolize = [sys.executable, symbolize]
1327 if options.strip_path_prefix: 1322 if options.strip_path_prefix:
1328 asan_symbolize.append(options.strip_path_prefix) 1323 asan_symbolize.append(options.strip_path_prefix)
1329 pipes = [asan_symbolize] 1324 pipes = [asan_symbolize]
1330 1325
1331 command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options, 1326 command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
1332 command) 1327 command)
1333 result = _RunGTestCommand(command, extra_env, pipes=pipes, 1328 result = _RunGTestCommand(command, extra_env, pipes=pipes,
1334 results_tracker=results_tracker) 1329 log_processor=log_processor)
1335 finally: 1330 finally:
1336 if http_server: 1331 if http_server:
1337 http_server.StopServer() 1332 http_server.StopServer()
1338 if start_xvfb: 1333 if start_xvfb:
1339 xvfb.StopVirtualX(slave_name) 1334 xvfb.StopVirtualX(slave_name)
1340 if _UsingGtestJson(options): 1335 if _UsingGtestJson(options):
1341 if json_file_name: 1336 if json_file_name:
1342 _UploadGtestJsonSummary(json_file_name, 1337 _UploadGtestJsonSummary(json_file_name,
1343 options.build_properties, 1338 options.build_properties,
1344 test_exe) 1339 test_exe)
1345 results_tracker.ProcessJSONFile(options.build_dir) 1340 log_processor.ProcessJSONFile(options.build_dir)
1346 1341
1347 if options.generate_json_file: 1342 if options.generate_json_file:
1348 if not _GenerateJSONForTestResults(options, results_tracker): 1343 if not _GenerateJSONForTestResults(options, log_processor):
1349 return 1 1344 return 1
1350 1345
1351 if options.annotate: 1346 if options.annotate:
1352 annotation_utils.annotate( 1347 annotation_utils.annotate(
1353 options.test_type, result, results_tracker, 1348 options.test_type, result, log_processor,
1354 options.factory_properties.get('full_test_name'), 1349 options.factory_properties.get('full_test_name'),
1355 perf_dashboard_id=options.perf_dashboard_id) 1350 perf_dashboard_id=options.perf_dashboard_id)
1356 1351
1357 if options.results_url: 1352 if options.results_url:
1358 _SendResultsToDashboard( 1353 _SendResultsToDashboard(
1359 results_tracker, _GetPerfID(options), 1354 log_processor, _GetPerfID(options),
1360 options.test_type, options.results_url, options.build_dir, 1355 options.test_type, options.results_url, options.build_dir,
1361 options.build_properties.get('mastername'), 1356 options.build_properties.get('mastername'),
1362 options.build_properties.get('buildername'), 1357 options.build_properties.get('buildername'),
1363 options.build_properties.get('buildnumber'), 1358 options.build_properties.get('buildnumber'),
1364 options.supplemental_columns_file, 1359 options.supplemental_columns_file,
1365 options.perf_config) 1360 options.perf_config)
1366 1361
1367 return result 1362 return result
1368 1363
1369 1364
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after
1421 'agent_logger.exe'), 1416 'agent_logger.exe'),
1422 'start', 1417 'start',
1423 '--output-file=%s' % logfile, 1418 '--output-file=%s' % logfile,
1424 '--'] + command 1419 '--'] + command
1425 command.extend(args[1:]) 1420 command.extend(args[1:])
1426 1421
1427 # Nuke anything that appears to be stale chrome items in the temporary 1422 # Nuke anything that appears to be stale chrome items in the temporary
1428 # directory from previous test runs (i.e.- from crashes or unittest leaks). 1423 # directory from previous test runs (i.e.- from crashes or unittest leaks).
1429 slave_utils.RemoveChromeTemporaryFiles() 1424 slave_utils.RemoveChromeTemporaryFiles()
1430 1425
1431 # If --annotate=list was passed, list the log parser classes and exit. 1426 # If --annotate=list was passed, list the log processor classes and exit.
1432 if _ListParsers(options.annotate): 1427 if _ListLogProcessors(options.annotate):
1433 return 0 1428 return 0
1434 tracker_class = _SelectResultsTracker(options) 1429 log_processor_class = _SelectLogProcessor(options)
1435 results_tracker = _CreateResultsTracker(tracker_class, options) 1430 log_processor = _CreateLogProcessor(log_processor_class, options)
1436 1431
1437 if options.generate_json_file: 1432 if options.generate_json_file:
1438 if os.path.exists(options.test_output_xml): 1433 if os.path.exists(options.test_output_xml):
1439 # remove the old XML output file. 1434 # remove the old XML output file.
1440 os.remove(options.test_output_xml) 1435 os.remove(options.test_output_xml)
1441 1436
1442 try: 1437 try:
1443 http_server = None 1438 http_server = None
1444 if options.document_root: 1439 if options.document_root:
1445 http_server = _StartHttpServer('win', build_dir=build_dir, 1440 http_server = _StartHttpServer('win', build_dir=build_dir,
1446 test_exe_path=test_exe_path, 1441 test_exe_path=test_exe_path,
1447 document_root=options.document_root) 1442 document_root=options.document_root)
1448 1443
1449 if _UsingGtestJson(options): 1444 if _UsingGtestJson(options):
1450 json_file_name = results_tracker.PrepareJSONFile( 1445 json_file_name = log_processor.PrepareJSONFile(
1451 options.test_launcher_summary_output) 1446 options.test_launcher_summary_output)
1452 command.append('--test-launcher-summary-output=%s' % json_file_name) 1447 command.append('--test-launcher-summary-output=%s' % json_file_name)
1453 1448
1454 command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options, 1449 command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
1455 command) 1450 command)
1456 result = _RunGTestCommand(command, extra_env, results_tracker) 1451 result = _RunGTestCommand(command, extra_env, log_processor)
1457 finally: 1452 finally:
1458 if http_server: 1453 if http_server:
1459 http_server.StopServer() 1454 http_server.StopServer()
1460 if _UsingGtestJson(options): 1455 if _UsingGtestJson(options):
1461 _UploadGtestJsonSummary(json_file_name, 1456 _UploadGtestJsonSummary(json_file_name,
1462 options.build_properties, 1457 options.build_properties,
1463 test_exe) 1458 test_exe)
1464 results_tracker.ProcessJSONFile(options.build_dir) 1459 log_processor.ProcessJSONFile(options.build_dir)
1465 1460
1466 if options.enable_pageheap: 1461 if options.enable_pageheap:
1467 slave_utils.SetPageHeap(build_dir, 'chrome.exe', False) 1462 slave_utils.SetPageHeap(build_dir, 'chrome.exe', False)
1468 1463
1469 if options.generate_json_file: 1464 if options.generate_json_file:
1470 if not _GenerateJSONForTestResults(options, results_tracker): 1465 if not _GenerateJSONForTestResults(options, log_processor):
1471 return 1 1466 return 1
1472 1467
1473 if options.annotate: 1468 if options.annotate:
1474 annotation_utils.annotate( 1469 annotation_utils.annotate(
1475 options.test_type, result, results_tracker, 1470 options.test_type, result, log_processor,
1476 options.factory_properties.get('full_test_name'), 1471 options.factory_properties.get('full_test_name'),
1477 perf_dashboard_id=options.perf_dashboard_id) 1472 perf_dashboard_id=options.perf_dashboard_id)
1478 1473
1479 if options.results_url: 1474 if options.results_url:
1480 _SendResultsToDashboard( 1475 _SendResultsToDashboard(
1481 results_tracker, _GetPerfID(options), 1476 log_processor, _GetPerfID(options),
1482 options.test_type, options.results_url, options.build_dir, 1477 options.test_type, options.results_url, options.build_dir,
1483 options.build_properties.get('mastername'), 1478 options.build_properties.get('mastername'),
1484 options.build_properties.get('buildername'), 1479 options.build_properties.get('buildername'),
1485 options.build_properties.get('buildnumber'), 1480 options.build_properties.get('buildnumber'),
1486 options.supplemental_columns_file, 1481 options.supplemental_columns_file,
1487 options.perf_config) 1482 options.perf_config)
1488 1483
1489 return result 1484 return result
1490 1485
1491 1486
(...skipping 11 matching lines...) Expand all
1503 1498
1504 Returns: 1499 Returns:
1505 Exit status code. 1500 Exit status code.
1506 """ 1501 """
1507 if options.run_python_script: 1502 if options.run_python_script:
1508 return _MainLinux(options, args, extra_env) 1503 return _MainLinux(options, args, extra_env)
1509 1504
1510 if len(args) < 1: 1505 if len(args) < 1:
1511 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) 1506 raise chromium_utils.MissingArgument('Usage: %s' % USAGE)
1512 1507
1513 if _ListParsers(options.annotate): 1508 if _ListLogProcessors(options.annotate):
1514 return 0 1509 return 0
1515 tracker_class = _SelectResultsTracker(options) 1510 log_processor_class = _SelectLogProcessor(options)
1516 results_tracker = _CreateResultsTracker(tracker_class, options) 1511 log_processor = _CreateLogProcessor(log_processor_class, options)
1517 1512
1518 if options.generate_json_file: 1513 if options.generate_json_file:
1519 if os.path.exists(options.test_output_xml): 1514 if os.path.exists(options.test_output_xml):
1520 # remove the old XML output file. 1515 # remove the old XML output file.
1521 os.remove(options.test_output_xml) 1516 os.remove(options.test_output_xml)
1522 1517
1523 # Assume it's a gtest apk, so use the android harness. 1518 # Assume it's a gtest apk, so use the android harness.
1524 test_suite = args[0] 1519 test_suite = args[0]
1525 run_test_target_option = '--release' 1520 run_test_target_option = '--release'
1526 if options.target == 'Debug': 1521 if options.target == 'Debug':
1527 run_test_target_option = '--debug' 1522 run_test_target_option = '--debug'
1528 command = ['src/build/android/test_runner.py', 'gtest', 1523 command = ['src/build/android/test_runner.py', 'gtest',
1529 run_test_target_option, '-s', test_suite] 1524 run_test_target_option, '-s', test_suite]
1530 result = _RunGTestCommand(command, extra_env, results_tracker=results_tracker) 1525 result = _RunGTestCommand(command, extra_env, log_processor=log_processor)
1531 1526
1532 if options.generate_json_file: 1527 if options.generate_json_file:
1533 if not _GenerateJSONForTestResults(options, results_tracker): 1528 if not _GenerateJSONForTestResults(options, log_processor):
1534 return 1 1529 return 1
1535 1530
1536 if options.annotate: 1531 if options.annotate:
1537 annotation_utils.annotate( 1532 annotation_utils.annotate(
1538 options.test_type, result, results_tracker, 1533 options.test_type, result, log_processor,
1539 options.factory_properties.get('full_test_name'), 1534 options.factory_properties.get('full_test_name'),
1540 perf_dashboard_id=options.perf_dashboard_id) 1535 perf_dashboard_id=options.perf_dashboard_id)
1541 1536
1542 if options.results_url: 1537 if options.results_url:
1543 _SendResultsToDashboard( 1538 _SendResultsToDashboard(
1544 results_tracker, _GetPerfID(options), 1539 log_processor, _GetPerfID(options),
1545 options.test_type, options.results_url, options.build_dir, 1540 options.test_type, options.results_url, options.build_dir,
1546 options.build_properties.get('mastername'), 1541 options.build_properties.get('mastername'),
1547 options.build_properties.get('buildername'), 1542 options.build_properties.get('buildername'),
1548 options.build_properties.get('buildnumber'), 1543 options.build_properties.get('buildnumber'),
1549 options.supplemental_columns_file, 1544 options.supplemental_columns_file,
1550 options.perf_config) 1545 options.perf_config)
1551 1546
1552 return result 1547 return result
1553 1548
1554 1549
(...skipping 198 matching lines...) Expand 10 before | Expand all | Expand 10 after
1753 options.perf_dashboard_id = options.factory_properties.get('test_name') 1748 options.perf_dashboard_id = options.factory_properties.get('test_name')
1754 1749
1755 options.test_type = options.test_type or options.factory_properties.get( 1750 options.test_type = options.test_type or options.factory_properties.get(
1756 'step_name', '') 1751 'step_name', '')
1757 1752
1758 if options.run_shell_script and options.run_python_script: 1753 if options.run_shell_script and options.run_python_script:
1759 sys.stderr.write('Use either --run-shell-script OR --run-python-script, ' 1754 sys.stderr.write('Use either --run-shell-script OR --run-python-script, '
1760 'not both.') 1755 'not both.')
1761 return 1 1756 return 1
1762 1757
1763 # Print out builder name for log_parser
1764 print '[Running on builder: "%s"]' % options.builder_name 1758 print '[Running on builder: "%s"]' % options.builder_name
1765 1759
1766 did_launch_dbus = False 1760 did_launch_dbus = False
1767 if not options.no_spawn_dbus: 1761 if not options.no_spawn_dbus:
1768 did_launch_dbus = _LaunchDBus() 1762 did_launch_dbus = _LaunchDBus()
1769 1763
1770 try: 1764 try:
1771 options.build_dir = build_directory.GetBuildOutputDirectory() 1765 options.build_dir = build_directory.GetBuildOutputDirectory()
1772 1766
1773 if options.pass_target and options.target: 1767 if options.pass_target and options.target:
(...skipping 175 matching lines...) Expand 10 before | Expand all | Expand 10 after
1949 finally: 1943 finally:
1950 if did_launch_dbus: 1944 if did_launch_dbus:
1951 # It looks like the command line argument --exit-with-session 1945 # It looks like the command line argument --exit-with-session
1952 # isn't working to clean up the spawned dbus-daemon. Kill it 1946 # isn't working to clean up the spawned dbus-daemon. Kill it
1953 # manually. 1947 # manually.
1954 _ShutdownDBus() 1948 _ShutdownDBus()
1955 1949
1956 1950
1957 if '__main__' == __name__: 1951 if '__main__' == __name__:
1958 sys.exit(main()) 1952 sys.exit(main())
OLDNEW
« no previous file with comments | « scripts/slave/process_log_utils.py ('k') | scripts/slave/unittests/performance_log_processor_test.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698