OLD | NEW |
(Empty) | |
| 1 //#!/usr/bin/env dart |
| 2 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
| 3 // for details. All rights reserved. Use of this source code is governed by a |
| 4 // BSD-style license that can be found in the LICENSE file. |
| 5 |
| 6 /** |
| 7 * testrunner is a program to run Dart unit tests. Unlike ~/tools/test.dart, |
| 8 * this program is intended for 3rd parties to be able to run unit tests in |
| 9 * a batched fashion. As such, it adds some features and removes others. Some |
| 10 * of the removed features are: |
| 11 * |
| 12 * - No support for test.status files. The assumption is that tests are |
| 13 * expected to pass. |
| 14 * - A restricted set of runtimes. The assumption here is that the Dart |
| 15 * libraries deal with platform dependencies, and so the primary |
| 16 * SKUs that a user of this app would be concerned with would be |
| 17 * Dart-native versus compiled, and client (browser) vs server. To |
| 18 * support these, four runtimes are allowed: 'drt-dart' and 'drt-js' (for |
| 19 * client native and client-compiled, respectively), and 'vm' and 'd8' |
| 20 * (for server-side native and compiled, respectively). |
| 21 * - No sharding of test processes. |
| 22 * |
| 23 * On the other hand, a number of features have been added: |
| 24 * |
| 25 * - The ability to filter tests by group or name. |
| 26 * - The ability to run tests in isolates. |
| 27 * - The ability to customize the format of the test result messages. |
| 28 * - The ability to list the tests available. |
| 29 * |
| 30 * By default, testrunner will run all tests in the current directory. |
| 31 * With a -R option, it will recurse into subdirectories. |
| 32 * Directories can also be specified on the command line; if |
| 33 * any are specified they will override the use of the current directory. |
| 34 * All files that match the --test-file-pat will be included; by default |
| 35 * this is files with names that end in _test.dart. |
| 36 * |
| 37 * Options can be specified on the command line, via a configuration |
| 38 * file (--config) or via a test.config file in the test directory, |
| 39 * in decreasing order of priority. |
| 40 * |
| 41 * TODO(gram) - Layout tests. The plan here will be to look for a file |
| 42 * with a .layout extension that corresponds to the .dart file, that contains |
| 43 * multiple layouts, one for each test. Each test will be run in its own |
| 44 * instance of DRT and and the result compared with the expected layout. |
| 45 * |
| 46 */ |
| 47 #library('testrunner'); |
| 48 #import('dart:io'); |
| 49 #import('dart:isolate'); |
| 50 #import('dart:math'); |
| 51 #import('../../pkg/args/args.dart'); |
| 52 |
| 53 #source('dart_wrap_task.dart'); |
| 54 #source('delete_task.dart'); |
| 55 #source('html_wrap_task.dart'); |
| 56 #source('meta.dart'); |
| 57 #source('options.dart'); |
| 58 #source('pipeline_runner.dart'); |
| 59 #source('pipeline_task.dart'); |
| 60 #source('run_process_task.dart'); |
| 61 #source('utils.dart'); |
| 62 |
// The set of [PipelineRunner]s to execute, one per discovered test file.
List tasks;
// The parsed configuration options; assigned in main() from
// loadConfiguration() and read throughout the file.
ArgResults configuration;
// The maximum number of pipelines that can run concurrently
// (min of the --tasks option and the number of test files).
int maxTasks;
// The number of pipelines currently running.
int numTasks;
// The index of the next pipeline runner to execute.
int nextTask;
// Whether to capture all output from a test or just the result.
bool verbose;
// The timeout (in seconds) to use on running processes.
// NOTE(review): units assumed from RunProcessTask usage — confirm.
int timeout;
// The stream to use for high-value messages, like test results.
OutputStream outStream;
// The stream to use for low-value messages, like verbose output.
OutputStream logStream;
| 80 |
// Resolve a stream name given on the command line to an [OutputStream].
// 'none' yields null (discard), 'stdout'/'stderr' yield the corresponding
// process streams, and anything else is treated as a file path to open
// for writing.
OutputStream getStream(String name) {
  switch (name) {
    case 'none':
      return null;
    case 'stdout':
      return stdout;
    case 'stderr':
      return stderr;
  }
  return new File(name).openOutputStream(FileMode.WRITE);
}
| 96 |
/**
 * Generate a templated list of commands that should be executed for each test
 * file. Each command is an instance of a [PipelineTask].
 * The commands can make use of a number of metatokens that will be
 * expanded before execution (see the [Meta] class for details).
 *
 * [runtime] selects the execution engine ('vm', 'd8', 'drt-dart' or
 * 'drt-js'), [checkedMode] adds assertion/typecheck flags where supported,
 * and [keepTests] suppresses the cleanup steps so generated files survive.
 */
List getPipelineTemplate(String runtime, bool checkedMode, bool keepTests) {
  var pipeline = new List();
  var pathSep = Platform.pathSeparator;
  var tempDirPath = configuration['tempdir'];
  Directory tempDir = new Directory(tempDirPath);

  // Make sure the scratch directory for generated files exists.
  if (!tempDir.existsSync()) {
    tempDir.createSync();
  }
  var mustCleanupJavascript = false;
  var mustCleanupHtml = false;

  // Templates for the generated files that are used to run the wrapped test.
  var tempDartFile = '$tempDirPath$pathSep${Meta.filenameNoExtension}.dart';
  var tempJsFile = '$tempDirPath$pathSep${Meta.filenameNoExtension}.js';
  var tempHTMLFile = '$tempDirPath$pathSep${Meta.filenameNoExtension}.html';
  var tempCSSFile = '$tempDirPath$pathSep${Meta.filenameNoExtension}.css';

  // Add step for wrapping in Dart scaffold.
  pipeline.add(new DartWrapTask(Meta.fullFilePath, tempDartFile));

  // Add the compiler step, unless we are running native Dart. The argument
  // list is assembled incrementally so the checked and unchecked variants
  // share the common arguments instead of duplicating the whole task.
  if (runtime != 'vm' && runtime != 'drt-dart') {
    mustCleanupJavascript = true;
    var compileArgs = new List();
    if (checkedMode) {
      compileArgs.add('--enable_checked_mode');
    }
    compileArgs.add('--out=$tempJsFile');
    compileArgs.add(tempDartFile);
    pipeline.add(new RunProcessTask(configuration['dart2js'], compileArgs,
        timeout));
  }

  // Add step for wrapping in HTML, if we are running in DRT.
  if (runtime == 'drt-dart' || runtime == 'drt-js') {
    mustCleanupHtml = true;
    // The user can have pre-existing HTML and CSS files for the test in the
    // same directory and using the same name. The paths to these are matched
    // by these two templates.
    var HTMLFile = '${Meta.directory}$pathSep${Meta.filenameNoExtension}.html';
    var CSSFile = '${Meta.directory}$pathSep${Meta.filenameNoExtension}.css';
    pipeline.add(new HtmlWrapTask(Meta.fullFilePath,
        HTMLFile, tempHTMLFile, CSSFile, tempCSSFile));
  }

  // Add the execution step. As with the compile step, checked-mode flags
  // are appended to a shared argument list rather than duplicating the task.
  if (runtime == 'vm') {
    var vmArgs = new List();
    if (checkedMode) {
      vmArgs.add('--enable_asserts');
      vmArgs.add('--enable_typechecks');
    }
    vmArgs.add(tempDartFile);
    pipeline.add(new RunProcessTask(configuration['dart'], vmArgs, timeout));
  } else if (runtime == 'drt-dart' || runtime == 'drt-js') {
    pipeline.add(new RunProcessTask(configuration['drt'],
        [ '--no-timeout', tempHTMLFile ], timeout));
  } else if (runtime == 'd8') {
    pipeline.add(new RunProcessTask(configuration['d8'],
        [ tempJsFile ], timeout));
  }

  // Add the cleanup steps, deleting only the files this pipeline generated.
  if (!keepTests) {
    pipeline.add(new DeleteTask(tempDartFile));

    if (mustCleanupJavascript) {
      pipeline.add(new DeleteTask(tempJsFile));
    }

    if (mustCleanupHtml) {
      pipeline.add(new DeleteTask(tempHTMLFile));
      pipeline.add(new DeleteTask(tempCSSFile));
    }
  }

  return pipeline;
}
| 183 |
// Once all the test files have been enumerated, [processTests] performs the
// next step: either listing the file names (--list-files) or building an
// execution pipeline per file and starting the first batch.
void processTests(List pipelineTemplate, List testFiles) {
  outStream = getStream(configuration['out']);
  logStream = getStream(configuration['log']);

  if (configuration['list-files']) {
    // Listing mode: just echo each matching test file name.
    if (outStream != null) {
      for (var file in testFiles) {
        outStream.writeString(file);
        outStream.writeString('\n');
      }
    }
    return;
  }

  // Execution mode: instantiate one [PipelineRunner] per test file from the
  // pipeline template, then kick off execution of the first batch.
  tasks = new List();
  for (var file in testFiles) {
    tasks.add(new PipelineRunner(pipelineTemplate, file, verbose,
        completeHandler));
  }

  maxTasks = min(parseInt(configuration['tasks']), testFiles.length);
  numTasks = 0;
  nextTask = 0;
  spawnTasks();
}
| 213 |
// Start queued pipelines until either the concurrency cap ([maxTasks]) is
// reached or no unstarted pipelines remain. [numTasks] is incremented
// before execute() so a synchronously-completing task sees itself counted.
void spawnTasks() {
  while (numTasks < maxTasks && nextTask < tasks.length) {
    numTasks++;
    var runner = tasks[nextTask];
    nextTask++;
    runner.execute();
  }
}
| 221 |
// Handle the completion of a task. Kick off more tasks if we
// have them.
//
// [testFile] is the test whose pipeline just finished, [exitCode] is the
// final process exit code, and [_stdout]/[_stderr] hold the captured output
// lines. NOTE(review): this runs as the [PipelineRunner] completion
// callback; [numTasks] must be decremented before [spawnTasks] so a free
// slot is visible — keep the statement order intact.
void completeHandler(String testFile,
                     int exitCode,
                     List _stdout,
                     List _stderr) {
  // Both captured streams are filtered through [writelog], which routes
  // '###'-prefixed messages to outStream and the rest to logStream.
  writelog(_stdout, outStream, logStream);
  writelog(_stderr, outStream, logStream);
  --numTasks;
  // Continue draining the queue on success, or always when the user did
  // not ask to stop at the first failure.
  if (exitCode == 0 || !configuration['stop-on-failure']) {
    spawnTasks();
  }
  if (numTasks == 0) {
    // No outstanding tasks; we're all done.
    // We could later print a summary report here.
  }
}
| 239 |
// Our tests are configured so that critical messages have a '###' prefix.
// [writelog] takes the output from a pipeline execution and writes it to
// our output streams. Critical messages always go to [out] with the '###'
// marker stripped; all other messages are low-value and are written to
// [log] only when verbose output was specified. Either stream may be null,
// in which case the corresponding messages are discarded.
void writelog(List messages, OutputStream out, OutputStream log) {
  for (var msg in messages) {
    if (msg.startsWith('###')) {
      // Critical message: always emit, minus the 3-character marker.
      if (out != null) {
        out.writeString(msg.substring(3));
        out.writeString('\n');
      }
    } else if (verbose && log != null) {
      // Low-value message: only emitted in verbose mode.
      log.writeString(msg);
      log.writeString('\n');
    }
  }
}
| 261 |
// Entry point: parse the configuration, handle the option-listing modes,
// then build the pipeline template and run every matching test file.
main() {
  var optionsParser = getOptionParser();
  configuration = loadConfiguration(optionsParser);

  // Bail out early on a bad configuration.
  if (!sane(configuration)) {
    return;
  }

  // Option-listing modes print and exit without running anything.
  if (configuration['list-options']) {
    printOptions(optionsParser, configuration, false, stdout);
    return;
  }
  if (configuration['list-all-options']) {
    printOptions(optionsParser, configuration, true, stdout);
    return;
  }

  // Pull out some useful config stuff.
  timeout = parseInt(configuration['timeout']);
  verbose = configuration['log'] != 'none';
  if (configuration['list-groups']) {
    verbose = false;
  }

  // Build the command templates needed for test compile and execute.
  var runtime = configuration['runtime'];
  var checkedMode = configuration['checked'];
  var keepTests = configuration['keep-generated-tests'] &&
      !(configuration['list-groups'] || configuration['list-tests']);
  var pipelineTemplate = getPipelineTemplate(runtime, checkedMode, keepTests);
  if (pipelineTemplate == null) {
    return;
  }

  // Build the list of tests and then execute them.
  List dirs = configuration.rest;
  bool recurse = configuration['recurse'];
  if (dirs.length == 0) {
    dirs.add('.'); // Use current working directory as default.
  }
  buildFileList(dirs,
      new RegExp(configuration['test-file-pat']), recurse,
      (f) => processTests(pipelineTemplate, f));
}
| 300 |
OLD | NEW |