OLD | NEW |
1 library tokenizer_test; | 1 library tokenizer_test; |
2 | 2 |
3 // Note: mirrors used to match the getattr usage in the original test | 3 // Note: mirrors used to match the getattr usage in the original test |
4 import 'dart:io'; | 4 import 'dart:io'; |
5 import 'dart:json'; | 5 import 'dart:json'; |
6 import 'dart:mirrors'; | 6 import 'dart:mirrors'; |
7 import 'package:unittest/unittest.dart'; | 7 import 'package:unittest/unittest.dart'; |
8 import 'package:unittest/vm_config.dart'; | 8 import 'package:unittest/vm_config.dart'; |
9 import 'package:html5lib/src/char_encodings.dart'; | 9 import 'package:html5lib/src/char_encodings.dart'; |
10 import 'package:html5lib/src/constants.dart' as constants; | 10 import 'package:html5lib/src/constants.dart' as constants; |
(...skipping 127 matching lines...)
138 // use the actual logging library. | 138 // use the actual logging library. |
139 outputTokens.add(["ParseError", token.data]); | 139 outputTokens.add(["ParseError", token.data]); |
140 } | 140 } |
141 } | 141 } |
142 | 142 |
143 List concatenateCharacterTokens(List tokens) { | 143 List concatenateCharacterTokens(List tokens) { |
144 var outputTokens = []; | 144 var outputTokens = []; |
145 for (var token in tokens) { | 145 for (var token in tokens) { |
146 if (token.indexOf("ParseError") == -1 && token[0] == "Character") { | 146 if (token.indexOf("ParseError") == -1 && token[0] == "Character") { |
147 if (outputTokens.length > 0 && | 147 if (outputTokens.length > 0 && |
148 outputTokens.last().indexOf("ParseError") == -1 && | 148 outputTokens.last.indexOf("ParseError") == -1 && |
149 outputTokens.last()[0] == "Character") { | 149 outputTokens.last[0] == "Character") { |
150 | 150 |
151 outputTokens.last()[1] = '${outputTokens.last()[1]}${token[1]}'; | 151 outputTokens.last[1] = '${outputTokens.last[1]}${token[1]}'; |
152 } else { | 152 } else { |
153 outputTokens.add(token); | 153 outputTokens.add(token); |
154 } | 154 } |
155 } else { | 155 } else { |
156 outputTokens.add(token); | 156 outputTokens.add(token); |
157 } | 157 } |
158 } | 158 } |
159 return outputTokens; | 159 return outputTokens; |
160 } | 160 } |
161 | 161 |
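The change in this hunk is mechanical: Dart's `List.last` was converted from a method to a getter, so the call parentheses are dropped. The function itself merges runs of adjacent "Character" tokens into a single token. A minimal usage sketch (the input token shapes are taken from the code above; the expected output is a hypothetical illustration):

    main() {
      var merged = concatenateCharacterTokens(
          [["Character", "fo"], ["Character", "o"], ["ParseError", "x"]]);
      // Adjacent Character tokens collapse; the ParseError entry breaks the run.
      print(merged); // [[Character, foo], [ParseError, x]]
    }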
(...skipping 99 matching lines...)
261 } | 261 } |
262 } | 262 } |
263 return testInfo; | 263 return testInfo; |
264 } | 264 } |
265 | 265 |
266 | 266 |
267 String camelCase(String s) { | 267 String camelCase(String s) { |
268 s = s.toLowerCase(); | 268 s = s.toLowerCase(); |
269 var result = new StringBuffer(); | 269 var result = new StringBuffer(); |
270 for (var match in const RegExp(r"\W+(\w)(\w+)").allMatches(s)) { | 270 for (var match in const RegExp(r"\W+(\w)(\w+)").allMatches(s)) { |
271 if (result.length == 0) result.add(s.substring(0, match.start())); | 271 if (result.length == 0) result.add(s.substring(0, match.start)); |
272 result.add(match.group(1).toUpperCase()); | 272 result.add(match.group(1).toUpperCase()); |
273 result.add(match.group(2)); | 273 result.add(match.group(2)); |
274 } | 274 } |
275 return result.toString(); | 275 return result.toString(); |
276 } | 276 } |
277 | 277 |
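Same mechanical update here: `Match.start` changed from a method to a getter in the same core-library cleanup. For reference, this is what `camelCase` produces, assuming the state names that appear in the html5lib tokenizer fixtures (sample inputs, not taken from this CL):

    camelCase("Data state");      // "dataState"
    camelCase("PLAINTEXT state"); // "plaintextState"
    camelCase("RCDATA state");    // "rcdataState"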
278 void main() { | 278 void main() { |
279 useVmConfiguration(); | 279 useVmConfiguration(); |
280 getDataFiles('tokenizer', (p) => p.endsWith('.test')).then((files) { | 280 getDataFiles('tokenizer', (p) => p.endsWith('.test')).then((files) { |
281 for (var path in files) { | 281 for (var path in files) { |
(...skipping 13 matching lines...)
295 test(testInfo["description"], () { | 295 test(testInfo["description"], () { |
296 testInfo["initialState"] = camelCase(initialState); | 296 testInfo["initialState"] = camelCase(initialState); |
297 runTokenizerTest(testInfo); | 297 runTokenizerTest(testInfo); |
298 }); | 298 }); |
299 } | 299 } |
300 } | 300 } |
301 }); | 301 }); |
302 } | 302 } |
303 }); | 303 }); |
304 } | 304 } |
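For reference, each `.test` file loaded above is an html5lib tokenizer fixture: a JSON document with a top-level `tests` array, where every entry carries a description, an input string, the expected token list, and optionally the initial tokenizer states to run it under. A hedged sketch of one entry (field names assumed from the html5lib-tests format; the Character token shape matches `concatenateCharacterTokens` above):

    { "tests": [
        { "description": "Plain text",
          "input": "foo",
          "output": [["Character", "foo"]],
          "initialStates": ["Data state"] } ] }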