| 1 # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. |
| 2 # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr |
| 3 # |
| 4 # This file is part of logilab-common. |
| 5 # |
| 6 # logilab-common is free software: you can redistribute it and/or modify it under |
| 7 # the terms of the GNU Lesser General Public License as published by the Free |
| 8 # Software Foundation, either version 2.1 of the License, or (at your option) any |
| 9 # later version. |
| 10 # |
| 11 # logilab-common is distributed in the hope that it will be useful, but WITHOUT |
| 12 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS |
| 13 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more |
| 14 # details. |
| 15 # |
| 16 # You should have received a copy of the GNU Lesser General Public License along |
| 17 # with logilab-common. If not, see <http://www.gnu.org/licenses/>. |
| 18 """pytest is a tool that eases test running and debugging. |
| 19 |
| 20 To use pytest, write your tests either with the logilab.common.testlib |
| 21 framework or with the unittest module from the Python standard |
| 22 library. |
| 23 |
| 24 You can customize pytest's behaviour by defining a ``pytestconf.py`` file |
| 25 somewhere in your test directory. In this file, you can add options or |
| 26 change the way tests are run. |
| 27 |
| 28 To add command line options, define an ``update_parser`` function in |
| 29 your ``pytestconf.py`` file. The function must accept a single parameter: |
| 30 the OptionParser instance to customize. |
| 31 |
| 32 If you wish to customize the tester, you'll have to define a class named |
| 33 ``CustomPyTester``. This class should extend the default `PyTester` class |
| 34 defined in the pytest module. Take a look at the `PyTester` and `DjangoTester` |
| 35 classes for more information about what can be done. |
| 36 |
| 37 For instance, if you wish to add a custom -l option to specify a loglevel, you |
| 38 could define the following ``pytestconf.py`` file :: |
| 39 |
| 40 import logging |
| 41 from logilab.common.pytest import PyTester |
| 42 |
| 43 def update_parser(parser): |
| 44 parser.add_option('-l', '--loglevel', dest='loglevel', action='store', |
| 45 choices=('debug', 'info', 'warning', 'error', 'critical'), |
| 46 default='critical', help="the default log level; possible choices are " |
| 47 "('debug', 'info', 'warning', 'error', 'critical')") |
| 48 return parser |
| 49 |
| 50 |
| 51 class CustomPyTester(PyTester): |
| 52 def __init__(self, cvg, options): |
| 53 super(CustomPyTester, self).__init__(cvg, options) |
| 54 loglevel = options.loglevel.upper() |
| 55 logger = logging.getLogger('erudi') |
| 56 logger.setLevel(logging.getLevelName(loglevel)) |
| 57 |
| 58 |
| 59 In your TestCase class you can then get the value of a specific option with |
| 60 the ``optval`` method:: |
| 61 |
| 62 class MyTestCase(TestCase): |
| 63 def test_foo(self): |
| 64 loglevel = self.optval('loglevel') |
| 65 # ... |
| 66 |
| 67 |
| 68 You can also tag your tests for fine-grained filtering. |
| 69 |
| 70 Given these tags:: |
| 71 |
| 72 from logilab.common.testlib import tag, TestCase |
| 73 |
| 74 class Exemple(TestCase): |
| 75 |
| 76 @tag('rouge', 'carre') |
| 77 def toto(self): |
| 78 pass |
| 79 |
| 80 @tag('carre', 'vert') |
| 81 def tata(self): |
| 82 pass |
| 83 |
| 84 @tag('rouge') |
| 85 def titi(self): |
| 86 pass |
| 87 |
| 88 you can then filter the tests with a simple Python expression (example below) |
| 89 |
| 90 * ``toto`` and ``titi`` match ``rouge`` |
| 91 * ``toto``, ``tata`` and ``titi`` match ``rouge or carre`` |
| 92 * ``tata`` and ``titi`` match ``rouge ^ carre`` |
| 93 * ``titi`` matches ``rouge and not carre`` |
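| |
| For example, given a (hypothetical) ``test_example.py`` holding the |
| ``Exemple`` class above, the following command runs only ``titi`` |
| (``-m`` is the ``--match`` option defined by this module):: |
| |
| pytest test_example.py -m 'rouge and not carre' |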
| 94 """ |
| 95 __docformat__ = "restructuredtext en" |
| 96 |
| 97 PYTEST_DOC = """%prog [OPTIONS] [testfile [testpattern]] |
| 98 |
| 99 examples: |
| 100 |
| 101 pytest path/to/mytests.py |
| 102 pytest path/to/mytests.py TheseTests |
| 103 pytest path/to/mytests.py TheseTests.test_thisone |
| 104 pytest path/to/mytests.py -m '(not long and database) or regr' |
| 105 |
| 106 pytest one (will run both test_thisone and test_thatone) |
| 107 pytest path/to/mytests.py -s not (will skip test_notthisone) |
| 108 |
| 109 pytest --coverage test_foo.py |
| 110 (only if logilab.devtools is available) |
| 111 """ |
| 112 |
| 113 ENABLE_DBC = False |
| 114 FILE_RESTART = ".pytest.restart" |
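| # the restart file stores one succeeded test per line as a qualified |
| # "module.Class.method" name; SkipAwareTestProgram._runTests reads it back |
| # when --restart is given and drops the listed tests from the suite |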
| 115 |
| 116 import os, sys, re |
| 117 import os.path as osp |
| 118 from time import time, clock |
| 119 import warnings |
| 120 import types |
| 121 |
| 122 from logilab.common.fileutils import abspath_listdir |
| 123 from logilab.common import textutils |
| 124 from logilab.common import testlib, STD_BLACKLIST |
| 125 # use the same unittest module as testlib |
| 126 from logilab.common.testlib import unittest, start_interactive_mode |
| 127 from logilab.common.compat import any |
| 128 import doctest |
| 129 |
| 130 import unittest as unittest_legacy |
| 131 if not getattr(unittest_legacy, "__package__", None): |
| 132 try: |
| 133 import unittest2.suite as unittest_suite |
| 134 except ImportError: |
| 135 sys.exit("You have to install python-unittest2 to use this module") |
| 136 else: |
| 137 import unittest.suite as unittest_suite |
| 138 |
| 139 try: |
| 140 import django |
| 141 from logilab.common.modutils import modpath_from_file, load_module_from_modpath |
| 142 DJANGO_FOUND = True |
| 143 except ImportError: |
| 144 DJANGO_FOUND = False |
| 145 |
| 146 CONF_FILE = 'pytestconf.py' |
| 147 |
| 148 ## coverage hacks, do not read this, do not read this, do not read this |
| 149 |
| 150 # hey, but this is an aspect, right ?!!! |
| 151 class TraceController(object): |
| 152 nesting = 0 |
| 153 |
| 154 def pause_tracing(cls): |
| 155 if not cls.nesting: |
| 156 cls.tracefunc = staticmethod(getattr(sys, '__settrace__', sys.settrace)) |
| 157 cls.oldtracer = getattr(sys, '__tracer__', None) |
| 158 sys.__notrace__ = True |
| 159 cls.tracefunc(None) |
| 160 cls.nesting += 1 |
| 161 pause_tracing = classmethod(pause_tracing) |
| 162 |
| 163 def resume_tracing(cls): |
| 164 cls.nesting -= 1 |
| 165 assert cls.nesting >= 0 |
| 166 if not cls.nesting: |
| 167 cls.tracefunc(cls.oldtracer) |
| 168 delattr(sys, '__notrace__') |
| 169 resume_tracing = classmethod(resume_tracing) |
| 170 |
| 171 |
| 172 pause_tracing = TraceController.pause_tracing |
| 173 resume_tracing = TraceController.resume_tracing |
| 174 |
| 175 |
| 176 def nocoverage(func): |
| 177 if hasattr(func, 'uncovered'): |
| 178 return func |
| 179 func.uncovered = True |
| 180 def not_covered(*args, **kwargs): |
| 181 pause_tracing() |
| 182 try: |
| 183 return func(*args, **kwargs) |
| 184 finally: |
| 185 resume_tracing() |
| 186 not_covered.uncovered = True |
| 187 return not_covered |
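| |
| # illustrative usage: run a helper without recording coverage data for it |
| # (assignment form keeps pre-2.4 compatibility, as elsewhere in this module) |
| # |
| # def load_big_fixture(): |
| # ... |
| # load_big_fixture = nocoverage(load_big_fixture) |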
| 188 |
| 189 |
| 190 ## end of coverage hacks |
| 191 |
| 192 |
| 193 TESTFILE_RE = re.compile(r"^((unit)?test.*|smoketest)\.py$") |
| 194 def this_is_a_testfile(filename): |
| 195 """returns True if `filename` seems to be a test file""" |
| 196 return TESTFILE_RE.match(osp.basename(filename)) |
| 197 |
| 198 TESTDIR_RE = re.compile("^(unit)?tests?$") |
| 199 def this_is_a_testdir(dirpath): |
| 200 """returns True if `filename` seems to be a test directory""" |
| 201 return TESTDIR_RE.match(osp.basename(dirpath)) |
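| |
| # illustrative matches: 'test_foo.py', 'unittest_foo.py' and 'smoketest.py' |
| # are test files; 'test', 'tests' and 'unittests' are test directories, |
| # while e.g. 'foo_test.py' and 'testing' are neither |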
| 202 |
| 203 |
| 204 def load_pytest_conf(path, parser): |
| 205 """loads a ``pytestconf.py`` file and update default parser |
| 206 and / or tester. |
| 207 """ |
| 208 namespace = {} |
| 209 execfile(path, namespace) |
| 210 if 'update_parser' in namespace: |
| 211 namespace['update_parser'](parser) |
| 212 return namespace.get('CustomPyTester', PyTester) |
| 213 |
| 214 |
| 215 def project_root(parser, projdir=os.getcwd()): |
| 216 """try to find project's root and add it to sys.path""" |
| 217 previousdir = curdir = osp.abspath(projdir) |
| 218 testercls = PyTester |
| 219 conf_file_path = osp.join(curdir, CONF_FILE) |
| 220 if osp.isfile(conf_file_path): |
| 221 testercls = load_pytest_conf(conf_file_path, parser) |
| 222 while this_is_a_testdir(curdir) or \ |
| 223 osp.isfile(osp.join(curdir, '__init__.py')): |
| 224 newdir = osp.normpath(osp.join(curdir, os.pardir)) |
| 225 if newdir == curdir: |
| 226 break |
| 227 previousdir = curdir |
| 228 curdir = newdir |
| 229 conf_file_path = osp.join(curdir, CONF_FILE) |
| 230 if osp.isfile(conf_file_path): |
| 231 testercls = load_pytest_conf(conf_file_path, parser) |
| 232 return previousdir, testercls |
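| |
| # e.g. for a hypothetical /proj/pkg/tests (with /proj/pkg a python package), |
| # the loop above climbs while the current directory is a test dir or a |
| # package, returning the last such directory, /proj/pkg, plus the tester |
| # class to use |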
| 233 |
| 234 |
| 235 class GlobalTestReport(object): |
| 236 """this class holds global test statistics""" |
| 237 def __init__(self): |
| 238 self.ran = 0 |
| 239 self.skipped = 0 |
| 240 self.failures = 0 |
| 241 self.errors = 0 |
| 242 self.ttime = 0 |
| 243 self.ctime = 0 |
| 244 self.modulescount = 0 |
| 245 self.errmodules = [] |
| 246 |
| 247 def feed(self, filename, testresult, ttime, ctime): |
| 248 """integrates new test information into internal statistics""" |
| 249 ran = testresult.testsRun |
| 250 self.ran += ran |
| 251 self.skipped += len(getattr(testresult, 'skipped', ())) |
| 252 self.failures += len(testresult.failures) |
| 253 self.errors += len(testresult.errors) |
| 254 self.ttime += ttime |
| 255 self.ctime += ctime |
| 256 self.modulescount += 1 |
| 257 if not testresult.wasSuccessful(): |
| 258 problems = len(testresult.failures) + len(testresult.errors) |
| 259 self.errmodules.append((filename[:-3], problems, ran)) |
| 260 |
| 261 def failed_to_test_module(self, filename): |
| 262 """called when the test module could not be imported by unittest |
| 263 """ |
| 264 self.errors += 1 |
| 265 self.modulescount += 1 |
| 266 self.ran += 1 |
| 267 self.errmodules.append((filename[:-3], 1, 1)) |
| 268 |
| 269 def skip_module(self, filename): |
| 270 self.modulescount += 1 |
| 271 self.ran += 1 |
| 272 self.errmodules.append((filename[:-3], 0, 0)) |
| 273 |
| 274 def __str__(self): |
| 275 """this is just presentation stuff""" |
| 276 line1 = ['Ran %s test cases in %.2fs (%.2fs CPU)' |
| 277 % (self.ran, self.ttime, self.ctime)] |
| 278 if self.errors: |
| 279 line1.append('%s errors' % self.errors) |
| 280 if self.failures: |
| 281 line1.append('%s failures' % self.failures) |
| 282 if self.skipped: |
| 283 line1.append('%s skipped' % self.skipped) |
| 284 modulesok = self.modulescount - len(self.errmodules) |
| 285 if self.errors or self.failures: |
| 286 line2 = '%s modules OK (%s failed)' % (modulesok, |
| 287 len(self.errmodules)) |
| 288 descr = ', '.join(['%s [%s/%s]' % info for info in self.errmodules]) |
| 289 line3 = '\nfailures: %s' % descr |
| 290 elif modulesok: |
| 291 line2 = 'All %s modules OK' % modulesok |
| 292 line3 = '' |
| 293 else: |
| 294 return '' |
| 295 return '%s\n%s%s' % (', '.join(line1), line2, line3) |
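| |
| # sample report (illustrative values): |
| # |
| # Ran 42 test cases in 0.50s (0.40s CPU), 2 errors, 1 failures |
| # 3 modules OK (1 failed) |
| # failures: test_foo [3/10] |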
| 296 |
| 297 |
| 298 |
| 299 def remove_local_modules_from_sys(testdir): |
| 300 """remove all modules from cache that come from `testdir` |
| 301 |
| 302 This is used to avoid strange side-effects when using the |
| 303 testall() mode of pytest. |
| 304 For instance, if we run pytest on this tree:: |
| 305 |
| 306 A/test/test_utils.py |
| 307 B/test/test_utils.py |
| 308 |
| 309 we **have** to clean sys.modules to make sure the correct test_utils |
| 310 module is run in B |
| 311 """ |
| 312 for modname, mod in sys.modules.items(): |
| 313 if mod is None: |
| 314 continue |
| 315 if not hasattr(mod, '__file__'): |
| 316 # this is the case of some built-in modules like sys, imp, marshal |
| 317 continue |
| 318 modfile = mod.__file__ |
| 319 # if modfile is not an absolute path, it was probably loaded locally |
| 320 # during the tests |
| 321 if not osp.isabs(modfile) or modfile.startswith(testdir): |
| 322 del sys.modules[modname] |
| 323 |
| 324 |
| 325 |
| 326 class PyTester(object): |
| 327 """encapsulates testrun logic""" |
| 328 |
| 329 def __init__(self, cvg, options): |
| 330 self.report = GlobalTestReport() |
| 331 self.cvg = cvg |
| 332 self.options = options |
| 333 self.firstwrite = True |
| 334 self._errcode = None |
| 335 |
| 336 def show_report(self): |
| 337 """prints the report and returns appropriate exitcode""" |
| 338 # everything has been ran, print report |
| 339 print "*" * 79 |
| 340 print self.report |
| 341 |
| 342 def get_errcode(self): |
| 343 # errcode set explicitly |
| 344 if self._errcode is not None: |
| 345 return self._errcode |
| 346 return self.report.failures + self.report.errors |
| 347 |
| 348 def set_errcode(self, errcode): |
| 349 self._errcode = errcode |
| 350 errcode = property(get_errcode, set_errcode) |
| 351 |
| 352 def testall(self, exitfirst=False): |
| 353 """walks through current working directory, finds something |
| 354 which can be considered as a testdir and runs every test there |
| 355 """ |
| 356 here = os.getcwd() |
| 357 for dirname, dirs, _ in os.walk(here): |
| 358 for skipped in STD_BLACKLIST: |
| 359 if skipped in dirs: |
| 360 dirs.remove(skipped) |
| 361 basename = osp.basename(dirname) |
| 362 if this_is_a_testdir(basename): |
| 363 print "going into", dirname |
| 364 # we found a testdir, let's explore it ! |
| 365 if not self.testonedir(dirname, exitfirst): |
| 366 break |
| 367 dirs[:] = [] |
| 368 if self.report.ran == 0: |
| 369 print "no test dir found testing here:", here |
| 370 # if no test was found during the visit, consider |
| 371 # the local directory as a test directory even if |
| 372 # it doesn't have a traditional test directory name |
| 373 self.testonedir(here) |
| 374 |
| 375 def testonedir(self, testdir, exitfirst=False): |
| 376 """finds each testfile in the `testdir` and runs it |
| 377 |
| 378 return True when all tests have been executed, False if exitfirst is |
| 379 set and some test has failed. |
| 380 """ |
| 381 for filename in abspath_listdir(testdir): |
| 382 if this_is_a_testfile(filename): |
| 383 if self.options.exitfirst and not self.options.restart: |
| 384 # overwrite restart file |
| 385 try: |
| 386 restartfile = open(FILE_RESTART, "w") |
| 387 restartfile.close() |
| 388 except Exception, e: |
| 389 print >> sys.__stderr__, "Error while overwriting succeeded " \ |
| 390 "test file:", osp.join(os.getcwd(), FILE_RESTART) |
| 391 raise e |
| 392 # run test and collect information |
| 393 prog = self.testfile(filename, batchmode=True) |
| 394 if exitfirst and (prog is None or not prog.result.wasSuccessful()): |
| 395 return False |
| 396 self.firstwrite = True |
| 397 # clean local modules |
| 398 remove_local_modules_from_sys(testdir) |
| 399 return True |
| 400 |
| 401 def testfile(self, filename, batchmode=False): |
| 402 """runs every test in `filename` |
| 403 |
| 404 :param filename: an absolute path pointing to a unittest file |
| 405 """ |
| 406 here = os.getcwd() |
| 407 dirname = osp.dirname(filename) |
| 408 if dirname: |
| 409 os.chdir(dirname) |
| 410 # overwrite restart file if it has not been done already |
| 411 if self.options.exitfirst and not self.options.restart and self.firstwrite: |
| 412 try: |
| 413 restartfile = open(FILE_RESTART, "w") |
| 414 restartfile.close() |
| 415 except Exception, e: |
| 416 print >> sys.__stderr__, "Error while overwriting succeeded " \ |
| 417 "test file:", osp.join(os.getcwd(), FILE_RESTART) |
| 418 raise e |
| 419 modname = osp.basename(filename)[:-3] |
| 420 try: |
| 421 print >> sys.stderr, (' %s ' % osp.basename(filename)).center(70, '=') |
| 422 except TypeError: # < py 2.4 bw compat |
| 423 print >> sys.stderr, (' %s ' % osp.basename(filename)).center(70) |
| 424 try: |
| 425 tstart, cstart = time(), clock() |
| 426 try: |
| 427 testprog = SkipAwareTestProgram(modname, batchmode=batchmode, cvg=self.cvg, |
| 428 options=self.options, outstream=sys.stderr) |
| 429 except KeyboardInterrupt: |
| 430 raise |
| 431 except SystemExit, exc: |
| 432 self.errcode = exc.code |
| 433 raise |
| 434 except testlib.SkipTest: |
| 435 print "Module skipped:", filename |
| 436 self.report.skip_module(filename) |
| 437 return None |
| 438 except Exception: |
| 439 self.report.failed_to_test_module(filename) |
| 440 print >> sys.stderr, 'unhandled exception occurred while testing', modname |
| 441 import traceback |
| 442 traceback.print_exc(file=sys.stderr) |
| 443 return None |
| 444 |
| 445 tend, cend = time(), clock() |
| 446 ttime, ctime = (tend - tstart), (cend - cstart) |
| 447 self.report.feed(filename, testprog.result, ttime, ctime) |
| 448 return testprog |
| 449 finally: |
| 450 if dirname: |
| 451 os.chdir(here) |
| 452 |
| 453 |
| 454 |
| 455 class DjangoTester(PyTester): |
| 456 |
| 457 def load_django_settings(self, dirname): |
| 458 """try to find project's setting and load it""" |
| 459 curdir = osp.abspath(dirname) |
| 460 previousdir = curdir |
| 461 while not osp.isfile(osp.join(curdir, 'settings.py')) and \ |
| 462 osp.isfile(osp.join(curdir, '__init__.py')): |
| 463 newdir = osp.normpath(osp.join(curdir, os.pardir)) |
| 464 if newdir == curdir: |
| 465 raise AssertionError('could not find settings.py') |
| 466 previousdir = curdir |
| 467 curdir = newdir |
| 468 # late django initialization |
| 469 settings = load_module_from_modpath(modpath_from_file(osp.join(curdir, 'settings.py'))) |
| 470 from django.core.management import setup_environ |
| 471 setup_environ(settings) |
| 472 settings.DEBUG = False |
| 473 self.settings = settings |
| 474 # add settings dir to pythonpath since it's the project's root |
| 475 if curdir not in sys.path: |
| 476 sys.path.insert(1, curdir) |
| 477 |
| 478 def before_testfile(self): |
| 479 # Those imports must be done **after** setup_environ was called |
| 480 from django.test.utils import setup_test_environment |
| 481 from django.test.utils import create_test_db |
| 482 setup_test_environment() |
| 483 create_test_db(verbosity=0) |
| 484 self.dbname = self.settings.TEST_DATABASE_NAME |
| 485 |
| 486 def after_testfile(self): |
| 487 # Those imports must be done **after** setup_environ was called |
| 488 from django.test.utils import teardown_test_environment |
| 489 from django.test.utils import destroy_test_db |
| 490 teardown_test_environment() |
| 491 print 'destroying', self.dbname |
| 492 destroy_test_db(self.dbname, verbosity=0) |
| 493 |
| 494 def testall(self, exitfirst=False): |
| 495 """walks through current working directory, finds something |
| 496 which can be considered as a testdir and runs every test there |
| 497 """ |
| 498 for dirname, dirs, files in os.walk(os.getcwd()): |
| 499 for skipped in ('CVS', '.svn', '.hg'): |
| 500 if skipped in dirs: |
| 501 dirs.remove(skipped) |
| 502 if 'tests.py' in files: |
| 503 if not self.testonedir(dirname, exitfirst): |
| 504 break |
| 505 dirs[:] = [] |
| 506 else: |
| 507 basename = osp.basename(dirname) |
| 508 if basename in ('test', 'tests'): |
| 509 print "going into", dirname |
| 510 # we found a testdir, let's explore it ! |
| 511 if not self.testonedir(dirname, exitfirst): |
| 512 break |
| 513 dirs[:] = [] |
| 514 |
| 515 def testonedir(self, testdir, exitfirst=False): |
| 516 """finds each testfile in the `testdir` and runs it |
| 517 |
| 518 return True when all tests have been executed, False if exitfirst is |
| 519 set and some test has failed. |
| 520 """ |
| 521 # special django behaviour: if tests are split into several files, |
| 522 # remove the main tests.py file and test each test file separately |
| 523 testfiles = [fpath for fpath in abspath_listdir(testdir) |
| 524 if this_is_a_testfile(fpath)] |
| 525 if len(testfiles) > 1: |
| 526 try: |
| 527 testfiles.remove(osp.join(testdir, 'tests.py')) |
| 528 except ValueError: |
| 529 pass |
| 530 for filename in testfiles: |
| 531 # run test and collect information |
| 532 prog = self.testfile(filename, batchmode=True) |
| 533 if exitfirst and (prog is None or not prog.result.wasSuccessful()): |
| 534 return False |
| 535 # clean local modules |
| 536 remove_local_modules_from_sys(testdir) |
| 537 return True |
| 538 |
| 539 def testfile(self, filename, batchmode=False): |
| 540 """runs every test in `filename` |
| 541 |
| 542 :param filename: an absolute path pointing to a unittest file |
| 543 """ |
| 544 here = os.getcwd() |
| 545 dirname = osp.dirname(filename) |
| 546 if dirname: |
| 547 os.chdir(dirname) |
| 548 self.load_django_settings(dirname) |
| 549 modname = osp.basename(filename)[:-3] |
| 550 print >>sys.stderr, (' %s ' % osp.basename(filename)).center(70, '=') |
| 551 try: |
| 552 try: |
| 553 tstart, cstart = time(), clock() |
| 554 self.before_testfile() |
| 555 testprog = SkipAwareTestProgram(modname, batchmode=batchmode, cvg=self.cvg) |
| 556 tend, cend = time(), clock() |
| 557 ttime, ctime = (tend - tstart), (cend - cstart) |
| 558 self.report.feed(filename, testprog.result, ttime, ctime) |
| 559 return testprog |
| 560 except SystemExit: |
| 561 raise |
| 562 except Exception, exc: |
| 563 import traceback |
| 564 traceback.print_exc() |
| 565 self.report.failed_to_test_module(filename) |
| 566 print 'unhandled exception occurred while testing', modname |
| 567 print 'error: %s' % exc |
| 568 return None |
| 569 finally: |
| 570 self.after_testfile() |
| 571 if dirname: |
| 572 os.chdir(here) |
| 573 |
| 574 |
| 575 def make_parser(): |
| 576 """creates the OptionParser instance |
| 577 """ |
| 578 from optparse import OptionParser |
| 579 parser = OptionParser(usage=PYTEST_DOC) |
| 580 |
| 581 parser.newargs = [] |
| 582 def rebuild_cmdline(option, opt, value, parser): |
| 583 """carry the option to unittest_main""" |
| 584 parser.newargs.append(opt) |
| 585 |
| 586 def rebuild_and_store(option, opt, value, parser): |
| 587 """carry the option to unittest_main and store |
| 588 the value on current parser |
| 589 """ |
| 590 parser.newargs.append(opt) |
| 591 setattr(parser.values, option.dest, True) |
| 592 |
| 593 def capture_and_rebuild(option, opt, value, parser): |
| 594 warnings.simplefilter('ignore', DeprecationWarning) |
| 595 rebuild_cmdline(option, opt, value, parser) |
| 596 |
| 597 # pytest options |
| 598 parser.add_option('-t', dest='testdir', default=None, |
| 599 help="directory where the tests will be found") |
| 600 parser.add_option('-d', dest='dbc', default=False, |
| 601 action="store_true", help="enable design-by-contract") |
| 602 # unittest_main options provided and passed through pytest |
| 603 parser.add_option('-v', '--verbose', callback=rebuild_cmdline, |
| 604 action="callback", help="Verbose output") |
| 605 parser.add_option('-i', '--pdb', callback=rebuild_and_store, |
| 606 dest="pdb", action="callback", |
| 607 help="Enable test failure inspection (conflicts with --cov
erage)") |
| 608 parser.add_option('-x', '--exitfirst', callback=rebuild_and_store, |
| 609 dest="exitfirst", default=False, |
| 610 action="callback", help="Exit on first failure " |
| 611 "(only make sense when pytest run one test file)") |
| 612 parser.add_option('-R', '--restart', callback=rebuild_and_store, |
| 613 dest="restart", default=False, |
| 614 action="callback", |
| 615 help="Restart tests from where it failed (implies exitfirs
t) " |
| 616 "(only make sense if tests previously ran with exitfirst
only)") |
| 617 parser.add_option('--color', callback=rebuild_cmdline, |
| 618 action="callback", |
| 619 help="colorize tracebacks") |
| 620 parser.add_option('-s', '--skip', |
| 621 # XXX: I wish I could use the callback action but it |
| 622 # doesn't seem to be able to get the value |
| 623 # associated to the option |
| 624 action="store", dest="skipped", default=None, |
| 625 help="test names matching this name will be skipped " |
| 626 "to skip several patterns, use commas") |
| 627 parser.add_option('-q', '--quiet', callback=rebuild_cmdline, |
| 628 action="callback", help="Minimal output") |
| 629 parser.add_option('-P', '--profile', default=None, dest='profile', |
| 630 help="Profile execution and store data in the given file") |
| 631 parser.add_option('-m', '--match', default=None, dest='tags_pattern', |
| 632 help="only execute test whose tag match the current patter
n") |
| 633 |
| 634 try: |
| 635 from logilab.devtools.lib.coverage import Coverage |
| 636 parser.add_option('--coverage', dest="coverage", default=False, |
| 637 action="store_true", |
| 638 help="run tests with pycoverage (conflicts with --pdb)
") |
| 639 except ImportError: |
| 640 pass |
| 641 |
| 642 if DJANGO_FOUND: |
| 643 parser.add_option('-J', '--django', dest='django', default=False, |
| 644 action="store_true", |
| 645 help='use pytest for django test cases') |
| 646 return parser |
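| |
| # illustrative wiring, mirroring what run() does further below: |
| # |
| # parser = make_parser() |
| # rootdir, testercls = project_root(parser) |
| # options, explicitfile = parseargs(parser) |
| # tester = testercls(None, options) |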
| 647 |
| 648 |
| 649 def parseargs(parser): |
| 650 """Parse the command line and return (options processed), (options to pass t
o |
| 651 unittest_main()), (explicitfile or None). |
| 652 """ |
| 653 # parse the command line |
| 654 options, args = parser.parse_args() |
| 655 if options.pdb and getattr(options, 'coverage', False): |
| 656 parser.error("'pdb' and 'coverage' options are exclusive") |
| 657 filenames = [arg for arg in args if arg.endswith('.py')] |
| 658 if filenames: |
| 659 if len(filenames) > 1: |
| 660 parser.error("only one filename is acceptable") |
| 661 explicitfile = filenames[0] |
| 662 args.remove(explicitfile) |
| 663 else: |
| 664 explicitfile = None |
| 665 # someone wants DBC |
| 666 testlib.ENABLE_DBC = options.dbc |
| 667 newargs = parser.newargs |
| 668 if options.skipped: |
| 669 newargs.extend(['--skip', options.skipped]) |
| 670 # restart implies exitfirst |
| 671 if options.restart: |
| 672 options.exitfirst = True |
| 673 # append additional args to the new sys.argv and let unittest_main |
| 674 # do the rest |
| 675 newargs += args |
| 676 return options, explicitfile |
| 677 |
| 678 |
| 679 |
| 680 def run(): |
| 681 parser = make_parser() |
| 682 rootdir, testercls = project_root(parser) |
| 683 options, explicitfile = parseargs(parser) |
| 684 # mock a new command line |
| 685 sys.argv[1:] = parser.newargs |
| 686 covermode = getattr(options, 'coverage', None) |
| 687 cvg = None |
| 688 if '' not in sys.path: |
| 689 sys.path.insert(0, '') |
| 690 if covermode: |
| 691 # control_import_coverage(rootdir) |
| 692 from logilab.devtools.lib.coverage import Coverage |
| 693 cvg = Coverage([rootdir]) |
| 694 cvg.erase() |
| 695 cvg.start() |
| 696 if DJANGO_FOUND and options.django: |
| 697 tester = DjangoTester(cvg, options) |
| 698 else: |
| 699 tester = testercls(cvg, options) |
| 700 if explicitfile: |
| 701 cmd, args = tester.testfile, (explicitfile,) |
| 702 elif options.testdir: |
| 703 cmd, args = tester.testonedir, (options.testdir, options.exitfirst) |
| 704 else: |
| 705 cmd, args = tester.testall, (options.exitfirst,) |
| 706 try: |
| 707 try: |
| 708 if options.profile: |
| 709 import hotshot |
| 710 prof = hotshot.Profile(options.profile) |
| 711 prof.runcall(cmd, *args) |
| 712 prof.close() |
| 713 print 'profile data saved in', options.profile |
| 714 else: |
| 715 cmd(*args) |
| 716 except SystemExit: |
| 717 raise |
| 718 except: |
| 719 import traceback |
| 720 traceback.print_exc() |
| 721 finally: |
| 722 if covermode: |
| 723 cvg.stop() |
| 724 cvg.save() |
| 725 tester.show_report() |
| 726 if covermode: |
| 727 print 'coverage information stored, use it with pycoverage -ra' |
| 728 sys.exit(tester.errcode) |
| 729 |
| 730 class SkipAwareTestProgram(unittest.TestProgram): |
| 731 # XXX: don't try to stay close to unittest.py, use optparse |
| 732 USAGE = """\ |
| 733 Usage: %(progName)s [options] [test] [...] |
| 734 |
| 735 Options: |
| 736 -h, --help Show this message |
| 737 -v, --verbose Verbose output |
| 738 -i, --pdb Enable test failure inspection |
| 739 -x, --exitfirst Exit on first failure |
| 740 -s, --skip skip test matching this pattern (no regexp for now) |
| 741 -q, --quiet Minimal output |
| 742 --color colorize tracebacks |
| 743 |
| 744 -m, --match Run only test whose tag match this pattern |
| 745 |
| 746 -P, --profile FILE: Run the tests using cProfile and saving results |
| 747 in FILE |
| 748 |
| 749 Examples: |
| 750 %(progName)s - run default set of tests |
| 751 %(progName)s MyTestSuite - run suite 'MyTestSuite' |
| 752 %(progName)s MyTestCase.testSomething - run MyTestCase.testSomething |
| 753 %(progName)s MyTestCase - run all 'test*' test methods |
| 754 in MyTestCase |
| 755 """ |
| 756 def __init__(self, module='__main__', defaultTest=None, batchmode=False, |
| 757 cvg=None, options=None, outstream=sys.stderr): |
| 758 self.batchmode = batchmode |
| 759 self.cvg = cvg |
| 760 self.options = options |
| 761 self.outstream = outstream |
| 762 super(SkipAwareTestProgram, self).__init__( |
| 763 module=module, defaultTest=defaultTest, |
| 764 testLoader=NonStrictTestLoader()) |
| 765 |
| 766 def parseArgs(self, argv): |
| 767 self.pdbmode = False |
| 768 self.exitfirst = False |
| 769 self.skipped_patterns = [] |
| 770 self.test_pattern = None |
| 771 self.tags_pattern = None |
| 772 self.colorize = False |
| 773 self.profile_name = None |
| 774 import getopt |
| 775 try: |
| 776 options, args = getopt.getopt(argv[1:], 'hHvixrqcp:s:m:P:', |
| 777 ['help', 'verbose', 'quiet', 'pdb', |
| 778 'exitfirst', 'restart', |
| 779 'skip=', 'color', 'match=', 'profile=']) |
| 780 for opt, value in options: |
| 781 if opt in ('-h', '-H', '--help'): |
| 782 self.usageExit() |
| 783 if opt in ('-i', '--pdb'): |
| 784 self.pdbmode = True |
| 785 if opt in ('-x', '--exitfirst'): |
| 786 self.exitfirst = True |
| 787 if opt in ('-r', '--restart'): |
| 788 self.restart = True |
| 789 self.exitfirst = True |
| 790 if opt in ('-q', '--quiet'): |
| 791 self.verbosity = 0 |
| 792 if opt in ('-v', '--verbose'): |
| 793 self.verbosity = 2 |
| 794 if opt in ('-s', '--skip'): |
| 795 self.skipped_patterns = [pat.strip() for pat in |
| 796 value.split(',')] |
| 797 if opt == '--color': |
| 798 self.colorize = True |
| 799 if opt in ('-m', '--match'): |
| 800 #self.tags_pattern = value |
| 801 self.options["tag_pattern"] = value |
| 802 if opt in ('-P', '--profile'): |
| 803 self.profile_name = value |
| 804 self.testLoader.skipped_patterns = self.skipped_patterns |
| 805 if len(args) == 0 and self.defaultTest is None: |
| 806 suitefunc = getattr(self.module, 'suite', None) |
| 807 if isinstance(suitefunc, (types.FunctionType, |
| 808 types.MethodType)): |
| 809 self.test = self.module.suite() |
| 810 else: |
| 811 self.test = self.testLoader.loadTestsFromModule(self.module) |
| 812 return |
| 813 if len(args) > 0: |
| 814 self.test_pattern = args[0] |
| 815 self.testNames = args |
| 816 else: |
| 817 self.testNames = (self.defaultTest, ) |
| 818 self.createTests() |
| 819 except getopt.error, msg: |
| 820 self.usageExit(msg) |
| 821 |
| 822 def runTests(self): |
| 823 if self.profile_name: |
| 824 import cProfile |
| 825 cProfile.runctx('self._runTests()', globals(), locals(), self.profile_name) |
| 826 else: |
| 827 return self._runTests() |
| 828 |
| 829 def _runTests(self): |
| 830 self.testRunner = SkipAwareTextTestRunner(verbosity=self.verbosity, |
| 831 stream=self.outstream, |
| 832 exitfirst=self.exitfirst, |
| 833 pdbmode=self.pdbmode, |
| 834 cvg=self.cvg, |
| 835 test_pattern=self.test_pattern, |
| 836 skipped_patterns=self.skipped_patterns, |
| 837 colorize=self.colorize, |
| 838 batchmode=self.batchmode, |
| 839 options=self.options) |
| 840 |
| 841 def removeSucceededTests(obj, succTests): |
| 842 """ Recursive function that removes succTests from |
| 843 a TestSuite or TestCase |
| 844 """ |
| 845 if isinstance(obj, unittest.TestSuite): |
| 846 removeSucceededTests(obj._tests, succTests) |
| 847 if isinstance(obj, list): |
| 848 for el in obj[:]: |
| 849 if isinstance(el, unittest.TestSuite): |
| 850 removeSucceededTests(el, succTests) |
| 851 elif isinstance(el, unittest.TestCase): |
| 852 descr = '.'.join((el.__class__.__module__, |
| 853 el.__class__.__name__, |
| 854 el._testMethodName)) |
| 855 if descr in succTests: |
| 856 obj.remove(el) |
| 857 # take care, self.options may be None |
| 858 if getattr(self.options, 'restart', False): |
| 859 # retrieve succeeded tests from FILE_RESTART |
| 860 try: |
| 861 restartfile = open(FILE_RESTART, 'r') |
| 862 try: |
| 863 succeededtests = list(elem.rstrip('\n\r') for elem in |
| 864 restartfile.readlines()) |
| 865 removeSucceededTests(self.test, succeededtests) |
| 866 finally: |
| 867 restartfile.close() |
| 868 except Exception, ex: |
| 869 raise Exception("Error while reading succeeded tests into %s: %s
" |
| 870 % (osp.join(os.getcwd(), FILE_RESTART), ex)) |
| 871 |
| 872 result = self.testRunner.run(self.test) |
| 873 # help garbage collection: we want TestSuite, which hold refs to every |
| 874 # executed TestCase, to be gc'ed |
| 875 del self.test |
| 876 if getattr(result, "debuggers", None) and \ |
| 877 getattr(self, "pdbmode", None): |
| 878 start_interactive_mode(result) |
| 879 if not getattr(self, "batchmode", None): |
| 880 sys.exit(not result.wasSuccessful()) |
| 881 self.result = result |
| 882 |
| 883 |
| 884 class SkipAwareTextTestRunner(unittest.TextTestRunner): |
| 885 |
| 886 def __init__(self, stream=sys.stderr, verbosity=1, |
| 887 exitfirst=False, pdbmode=False, cvg=None, test_pattern=None, |
| 888 skipped_patterns=(), colorize=False, batchmode=False, |
| 889 options=None): |
| 890 super(SkipAwareTextTestRunner, self).__init__(stream=stream, |
| 891 verbosity=verbosity) |
| 892 self.exitfirst = exitfirst |
| 893 self.pdbmode = pdbmode |
| 894 self.cvg = cvg |
| 895 self.test_pattern = test_pattern |
| 896 self.skipped_patterns = skipped_patterns |
| 897 self.colorize = colorize |
| 898 self.batchmode = batchmode |
| 899 self.options = options |
| 900 |
| 901 def _this_is_skipped(self, testedname): |
| 902 return any([(pat in testedname) for pat in self.skipped_patterns]) |
| 903 |
| 904 def _runcondition(self, test, skipgenerator=True): |
| 905 if isinstance(test, testlib.InnerTest): |
| 906 testname = test.name |
| 907 else: |
| 908 if isinstance(test, testlib.TestCase): |
| 909 meth = test._get_test_method() |
| 910 func = meth.im_func |
| 911 testname = '%s.%s' % (meth.im_class.__name__, func.__name__) |
| 912 elif isinstance(test, types.FunctionType): |
| 913 func = test |
| 914 testname = func.__name__ |
| 915 elif isinstance(test, types.MethodType): |
| 916 func = test.im_func |
| 917 testname = '%s.%s' % (test.im_class.__name__, func.__name__) |
| 918 else: |
| 919 return True # Not sure when this happens |
| 920 if testlib.is_generator(test) and skipgenerator: |
| 921 return self.does_match_tags(test) # Let inner tests decide at run time |
| 922 if self._this_is_skipped(testname): |
| 923 return False # this was explicitly skipped |
| 924 if self.test_pattern is not None: |
| 925 try: |
| 926 classpattern, testpattern = self.test_pattern.split('.') |
| 927 klass, name = testname.split('.') |
| 928 if classpattern not in klass or testpattern not in name: |
| 929 return False |
| 930 except ValueError: |
| 931 if self.test_pattern not in testname: |
| 932 return False |
| 933 |
| 934 return self.does_match_tags(test) |
| 935 |
| 936 def does_match_tags(self, test): |
| 937 if self.options is not None: |
| 938 tags_pattern = getattr(self.options, 'tags_pattern', None) |
| 939 if tags_pattern is not None: |
| 940 tags = getattr(test, 'tags', testlib.Tags()) |
| 941 if tags.inherit and isinstance(test, types.MethodType): |
| 942 tags = tags | getattr(test.im_class, 'tags', testlib.Tags()) |
| 943 return tags.match(tags_pattern) |
| 944 return True # no pattern |
| 945 |
| 946 def _makeResult(self): |
| 947 return testlib.SkipAwareTestResult(self.stream, self.descriptions, |
| 948 self.verbosity, self.exitfirst, |
| 949 self.pdbmode, self.cvg, self.colorize) |
| 950 |
| 951 def run(self, test): |
| 952 "Run the given test case or test suite." |
| 953 result = self._makeResult() |
| 954 startTime = time() |
| 955 test(result, runcondition=self._runcondition, options=self.options) |
| 956 stopTime = time() |
| 957 timeTaken = stopTime - startTime |
| 958 result.printErrors() |
| 959 if not self.batchmode: |
| 960 self.stream.writeln(result.separator2) |
| 961 run = result.testsRun |
| 962 self.stream.writeln("Ran %d test%s in %.3fs" % |
| 963 (run, run != 1 and "s" or "", timeTaken)) |
| 964 self.stream.writeln() |
| 965 if not result.wasSuccessful(): |
| 966 if self.colorize: |
| 967 self.stream.write(textutils.colorize_ansi("FAILED", color='red')) |
| 968 else: |
| 969 self.stream.write("FAILED") |
| 970 else: |
| 971 if self.colorize: |
| 972 self.stream.write(textutils.colorize_ansi("OK", color='green')) |
| 973 else: |
| 974 self.stream.write("OK") |
| 975 failed, errored, skipped = map(len, (result.failures, |
| 976 result.errors, |
| 977 result.skipped)) |
| 978 |
| 979 det_results = [] |
| 980 for name, value in (("failures", result.failures), |
| 981 ("errors",result.errors), |
| 982 ("skipped", result.skipped)): |
| 983 if value: |
| 984 det_results.append("%s=%i" % (name, len(value))) |
| 985 if det_results: |
| 986 self.stream.write(" (") |
| 987 self.stream.write(', '.join(det_results)) |
| 988 self.stream.write(")") |
| 989 self.stream.writeln("") |
| 990 return result |
| 991 |
| 992 class NonStrictTestLoader(unittest.TestLoader): |
| 993 """ |
| 994 Overrides default testloader to be able to omit classname when |
| 995 specifying tests to run on command line. |
| 996 |
| 997 For example, if the file test_foo.py contains :: |
| 998 |
| 999 class FooTC(TestCase): |
| 1000 def test_foo1(self): # ... |
| 1001 def test_foo2(self): # ... |
| 1002 def test_bar1(self): # ... |
| 1003 |
| 1004 class BarTC(TestCase): |
| 1005 def test_bar2(self): # ... |
| 1006 |
| 1007 'python test_foo.py' will run all the tests of FooTC and BarTC |
| 1008 'python test_foo.py FooTC' will run the 3 tests in FooTC |
| 1009 'python test_foo.py test_foo' will run test_foo1 and test_foo2 |
| 1010 'python test_foo.py test_foo1' will run test_foo1 |
| 1011 'python test_foo.py test_bar' will run FooTC.test_bar1 and BarTC.test_bar2 |
| 1012 """ |
| 1013 |
| 1014 def __init__(self): |
| 1015 self.skipped_patterns = () |
| 1016 |
| 1017 # some magic here: loadTestsFromName returns a (possibly empty) list of |
| 1018 # tests, so extend the resulting suite with it instead of appending |
| 1019 def loadTestsFromNames(self, names, module=None): |
| 1020 suites = [] |
| 1021 for name in names: |
| 1022 suites.extend(self.loadTestsFromName(name, module)) |
| 1023 return self.suiteClass(suites) |
| 1024 |
| 1025 def _collect_tests(self, module): |
| 1026 tests = {} |
| 1027 for obj in vars(module).values(): |
| 1028 if (issubclass(type(obj), (types.ClassType, type)) and |
| 1029 issubclass(obj, unittest.TestCase)): |
| 1030 classname = obj.__name__ |
| 1031 if classname[0] == '_' or self._this_is_skipped(classname): |
| 1032 continue |
| 1033 methodnames = [] |
| 1034 # obj is a TestCase class |
| 1035 for attrname in dir(obj): |
| 1036 if attrname.startswith(self.testMethodPrefix): |
| 1037 attr = getattr(obj, attrname) |
| 1038 if callable(attr): |
| 1039 methodnames.append(attrname) |
| 1040 # keep track of class (obj) for convenience |
| 1041 tests[classname] = (obj, methodnames) |
| 1042 return tests |
| 1043 |
| 1044 def loadTestsFromSuite(self, module, suitename): |
| 1045 try: |
| 1046 suite = getattr(module, suitename)() |
| 1047 except AttributeError: |
| 1048 return [] |
| 1049 assert hasattr(suite, '_tests'), \ |
| 1050 "%s.%s is not a valid TestSuite" % (module.__name__, suitename) |
| 1051 # python2.3 does not implement __iter__ on suites, we need to return |
| 1052 # _tests explicitly |
| 1053 return suite._tests |
| 1054 |
| 1055 def loadTestsFromName(self, name, module=None): |
| 1056 parts = name.split('.') |
| 1057 if module is None or len(parts) > 2: |
| 1058 # let the base class do its job here |
| 1059 return [super(NonStrictTestLoader, self).loadTestsFromName(name)] |
| 1060 tests = self._collect_tests(module) |
| 1061 collected = [] |
| 1062 if len(parts) == 1: |
| 1063 pattern = parts[0] |
| 1064 if callable(getattr(module, pattern, None)) and pattern not in tests: |
| 1066 # consider it as a suite |
| 1067 return self.loadTestsFromSuite(module, pattern) |
| 1068 if pattern in tests: |
| 1069 # case python unittest_foo.py MyTestTC |
| 1070 klass, methodnames = tests[pattern] |
| 1071 # instantiate one TestCase per collected method name |
| 1072 collected = [klass(methodname) |
| 1073 for methodname in methodnames] |
| 1074 else: |
| 1075 # case python unittest_foo.py something |
| 1076 for klass, methodnames in tests.values(): |
| 1077 # skip methodname if matched by skipped_patterns |
| 1078 for skip_pattern in self.skipped_patterns: |
| 1079 methodnames = [methodname |
| 1080 for methodname in methodnames |
| 1081 if skip_pattern not in methodname] |
| 1082 collected += [klass(methodname) |
| 1083 for methodname in methodnames |
| 1084 if pattern in methodname] |
| 1085 elif len(parts) == 2: |
| 1086 # case "MyClass.test_1" |
| 1087 classname, pattern = parts |
| 1088 klass, methodnames = tests.get(classname, (None, [])) |
| 1089 # instantiate a TestCase for each method matching the pattern |
| 1090 collected = [klass(methodname) for methodname in methodnames |
| 1091 if pattern in methodname] |
| 1092 return collected |
| 1093 |
| 1094 def _this_is_skipped(self, testedname): |
| 1095 return any([(pat in testedname) for pat in self.skipped_patterns]) |
| 1096 |
| 1097 def getTestCaseNames(self, testCaseClass): |
| 1098 """Return a sorted sequence of method names found within testCaseClass |
| 1099 """ |
| 1100 is_skipped = self._this_is_skipped |
| 1101 classname = testCaseClass.__name__ |
| 1102 if classname[0] == '_' or is_skipped(classname): |
| 1103 return [] |
| 1104 testnames = super(NonStrictTestLoader, self).getTestCaseNames( |
| 1105 testCaseClass) |
| 1106 return [testname for testname in testnames if not is_skipped(testname)] |
| 1107 |
| 1108 def _ts_run(self, result, runcondition=None, options=None): |
| 1109 self._wrapped_run(result, runcondition=runcondition, options=options) |
| 1110 self._tearDownPreviousClass(None, result) |
| 1111 self._handleModuleTearDown(result) |
| 1112 return result |
| 1113 |
| 1114 def _ts_wrapped_run(self, result, debug=False, runcondition=None, options=None): |
| 1115 for test in self: |
| 1116 if result.shouldStop: |
| 1117 break |
| 1118 if unittest_suite._isnotsuite(test): |
| 1119 self._tearDownPreviousClass(test, result) |
| 1120 self._handleModuleFixture(test, result) |
| 1121 self._handleClassSetUp(test, result) |
| 1122 result._previousTestClass = test.__class__ |
| 1123 if (getattr(test.__class__, '_classSetupFailed', False) or |
| 1124 getattr(result, '_moduleSetUpFailed', False)): |
| 1125 continue |
| 1126 |
| 1127 if hasattr(test, '_wrapped_run'): |
| 1128 try: |
| 1129 test._wrapped_run(result, debug, runcondition=runcondition, options=options) |
| 1130 except TypeError: |
| 1131 test._wrapped_run(result, debug) |
| 1132 elif not debug: |
| 1133 try: |
| 1134 test(result, runcondition, options) |
| 1135 except TypeError: |
| 1136 test(result) |
| 1137 else: |
| 1138 test.debug() |
| 1139 |
| 1140 |
| 1141 def enable_dbc(*args): |
| 1142 """ |
| 1143 Without arguments, return True if contracts can be enabled and should be |
| 1144 enabled (see option -d), return False otherwise. |
| 1145 |
| 1146 With arguments, return False if contracts can't or shouldn't be enabled, |
| 1147 otherwise weave ContractAspect with items passed as arguments. |
| 1148 """ |
| 1149 if not ENABLE_DBC: |
| 1150 return False |
| 1151 try: |
| 1152 from logilab.aspects.weaver import weaver |
| 1153 from logilab.aspects.lib.contracts import ContractAspect |
| 1154 except ImportError: |
| 1155 sys.stderr.write( |
| 1156 'Warning: logilab.aspects is not available. Contracts disabled.\n') |
| 1157 return False |
| 1158 for arg in args: |
| 1159 weaver.weave_module(arg, ContractAspect) |
| 1160 return True |
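| |
| # illustrative use: weave contract checking into a module at import time |
| # |
| # import mymodule |
| # enable_dbc(mymodule) |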
| 1161 |
| 1162 |
| 1163 # monkeypatch unittest and doctest (ouch !) |
| 1164 unittest._TextTestResult = testlib.SkipAwareTestResult |
| 1165 unittest.TextTestRunner = SkipAwareTextTestRunner |
| 1166 unittest.TestLoader = NonStrictTestLoader |
| 1167 unittest.TestProgram = SkipAwareTestProgram |
| 1168 |
| 1169 if sys.version_info >= (2, 4): |
| 1170 doctest.DocTestCase.__bases__ = (testlib.TestCase,) |
| 1171 # XXX check python2.6 compatibility |
| 1172 #doctest.DocTestCase._cleanups = [] |
| 1173 #doctest.DocTestCase._out = [] |
| 1174 else: |
| 1175 unittest.FunctionTestCase.__bases__ = (testlib.TestCase,) |
| 1176 unittest.TestSuite.run = _ts_run |
| 1177 unittest.TestSuite._wrapped_run = _ts_wrapped_run |