class DocTestRunner:
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> tests.sort(key = lambda test: test.name)
>>> for test in tests:
... print test.name, '->', runner.run(test)
_TestClass -> (0, 2)
_TestClass.__init__ -> (0, 2)
_TestClass.get -> (0, 2)
_TestClass.square -> (0, 1)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
(0, 7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
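For example, a checker subclass can normalize output before delegating to the base comparison. A minimal sketch, assuming this compat module is importable as `doctest` (the whitespace normalization is illustrative, not part of the module):

    import doctest

    class NormalizingChecker(doctest.OutputChecker):
        # Hypothetical checker: collapse whitespace runs before comparing.
        def check_output(self, want, got, optionflags):
            if ' '.join(want.split()) == ' '.join(got.split()):
                return True
            # Otherwise fall back to the standard comparison (which
            # honors the option flags).
            return doctest.OutputChecker.check_output(
                self, want, got, optionflags)

    runner = doctest.DocTestRunner(checker=NormalizingChecker(),
                                   verbose=False)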
The test runner's display output can be controlled in two ways.
First, an output function (`out`) can be passed to
`DocTestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
can also be customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
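Both hooks are easy to exercise. A minimal sketch, assuming the module is importable as `doctest` (`some_module` is a placeholder for whatever object you collect tests from):

    import doctest

    # 1. Capture report output by passing a writer function to `run`:
    chunks = []
    runner = doctest.DocTestRunner(verbose=False)
    for test in doctest.DocTestFinder().find(some_module):
        runner.run(test, out=chunks.append)
    report = ''.join(chunks)

    # 2. Or subclass the runner to change how events are reported;
    # this hypothetical subclass silences per-example chatter while
    # keeping the default failure reports:
    class QuietRunner(doctest.DocTestRunner):
        def report_start(self, out, test, example):
            pass
        def report_success(self, out, test, example, got):
            pass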
class attributes and properties:
DIVIDER: **********************************************************************
methods:
def __init__(self, checker=None, verbose=None, optionflags=0):
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg `verbose` prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
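For instance, flags are combined with bitwise OR, as with `testmod`. A short sketch, assuming the module is importable as `doctest` and defines the standard doctest flag constants:

    import doctest

    flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
    runner = doctest.DocTestRunner(verbose=False, optionflags=flags)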
arguments:
return value:
<None>
source: compat/doctest.py
    def __init__(self, checker=None, verbose=None, optionflags=0):
        """
        Create a new test runner.

        Optional keyword arg `checker` is the `OutputChecker` that
        should be used to compare the expected outputs and actual
        outputs of doctest examples.

        Optional keyword arg `verbose` prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.

        Optional argument `optionflags` can be used to control how the
        test runner compares expected output to actual output, and how
        it displays failures.  See the documentation for `testmod` for
        more information.
        """
        self._checker = checker or OutputChecker()
        if verbose is None:
            verbose = '-v' in sys.argv
        self._verbose = verbose
        self.optionflags = optionflags
        self.original_optionflags = optionflags

        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        self._name2ft = {}

        # Create a fake output target for capturing doctest output.
        self._fakeout = _SpoofOut()
def merge(self, other):
No docstring in the source. From the code below: merges `other`'s
per-test-name `(f, t)` counts into this runner, summing the counts
(with a printed warning) for names present in both.
arguments:
return value:
<None>
source: compat/doctest.py
    def merge(self, other):
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                print "*** DocTestRunner.merge: '" + name + "' in both" \
                      " testers; summing outcomes."
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
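A usage sketch for `merge`, assuming the module is importable as `doctest`:

    import doctest

    r1 = doctest.DocTestRunner(verbose=False)
    r2 = doctest.DocTestRunner(verbose=False)
    # ... r1.run(...) and r2.run(...) on different DocTest cases ...
    r1.merge(r2)             # r1 now holds the combined per-test counts
    r1.summarize(verbose=0)  # the summary reflects both runners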
def report_failure(self, out, test, example, got):
Report that the given example failed.
arguments:
- self: <UNKNOWN>
- out: <UNKNOWN>
- test: <UNKNOWN>
- example: <UNKNOWN>
- got: <UNKNOWN>
return value:
<UNKNOWN>
source: compat/doctest.py
    def report_failure(self, out, test, example, got):
        """
        Report that the given example failed.
        """
        out(self._failure_header(test, example) +
            self._checker.output_difference(example, got, self.optionflags))
def report_start(self, out, test, example):
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
arguments:
return value:
<None>
source: compat/doctest.py
    def report_start(self, out, test, example):
        """
        Report that the test runner is about to process the given
        example.  (Only displays a message if verbose=True)
        """
        if self._verbose:
            if example.want:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting:\n' + _indent(example.want))
            else:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting nothing\n')
def report_success(self, out, test, example, got):
Report that the given example ran successfully. (Only
displays a message if verbose=True)
arguments:
- self: <UNKNOWN>
- out: <UNKNOWN>
- test: <UNKNOWN>
- example: <UNKNOWN>
- got: <UNKNOWN>
return value:
<UNKNOWN>
source: compat/doctest.py
    def report_success(self, out, test, example, got):
        """
        Report that the given example ran successfully.  (Only
        displays a message if verbose=True)
        """
        if self._verbose:
            out("ok\n")
def report_unexpected_exception(self, out, test, example, exc_info):
Report that the given example raised an unexpected exception.
arguments:
- self: <UNKNOWN>
- out: <UNKNOWN>
- test: <UNKNOWN>
- example: <UNKNOWN>
- exc_info: <UNKNOWN>
return value:
<UNKNOWN>
source: compat/doctest.py
    def report_unexpected_exception(self, out, test, example, exc_info):
        """
        Report that the given example raised an unexpected exception.
        """
        out(self._failure_header(test, example) +
            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def run(self, test, compileflags=None, out=None, clear_globs=True):
Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
The output of each example is checked by the runner's
`OutputChecker` (via its `check_output` method), and the results
are formatted by the `DocTestRunner.report_*` methods.
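A usage sketch, assuming the module is importable as `doctest` (`some_module` is a placeholder):

    import doctest

    runner = doctest.DocTestRunner(verbose=False)
    for test in doctest.DocTestFinder().find(some_module):
        f, t = runner.run(test, clear_globs=False)
        # With clear_globs=False, test.globs survives the run and can
        # be inspected here, e.g. to see names the examples defined.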
arguments:
return value:
<Tuple>
source: compat/doctest.py
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.

        The examples are run in the namespace `test.globs`.  If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection.  If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.

        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples.  If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.

        The output of each example is checked by the runner's
        `OutputChecker`, and the results are formatted by the
        `DocTestRunner.report_*` methods.
        """
        self.test = test

        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)

        # Capture stdout while the examples run; `out` (default: the
        # real stdout's write) is used only for the runner's reports.
        save_stdout = sys.stdout
        if out is None:
            out = save_stdout.write
        sys.stdout = self._fakeout

        # Patch pdb.set_trace so that interactive debugging started
        # inside an example writes to the real stdout instead of the
        # capture buffer.
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace

        # Patch linecache.getlines so that tracebacks can show the
        # source of the doctest examples themselves.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines

        try:
            return self.__run(test, compileflags, out)
        finally:
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            linecache.getlines = self.save_linecache_getlines
            if clear_globs:
                test.globs.clear()
def summarize(self, verbose=None):
Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then the
DocTestRunner's verbosity is used.
arguments:
return value:
<Tuple>
source: compat/doctest.py
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.

        The optional `verbose` argument controls how detailed the
        summary is.  If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        if verbose:
            if notests:
                print len(notests), "items had no tests:"
                notests.sort()
                for thing in notests:
                    print "   ", thing
            if passed:
                print len(passed), "items passed all tests:"
                passed.sort()
                for thing, count in passed:
                    print " %3d tests in %s" % (count, thing)
        if failed:
            print self.DIVIDER
            print len(failed), "items had failures:"
            failed.sort()
            for thing, (f, t) in failed:
                print " %3d of %3d in %s" % (f, t, thing)
        if verbose:
            print totalt, "tests in", len(self._name2ft), "items."
            print totalt - totalf, "passed and", totalf, "failed."
        if totalf:
            print "***Test Failed***", totalf, "failures."
        elif verbose:
            print "Test passed."
        return totalf, totalt