Commits

Devin Jeanpierre committed bbe90dd

Reverted changes in 284ff380f4cb -- the exported information isn't useful.

Instead, interactive doctest fixing will be done via a custom reporter.

Comments (0)

Files changed (6)

         >>> runner = DocTestRunner(verbose=False)
         >>> tests.sort(key = lambda test: test.name)
         >>> for test in tests:
-        ...     print(test.name, '->', runner.run(test).nums())
-        _TestClass -> NumericTestResults(failed=0, succeeded=2, skipped=0)
-        _TestClass.__init__ -> NumericTestResults(failed=0, succeeded=2, skipped=0)
-        _TestClass.get -> NumericTestResults(failed=0, succeeded=2, skipped=0)
-        _TestClass.square -> NumericTestResults(failed=0, succeeded=1, skipped=0)
+        ...     print(test.name, '->', runner.run(test))
+        _TestClass -> TestResults(failed=0, attempted=2)
+        _TestClass.__init__ -> TestResults(failed=0, attempted=2)
+        _TestClass.get -> TestResults(failed=0, attempted=2)
+        _TestClass.square -> TestResults(failed=0, attempted=1)
 
     The `summarize` method returns a summary of all the test cases that
     have been run by the runner:
         tally of results.
         """
         
-        failed = []
-        succeeded = []
-        skipped = []
-        
+        failures = 0
+        tries = 0
         for message in run:
             # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
             # reporting after the first failure.
             quiet = (message.optionflags & REPORT_ONLY_FIRST_FAILURE and
-                     bool(failed)) # bool because we mutate failed.
-            
+                     failures > 0)
             if isinstance(message, TestSkipped):
-                skipped.append(example)
+                pass
             elif isinstance(message, TestImminent):
                 # Record that we started this example.
                 if not quiet:
                     reporter.report_start(optionflags, test, example)
             elif isinstance(message, TestFinished):
                 optionflags, example, result, outcome = message
+                tries += 1
                 # Report the outcome.
                 if outcome is SUCCESS:
-                    succeeded.append(example)
                     if not quiet:
                         reporter.report_success(
                             optionflags, test, example, result)
                 elif outcome is FAILURE:
-                    failed.append(example)
                     if not quiet:
                         reporter.report_failure(
                             optionflags, test, example, result)
+                    failures += 1
                 elif outcome is BOOM:
-                    failed.append(example)
                     if not quiet:
                         reporter.report_unexpected_exception(
                             optionflags, test, example, result) # ew
+                    failures += 1
                 else:
                     raise AssertionError("unknown outcome", outcome)
         
         # Record and return the number of failures and tries.
-        self._record_outcome(test, len(failed), len(failed) + len(succeeded))
-        return TestResults(failed, succeeded, skipped)
+        self._record_outcome(test, failures, tries)
+        return TestResults(failures, tries)
 
     #/////////////////////////////////////////////////////////////////
     # Summarization
          ...      >>> x = 2
          ...      ''', {}, 'foo', 'foo.py', 0)
 
-         >>> runner.run(test).nums()
-         NumericTestResults(failed=0, succeeded=1, skipped=0)
+         >>> runner.run(test)
+         TestResults(failed=0, attempted=1)
 
          >>> test.globs
          {}

doctest2/tests/test_doctest.py

     >>> test = doctest.DocTestFinder().find(f)[0]
 
 The main DocTestRunner interface is the `run` method, which runs a
-given DocTest case in a given namespace (globs).  It returns a namedtuple
-`(failed, succeeded, skipped)`, where each element is a list of
-doctest Examples that failed, succeeded, and were skipped, respectively.
-It may be more convenient to call the nums() method of the namedtuple,
-which converts these to numeric counts of how many failed/succeeded/skipped.
+given DocTest case in a given namespace (globs).  It returns a namedtuple
+`TestResults(failed, attempted)`, where `failed` is the number of failed
+examples and `attempted` is the number of examples tried.
 
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
-    NumericTestResults(failed=0, succeeded=3, skipped=0)
+    >>> doctest.DocTestRunner(verbose=False).run(test)
+    TestResults(failed=0, attempted=3)
 
 If any example produces incorrect output, then the test runner reports
 the failure and proceeds to the next example:
     ...     6
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=True).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=True).run(test)
     ... # doctest: +ELLIPSIS
     Trying:
         x = 12
     Expecting:
         6
     ok
-    NumericTestResults(failed=1, succeeded=2, skipped=0)
+    TestResults(failed=1, attempted=3)
 """
     def verbose_flag(): r"""
 The `verbose` flag makes the test runner generate more detailed
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
 
-    >>> doctest.DocTestRunner(verbose=True).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=True).run(test)
     Trying:
         x = 12
     Expecting nothing
     Expecting:
         6
     ok
-    NumericTestResults(failed=0, succeeded=3, skipped=0)
+    TestResults(failed=0, attempted=3)
 
 If the `verbose` flag is unspecified, then the output will be verbose
 iff `-v` appears in sys.argv:
 
     >>> # If -v does not appear in sys.argv, then output isn't verbose.
     >>> sys.argv = ['test']
-    >>> doctest.DocTestRunner().run(test).nums()
-    NumericTestResults(failed=0, succeeded=3, skipped=0)
+    >>> doctest.DocTestRunner().run(test)
+    TestResults(failed=0, attempted=3)
 
     >>> # If -v does appear in sys.argv, then output is verbose.
     >>> sys.argv = ['test', '-v']
-    >>> doctest.DocTestRunner().run(test).nums()
+    >>> doctest.DocTestRunner().run(test)
     Trying:
         x = 12
     Expecting nothing
     Expecting:
         6
     ok
-    NumericTestResults(failed=0, succeeded=3, skipped=0)
+    TestResults(failed=0, attempted=3)
 
     >>> # Restore sys.argv
     >>> sys.argv = old_argv
     ...     ZeroDivisionError: integer division or modulo by zero
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
-    NumericTestResults(failed=0, succeeded=2, skipped=0)
+    >>> doctest.DocTestRunner(verbose=False).run(test)
+    TestResults(failed=0, attempted=2)
 
 An example may generate output before it raises an exception; if
 it does, then the traceback message will still be recognized as
     ...     ZeroDivisionError: integer division or modulo by zero
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
-    NumericTestResults(failed=0, succeeded=2, skipped=0)
+    >>> doctest.DocTestRunner(verbose=False).run(test)
+    TestResults(failed=0, attempted=2)
 
     >>> def f(x):
     ...     '''
     ...     ZeroDivisionError: integer division or modulo by zero
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums() # doctest: +ELLIPSIS
+    >>> doctest.DocTestRunner(verbose=False).run(test) # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 4, in f
     Failed example:
         Traceback (most recent call last):
           ...
         ZeroDivisionError: integer division or modulo by zero
-    NumericTestResults(failed=1, succeeded=1, skipped=0)
+    TestResults(failed=1, attempted=2)
 
 Exception messages may contain newlines:
 
     ...     message
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
-    NumericTestResults(failed=0, succeeded=1, skipped=0)
+    >>> doctest.DocTestRunner(verbose=False).run(test)
+    TestResults(failed=0, attempted=1)
 
 If an exception is expected, but an exception with the wrong type or
 message is raised, then it is reported as a failure:
     ...     ValueError: wrong message
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=False).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 3, in f
         Traceback (most recent call last):
         ...
         ValueError: message
-    NumericTestResults(failed=1, succeeded=0, skipped=0)
+    TestResults(failed=1, attempted=1)
 
 However, IGNORE_EXCEPTION_DETAIL can be used to allow a mismatch in the
 detail:
     ...     ValueError: wrong message
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
-    NumericTestResults(failed=0, succeeded=1, skipped=0)
+    >>> doctest.DocTestRunner(verbose=False).run(test)
+    TestResults(failed=0, attempted=1)
 
 IGNORE_EXCEPTION_DETAIL also ignores difference in exception formatting
 between Python versions. For example, in Python 2.x, the module path of
     ...     HTTPException: message
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=False).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 4, in f
         Traceback (most recent call last):
         ...
         http.client.HTTPException: message
-    NumericTestResults(failed=1, succeeded=1, skipped=0)
+    TestResults(failed=1, attempted=2)
 
 But in Python 3 the module path is included, and therefore a test must look
 like the following test to succeed in Python 3. But that test will fail under
     ...     http.client.HTTPException: message
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
-    NumericTestResults(failed=0, succeeded=2, skipped=0)
+    >>> doctest.DocTestRunner(verbose=False).run(test)
+    TestResults(failed=0, attempted=2)
 
 However, with IGNORE_EXCEPTION_DETAIL, the module name of the exception
 (or its unexpected absence) will be ignored:
     ...     HTTPException: message
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
-    NumericTestResults(failed=0, succeeded=2, skipped=0)
+    >>> doctest.DocTestRunner(verbose=False).run(test)
+    TestResults(failed=0, attempted=2)
 
 The module path will be completely ignored, so two different module paths will
 still pass if IGNORE_EXCEPTION_DETAIL is given. This is intentional, so it can
     ...     foo.bar.HTTPException: message
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
-    NumericTestResults(failed=0, succeeded=2, skipped=0)
+    >>> doctest.DocTestRunner(verbose=False).run(test)
+    TestResults(failed=0, attempted=2)
 
 But IGNORE_EXCEPTION_DETAIL does not allow a mismatch in the exception type:
 
     ...     TypeError: wrong type
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=False).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 3, in f
         Traceback (most recent call last):
         ...
         ValueError: message
-    NumericTestResults(failed=1, succeeded=0, skipped=0)
+    TestResults(failed=1, attempted=1)
 
 If an exception is raised but not expected, then it is reported as an
 unexpected exception:
     ...     0
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=False).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 3, in f
         Traceback (most recent call last):
         ...
         ZeroDivisionError: integer division or modulo by zero
-    NumericTestResults(failed=1, succeeded=0, skipped=0)
+    TestResults(failed=1, attempted=1)
 """
     def displayhook(): r"""
 Test that changing sys.displayhook doesn't matter for doctest.
     ...     3
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> r = doctest.DocTestRunner(verbose=False).run(test).nums()
+    >>> r = doctest.DocTestRunner(verbose=False).run(test)
     >>> post_displayhook = sys.displayhook
 
     We need to restore sys.displayhook now, so that we'll be able to test
     Ok, now we can check that everything is ok.
 
     >>> r
-    NumericTestResults(failed=0, succeeded=1, skipped=0)
+    TestResults(failed=0, attempted=1)
     >>> post_displayhook is my_displayhook
     True
 """
 
     >>> # Without the flag:
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
-    NumericTestResults(failed=0, succeeded=1, skipped=0)
+    >>> doctest.DocTestRunner(verbose=False).run(test)
+    TestResults(failed=0, attempted=1)
 
     >>> # With the flag:
     >>> test = doctest.DocTestFinder().find(f)[0]
     >>> flags = doctest.DONT_ACCEPT_TRUE_FOR_1
-    >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 2, in f
         1
     Got:
         True
-    NumericTestResults(failed=1, succeeded=0, skipped=0)
+    TestResults(failed=1, attempted=1)
 
 The DONT_ACCEPT_BLANKLINE flag disables the match between blank lines
 and the '<BLANKLINE>' marker:
 
     >>> # Without the flag:
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
-    NumericTestResults(failed=0, succeeded=1, skipped=0)
+    >>> doctest.DocTestRunner(verbose=False).run(test)
+    TestResults(failed=0, attempted=1)
 
     >>> # With the flag:
     >>> test = doctest.DocTestFinder().find(f)[0]
     >>> flags = doctest.DONT_ACCEPT_BLANKLINE
-    >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 2, in f
         a
     <BLANKLINE>
         b
-    NumericTestResults(failed=1, succeeded=0, skipped=0)
+    TestResults(failed=1, attempted=1)
 
 The NORMALIZE_WHITESPACE flag causes all sequences of whitespace to be
 treated as equal:
 
     >>> # Without the flag:
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=False).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 2, in f
          3
     Got:
         1 2 3
-    NumericTestResults(failed=1, succeeded=0, skipped=0)
+    TestResults(failed=1, attempted=1)
 
     >>> # With the flag:
     >>> test = doctest.DocTestFinder().find(f)[0]
     >>> flags = doctest.NORMALIZE_WHITESPACE
-    >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test).nums()
-    NumericTestResults(failed=0, succeeded=1, skipped=0)
+    >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
+    TestResults(failed=0, attempted=1)
 
     An example from the docs:
     >>> print(list(range(20))) #doctest: +NORMALIZE_WHITESPACE
 
     >>> # Without the flag:
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=False).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 2, in f
         [0, 1, 2, ..., 14]
     Got:
         [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
-    NumericTestResults(failed=1, succeeded=0, skipped=0)
+    TestResults(failed=1, attempted=1)
 
     >>> # With the flag:
     >>> test = doctest.DocTestFinder().find(f)[0]
     >>> flags = doctest.ELLIPSIS
-    >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test).nums()
-    NumericTestResults(failed=0, succeeded=1, skipped=0)
+    >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
+    TestResults(failed=0, attempted=1)
 
     ... also matches nothing:
 
 
     >>> # Without the flag:
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=False).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 3, in f
         e
         f
         g
-    NumericTestResults(failed=1, succeeded=0, skipped=0)
+    TestResults(failed=1, attempted=1)
 
     >>> # With the flag:
     >>> test = doctest.DocTestFinder().find(f)[0]
     >>> flags = doctest.REPORT_UDIFF
-    >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 3, in f
          f
          g
         -h
-    NumericTestResults(failed=1, succeeded=0, skipped=0)
+    TestResults(failed=1, attempted=1)
 
 The REPORT_CDIFF flag causes failures that involve multi-line expected
 and actual outputs to be displayed using a context diff:
     >>> # Reuse f() from the REPORT_UDIFF example, above.
     >>> test = doctest.DocTestFinder().find(f)[0]
     >>> flags = doctest.REPORT_CDIFF
-    >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 3, in f
         + e
           f
           g
-    NumericTestResults(failed=1, succeeded=0, skipped=0)
+    TestResults(failed=1, attempted=1)
 
 
 The REPORT_NDIFF flag causes failures to use the difflib.Differ algorithm
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
     >>> flags = doctest.REPORT_NDIFF
-    >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 3, in f
         ?                       ^
         + a b  c d e f g h i   j k l m
         ?     +              ++    ^
-    NumericTestResults(failed=1, succeeded=0, skipped=0)
+    TestResults(failed=1, attempted=1)
 
 The REPORT_ONLY_FIRST_FAILURE suppresses result output after the first
 failing example:
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
     >>> flags = doctest.REPORT_ONLY_FIRST_FAILURE
-    >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 5, in f
         200
     Got:
         2
-    NumericTestResults(failed=3, succeeded=2, skipped=0)
+    TestResults(failed=3, attempted=5)
 
 However, output from `report_start` is not suppressed:
 
-    >>> doctest.DocTestRunner(verbose=True, optionflags=flags).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=True, optionflags=flags).run(test)
     ... # doctest: +ELLIPSIS
     Trying:
         print(1) # first success
         200
     Got:
         2
-    NumericTestResults(failed=3, succeeded=2, skipped=0)
+    TestResults(failed=3, attempted=5)
 
 For the purposes of REPORT_ONLY_FIRST_FAILURE, unexpected exceptions
 count as failures:
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
     >>> flags = doctest.REPORT_ONLY_FIRST_FAILURE
-    >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 5, in f
     Exception raised:
         ...
         ValueError: 2
-    NumericTestResults(failed=3, succeeded=2, skipped=0)
+    TestResults(failed=3, attempted=5)
 
 New option flags can also be registered, via register_optionflag().  Here
 we reach into doctest's internals a bit.
     ...     [0, 1, ..., 9]
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=False).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 2, in f
         [0, 1, ..., 9]
     Got:
         [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-    NumericTestResults(failed=1, succeeded=1, skipped=0)
+    TestResults(failed=1, attempted=2)
 
 To turn an option off for an example, follow that example with a
 comment of the form ``# doctest: -OPTION``:
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
     >>> doctest.DocTestRunner(verbose=False,
-    ...                       optionflags=doctest.ELLIPSIS).run(test).nums()
+    ...                       optionflags=doctest.ELLIPSIS).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 6, in f
         [0, 1, ..., 9]
     Got:
         [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-    NumericTestResults(failed=1, succeeded=1, skipped=0)
+    TestResults(failed=1, attempted=2)
 
 Option directives affect only the example that they appear with; they
 do not change the options for surrounding examples:
     ...     [0, 1, ..., 9]
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=False).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 2, in f
         [0, 1, ..., 9]
     Got:
         [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-    NumericTestResults(failed=2, succeeded=1, skipped=0)
+    TestResults(failed=2, attempted=3)
 
 It used to be the case that option directives were parsed out of the source
 text using a regular expression, which resulted in problems.
     ...     [0, 1,  ...,   9]
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=False).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 2, in f
         [0, 1,  ...,   9]
     Got:
         [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-    NumericTestResults(failed=1, succeeded=1, skipped=0)
+    TestResults(failed=1, attempted=2)
 
     >>> def f(x): r'''
     ...     >>> print(list(range(10)))      # Should fail
     ...     [0, 1,  ...,   9]
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
+    >>> doctest.DocTestRunner(verbose=False).run(test)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 2, in f
         [0, 1,  ...,   9]
     Got:
         [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-    NumericTestResults(failed=1, succeeded=1, skipped=0)
+    TestResults(failed=1, attempted=2)
 
     >>> def f(x): r'''
     ...     >>> print(list(range(10)))      # Should fail
     ...     [0, 1,  ...,   9]
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
-    ... # doctest: +ELLIPSIS +REPORT_UDIFF
+    >>> doctest.DocTestRunner(verbose=False).run(test)
+    ... # doctest: +ELLIPSIS
     **********************************************************************
     File ..., line 2, in f
     Failed example:
         [0, 1,  ...,   9]
     Got:
         [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-    NumericTestResults(failed=1, succeeded=1, skipped=0)
+    TestResults(failed=1, attempted=2)
 
 The option directive may be put on the line following the source, as
 long as a continuation prompt is used:
     ...     [0, 1, ..., 9]
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
-    NumericTestResults(failed=0, succeeded=1, skipped=0)
+    >>> doctest.DocTestRunner(verbose=False).run(test)
+    TestResults(failed=0, attempted=1)
 
 For examples with multi-line source, the option directive may appear
 at the end of any line:
     ...      0 1 2 ... 9
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
-    NumericTestResults(failed=0, succeeded=2, skipped=0)
+    >>> doctest.DocTestRunner(verbose=False).run(test)
+    TestResults(failed=0, attempted=2)
 
 If more than one line of an example with multi-line source has an
 option directive, then they are combined:
     ...         0  1    2...9
     ...     '''
     >>> test = doctest.DocTestFinder().find(f)[0]
-    >>> doctest.DocTestRunner(verbose=False).run(test).nums()
-    NumericTestResults(failed=0, succeeded=1, skipped=0)
+    >>> doctest.DocTestRunner(verbose=False).run(test)
+    TestResults(failed=0, attempted=1)
 
 It is an error to have a comment of the form ``# doctest:`` that is
 *not* followed by words of the form ``+OPTION`` or ``-OPTION``, where
       ...    'continue', # stop debugging
       ...    ''])
 
-      >>> try: runner.run(test).nums()
+      >>> try: runner.run(test)
       ... finally: sys.stdin = real_stdin
       --Return--
       > <doctest foo-bar@baz[2]>(1)<module>()->None
       (Pdb) print(x)
       42
       (Pdb) continue
-      NumericTestResults(failed=0, succeeded=3, skipped=0)
+      TestResults(failed=0, attempted=3)
 
       You can also put pdb.set_trace in a function called from a test:
 
       ...    ''])
 
       >>> try:
-      ...     runner.run(test).nums()
+      ...     runner.run(test)
       ... finally:
       ...     sys.stdin = real_stdin
       --Return--
       (Pdb) print(x)
       1
       (Pdb) continue
-      NumericTestResults(failed=0, succeeded=2, skipped=0)
+      TestResults(failed=0, attempted=2)
 
     During interactive debugging, source code is shown, even for
     doctest examples:
       ...    'list',     # list source from example 3
       ...    'continue', # stop debugging
       ...    ''])
-      >>> try: runner.run(test).nums()
+      >>> try: runner.run(test)
       ... finally: sys.stdin = real_stdin
       ... # doctest: +NORMALIZE_WHITESPACE
       --Return--
       Expected nothing
       Got:
           9
-      NumericTestResults(failed=1, succeeded=2, skipped=0)
+      TestResults(failed=1, attempted=3)
       """
 
 def test_pdb_set_trace_nested():
     ...    ''])
 
     >>> try:
-    ...     runner.run(test).nums()
+    ...     runner.run(test)
     ... finally:
     ...     sys.stdin = real_stdin
     ... # doctest: +REPORT_NDIFF
     (Pdb) print(foo)
     *** NameError: name 'foo' is not defined
     (Pdb) continue
-    NumericTestResults(failed=0, succeeded=2, skipped=0)
+    TestResults(failed=0, attempted=2)
 """
 
 def test_DocTestSuite():
     ...     sys.argv = [arg for arg in save_argv if arg != '-v']
 
 
-    >>> doctest.testfile('test_doctest.txt').nums() # doctest: +ELLIPSIS
+    >>> doctest.testfile('test_doctest.txt') # doctest: +ELLIPSIS
     **********************************************************************
     File "...", line 6, in test_doctest.txt
     Failed example:
     1 items had failures:
        1 of   2 in test_doctest.txt
     ***Test Failed*** 1 failures.
-    NumericTestResults(failed=1, succeeded=1, skipped=0)
+    TestResults(failed=1, attempted=2)
     >>> doctest.master = None  # Reset master.
 
 (Note: we'll be clearing doctest.master after each call to
 Globals may be specified with the `globs` and `extraglobs` parameters:
 
     >>> globs = {'favorite_color': 'blue'}
-    >>> doctest.testfile('test_doctest.txt', globs=globs).nums()
-    NumericTestResults(failed=0, succeeded=2, skipped=0)
+    >>> doctest.testfile('test_doctest.txt', globs=globs)
+    TestResults(failed=0, attempted=2)
     >>> doctest.master = None  # Reset master.
 
     >>> extraglobs = {'favorite_color': 'red'}
     >>> doctest.testfile('test_doctest.txt', globs=globs,
-    ...                  extraglobs=extraglobs).nums() # doctest: +ELLIPSIS
+    ...                  extraglobs=extraglobs) # doctest: +ELLIPSIS
     **********************************************************************
     File "...", line 6, in test_doctest.txt
     Failed example:
     1 items had failures:
        1 of   2 in test_doctest.txt
     ***Test Failed*** 1 failures.
-    NumericTestResults(failed=1, succeeded=1, skipped=0)
+    TestResults(failed=1, attempted=2)
     >>> doctest.master = None  # Reset master.
 
 The file may be made relative to a given module or package, using the
 optional `module_relative` parameter:
 
     >>> doctest.testfile('test_doctest.txt', globs=globs,
-    ...                  module_relative='test').nums()
-    NumericTestResults(failed=0, succeeded=2, skipped=0)
+    ...                  module_relative='test')
+    TestResults(failed=0, attempted=2)
     >>> doctest.master = None  # Reset master.
 
 Verbosity can be increased with the optional `verbose` parameter:
 
-    >>> doctest.testfile('test_doctest.txt', globs=globs, verbose=True).nums()
+    >>> doctest.testfile('test_doctest.txt', globs=globs, verbose=True)
     Trying:
         favorite_color
     Expecting:
     2 tests in 1 items.
     2 passed and 0 failed.
     Test passed.
-    NumericTestResults(failed=0, succeeded=2, skipped=0)
+    TestResults(failed=0, attempted=2)
     >>> doctest.master = None  # Reset master.
 
 The name of the test may be specified with the optional `name`
 parameter:
 
-    >>> doctest.testfile('test_doctest.txt', name='newname').nums()
+    >>> doctest.testfile('test_doctest.txt', name='newname')
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File "...", line 6, in newname
     ...
-    NumericTestResults(failed=1, succeeded=1, skipped=0)
+    TestResults(failed=1, attempted=2)
     >>> doctest.master = None  # Reset master.
 
 The summary report may be suppressed with the optional `report`
 parameter:
 
-    >>> doctest.testfile('test_doctest.txt', report=False).nums()
+    >>> doctest.testfile('test_doctest.txt', report=False)
     ... # doctest: +ELLIPSIS
     **********************************************************************
     File "...", line 6, in test_doctest.txt
     Exception raised:
         ...
         NameError: name 'favorite_color' is not defined
-    NumericTestResults(failed=1, succeeded=1, skipped=0)
+    TestResults(failed=1, attempted=2)
     >>> doctest.master = None  # Reset master.
 
 The optional keyword argument `raise_on_error` can be used to raise an
 it's unknown which encoding is used. The encoding can be specified
 using the optional keyword argument `encoding`:
 
-    >>> doctest.testfile('test_doctest4.txt', encoding='latin-1').nums() # doctest: +ELLIPSIS
+    >>> doctest.testfile('test_doctest4.txt', encoding='latin-1') # doctest: +ELLIPSIS
     **********************************************************************
     File "...", line 7, in test_doctest4.txt
     Failed example:
     1 items had failures:
        2 of   2 in test_doctest4.txt
     ***Test Failed*** 2 failures.
-    NumericTestResults(failed=2, succeeded=0, skipped=0)
+    TestResults(failed=2, attempted=2)
     >>> doctest.master = None  # Reset master.
 
-    >>> doctest.testfile('test_doctest4.txt', encoding='utf-8').nums()
-    NumericTestResults(failed=0, succeeded=2, skipped=0)
+    >>> doctest.testfile('test_doctest4.txt', encoding='utf-8')
+    TestResults(failed=0, attempted=2)
     >>> doctest.master = None  # Reset master.
 
 Test the verbose output:
 
-    >>> doctest.testfile('test_doctest4.txt', encoding='utf-8', verbose=True).nums()
+    >>> doctest.testfile('test_doctest4.txt', encoding='utf-8', verbose=True)
     Trying:
         'föö'
     Expecting:
     2 tests in 1 items.
     2 passed and 0 failed.
     Test passed.
-    NumericTestResults(failed=0, succeeded=2, skipped=0)
+    TestResults(failed=0, attempted=2)
     >>> doctest.master = None  # Reset master.
     >>> sys.argv = save_argv
 """
 out of the binary module.
 
     >>> import unicodedata
-    >>> doctest.testmod(unicodedata, verbose=False).nums()
-    NumericTestResults(failed=0, succeeded=0, skipped=0)
+    >>> doctest.testmod(unicodedata, verbose=False)
+    TestResults(failed=0, attempted=0)
 """
 
 try:
     >>> test
     <DocTest foo-bär@baz from foo-bär@baz.py:0 (1 example)>
     >>> runner = doctest.DocTestRunner(verbose=False)
-    >>> runner.run(test).nums() # doctest: +ELLIPSIS
+    >>> runner.run(test) # doctest: +ELLIPSIS
     **********************************************************************
     File "foo-bär@baz.py", line 2, in foo-bär@baz
     Failed example:
           File "<doctest foo-bär@baz[0]>", line 1, in <module>
             raise Exception('clé')
         Exception: clé
-    NumericTestResults(failed=1, succeeded=0, skipped=0)
+    TestResults(failed=1, attempted=1)
     """
 
 ######################################################################
 def test_main():
     # Check the doctest cases defined here:
     from doctest2.tests import test_doctest
-    from doctest2 import util
-    util.run_doctest(test_doctest, verbosity=False)
+    support.run_doctest(test_doctest, verbosity=False)
 
 import sys, re, io
 

doctest2/tests/test_doctest2.py

 import sys
 import unittest
 from test import support
-from doctest2 import util
 if sys.flags.optimize >= 2:
     raise unittest.SkipTest("Cannot test docstrings with -O2")
 
 def test_main():
     from doctest2.tests import test_doctest2
     EXPECTED = 19
-    f, t = util.run_doctest(test_doctest2, verbosity=False)
+    f, t = support.run_doctest(test_doctest2, verbosity=False)
     if t != EXPECTED:
         raise support.TestFailed("expected %d tests to run, not %d" %
                                       (EXPECTED, t))

doctest2/tests/test_doctest2_shell.py

     ...
     >>> from doctest2 import constants
     >>> number_of_shells = constants.HAS_SH + constants.HAS_CMD
-    >>> test_results = run_docstring_examples(f, {}).nums()
+    >>> test_results = run_docstring_examples(f, {})
     >>> test_results.failed
     0
-    >>> test_results.succeeded == number_of_shells
+    >>> test_results.attempted == number_of_shells
     True
 
 And for testfile...
     >>> import doctest2.tests, os
     >>> from doctest2 import testfile, constants
     >>> number_of_shells = constants.HAS_SH + constants.HAS_CMD
-    >>> test_results = testfile('test_doctest2_shell2.py').nums()
+    >>> test_results = testfile('test_doctest2_shell2.py')
     >>> test_results.failed
     0
-    >>> test_results.succeeded == number_of_shells
+    >>> test_results.attempted == number_of_shells
     True
 
 and testmod...
     >>> from doctest2 import testmod
     >>> from doctest2.tests import test_doctest2_shell2 as t
     >>> number_of_shells = constants.HAS_SH + constants.HAS_CMD
-    >>> test_results = testmod(t).nums()
+    >>> test_results = testmod(t)
     >>> test_results.failed
     0
-    >>> test_results.succeeded == number_of_shells
+    >>> test_results.attempted == number_of_shells
     True
 
 and the unit test api...

doctest2/tests/test_doctest_additions.py

 the correct number of tests are executed.
 
     >>> from doctest2 import run_docstring_examples
-    >>> results =  run_docstring_examples(DoctestGlobalDirectives, {}).nums()
-    >>> results.succeeded
+    >>> results =  run_docstring_examples(DoctestGlobalDirectives, {})
+    >>> results.attempted
     6
     >>> results2 =  run_docstring_examples(
-    ...     DoctestGlobalDirectivePrecedence, {}).nums()
-    >>> results2.succeeded
+    ...     DoctestGlobalDirectivePrecedence, {})
+    >>> results2.attempted
     4
 
 Just as a note, the intended use is far less arcane than these tests. The
 from .constants import *
 
 # TODO: figure out a better place for this
-class _TestResults(namedtuple('TestResults', 'failed succeeded skipped')):
+class TestResults(namedtuple('TestResults', 'failed attempted')):
     @classmethod
     def merge(cls, results):
-        failed = []
-        succeeded = []
-        skipped = []
+        failed = attempted = 0
         for r in results:
             failed += r.failed
-            succeeded += r.succeeded
-            skipped += r.skipped
+            attempted += r.attempted
         
-        return cls(failed, succeeded, skipped)
-
-class NumericTestResults(_TestResults): pass
-
-class TestResults(_TestResults):
-    def nums(self):
-        return NumericTestResults(*map(len, self))
+        return cls(failed, attempted)
 
 def _extract_future_flags(globs):
     """
     save_stdout = sys.stdout
     sys.stdout = get_original_stdout()
     try:
-        r = doctest2.testmod(module, verbose=verbosity).nums()
-        failed, succeeded, skipped = r
-        if failed:
-            raise support.TestFailed("%d of %d doctests failed" %
-                (failed, failed + succeeded))
+        f, t = doctest2.testmod(module, verbose=verbosity)
+        if f:
+            raise support.TestFailed("%d of %d doctests failed" % (f, t))
     finally:
         sys.stdout = save_stdout
     if support.verbose:
         print('doctest2 (%s) ... %d tests with zero failures' %
-            (module.__name__, succeeded))
-    return failed, succeeded
+            (module.__name__, t))
+    return f, t
 
 def builder(constructor):
     """Create a container using a generator.