Commits

akalias committed 7291335

update to run_tests.py, show silenced std(err|out) on error

  • Parent commits d3741cc

Comments (0)
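
The mechanism behind this change lives in the new unittest_patch.py below: sys.stdout and sys.stderr are swapped for StringIO buffers around each test, and printErrorList replays the captured text only when the test errors or fails and the new -w/--show_output option is set. A minimal, self-contained sketch of that capture pattern; the helper and test names here are invented for illustration, and the real patch does this inside a replacement TestCase.run (unittest_patch.TestCase_run), not a wrapper function:

    import sys, StringIO

    def capture(fn):
        # Run fn() with stdout/stderr silenced; return (ok, stdout, stderr).
        real_err, real_out = sys.stderr, sys.stdout
        sys.stderr, sys.stdout = StringIO.StringIO(), StringIO.StringIO()
        ok = True
        try:
            fn()
        except Exception:
            ok = False
        finally:
            err, out = sys.stderr, sys.stdout
            sys.stderr, sys.stdout = real_err, real_out
        return ok, out.getvalue(), err.getvalue()

    def noisy_failing_test():
        print 'forgot to remove debug crap'   # silenced while the test runs
        assert False

    ok, out, err = capture(noisy_failing_test)
    if not ok and out:
        print 'STDOUT:\n%s' % out             # replayed only because the test failed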

Files changed (4)

File run_tests.py

 #################################### IMPORTS ###################################
+# TODO: clean up imports
 
 import sys, os, re, unittest, subprocess, time, optparse
-import pygame.threads 
+import pygame.threads, pygame
 
-from test_runner import run_test, get_test_results, TEST_RESULTS_START,\
-                        prepare_test_env, from_namespace, many_modules_key,\
-                        count, test_failures
-
+from test_runner import *
 from pprint import pformat
 
 main_dir, test_subdir, fake_test_subdir = prepare_test_env()
 test_runner_py = os.path.join(main_dir, "test_runner.py")
 
-import test_utils
+import test_utils, unittest_patch
 
 ################################### CONSTANTS ##################################
 # Defaults:
-#    See optparse options below for more options
+#    See optparse options below for more options (test_runner.py)
 #
 
 # If an xxxx_test.py takes longer than TIME_OUT seconds it will be killed
 )
 
 ################################################################################
-# Human readable output
-#
-
-COMPLETE_FAILURE_TEMPLATE = """
-======================================================================
-ERROR: all_tests_for (%(module)s.AllTestCases)
-----------------------------------------------------------------------
-Traceback (most recent call last):
-  File "test\%(module)s.py", line 1, in all_tests_for
-subprocess completely failed with return code of %(return_code)s
-cmd:          %(cmd)s
-test_env:     %(test_env)s
-working_dir:  %(working_dir)s
-return (top 5 lines):
-%(raw_return)s
-
-"""  # Leave that last empty line else build page regex won't match
-     # Text also needs to be vertically compressed
-    
-TEST_MODULE_RE = re.compile('^(.+_test)\.py$')
-
-RAN_TESTS_DIV = (70 * "-") + "\nRan"
-
-DOTS = re.compile("^([FE.]*)$", re.MULTILINE)
-
-################################################################################
 # Set the command line options
 #
 # Defined in test_runner.py as it shares options, added to here
 ################################################################################
 # Change to working directory and compile a list of test modules
 # If options.fake, then compile list of fake xxxx_test.py from run_tests__tests
-# this is used for testing subprocess output against single process mode
+
+TEST_MODULE_RE = re.compile('^(.+_test)\.py$')
 
 if options.fake:
     test_subdir = os.path.join(fake_test_subdir, options.fake )
 
 ################################################################################
 # Single process mode
-#
 
 if not options.subprocess:
-    single_results = run_test ( test_modules, options = options)
-    if options.dump: print pformat(single_results)
-    #TODO  make consistent with subprocess mode
-    else: print single_results[many_modules_key(test_modules)]['output']
+    results = {}
+    unittest_patch.patch(options)
+
+    t = time.time()
+    for module in test_modules:
+        results.update(run_test(module, options = options))
+    t = time.time() - t
 
 ################################################################################
 # Subprocess mode
 #
 
-def combine_results(all_results, t):
-    """
-
-    Return pieced together subprocessed results in a form fit for human 
-    consumption. Don't rely on results. Was originally meant for that purpose 
-    but was found to be unreliable. 
-    
-    See options.dump or options.human for reliable results.
-
-    """
-
-    all_dots = ''
-    failures = []
-
-    for module, results in sorted(all_results.items()):
-        output, return_code, raw_return = map (
-            results.get, ('output','return_code', 'raw_return')
-        )
-
-        if not output or (return_code and RAN_TESTS_DIV not in output):
-            # would this effect the original dict? TODO
-            results['raw_return'] = ''.join(raw_return.splitlines(1)[:5])
-            failures.append( COMPLETE_FAILURE_TEMPLATE % results )
-            continue
-
-        dots = DOTS.search(output).group(1)
-        all_dots += dots
-
-        if 'E' in dots or 'F' in dots:
-            failures.append( output[len(dots)+1:].split(RAN_TESTS_DIV)[0] )
-    
-    total_fails, total_errors = map(all_dots.count, 'FE')
-    total_tests = len(all_dots)
-
-    combined = [all_dots]
-    if failures: combined += [''.join(failures).lstrip('\n')[:-1]]
-    combined += ["%s %s tests in %.3fs\n" % (RAN_TESTS_DIV, total_tests, t)]
-
-    if not failures: combined += ['OK\n']
-    else: combined += [
-        'FAILED (%s)\n' % ', '.join (
-            (total_fails  and ["failures=%s" % total_fails] or []) +
-            (total_errors and ["errors=%s"  % total_errors] or [])
-        )]
-
-    return total_tests, '\n'.join(combined)
-
-################################################################################
-
 if options.subprocess:
     from async_sub import proc_in_time_or_kill
 
         test_results = get_test_results(raw_return)
         if test_results: results.update(test_results)
         else: results[module] = {}
-        
+
         add_to_results = [
             'return_code', 'raw_return',  'cmd', 'test_file',
             'test_env', 'working_dir', 'module',
         ]
-        # conditional adds here
 
         results[module].update(from_namespace(locals(), add_to_results))
+    
+    t = time.time() -t
 
-    untrusty_total, combined = combine_results(results, time.time() -t)
-    total, fails = test_failures(results)
+################################################################################
+# Output Results
+#
 
-    if not options.dump or (options.human and untrusty_total == total):
-        print combined
-    else:
-        print TEST_RESULTS_START
-        # print pformat(list(combined_errs(fails)))
-        print pformat(options.all and results or fails)
+untrusty_total, combined = combine_results(results, t)
+total, fails = test_failures(results)
+
+if not options.subprocess: assert total == untrusty_total
+
+if not options.dump or (options.human and untrusty_total == total):
+    print combined
+else:
+    print TEST_RESULTS_START
+    print pformat(options.all and results or fails)
 
 ################################################################################
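
run_tests.py no longer pieces output together itself: single process mode now patches unittest, runs each module through run_test(), and accumulates a per-module results dict that combine_results() and test_failures() consume. Roughly, that dict looks like the sketch below; the test case name and values are invented for illustration, while the field names come from RESULTS_TEMPLATE and the per-test dict filled in by unittest_patch:

    results = {
        'fake_3_test': {
            'output'    : '...TextTestRunner output...',
            'num_tests' : 2,
            'failures'  : [],   # (dot_syntax_name, traceback, file, lineno) tuples
            'errors'    : [],
            'tests'     : {
                'SomeTestCase.test_name': {            # hypothetical test
                    'time'  : 0.002,
                    'stdout': 'forgot to remove debug crap\n',  # shown on error with -w
                    'stderr': '',
                },
            },
        },
    }

In subprocess mode, run_tests.py additionally merges return_code, raw_return, cmd, test_file, test_env, working_dir and module into each entry (the add_to_results list above).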

File test/run_tests__tests/print_stdout/fake_3_test.py

 
     def test_get_pressed(self):
         print 'jibberish ruins everything'
-        self.assert_(True) 
+        self.assert_(False) 
 
     def test_name(self):
         print 'forgot to remove debug crap'

File test_runner.py

 ################################################################################
 
+#TODO: clean up imports
+
 import sys, os, re, unittest, StringIO, time, optparse
 from inspect import getdoc, getmembers, isclass
 from pprint import pformat
 
+import unittest_patch
+from unittest_patch import StringIOContents
+
 ################################################################################
 
 def prepare_test_env():
      help   = "dump failures/errors as dict ready to eval" )
 
 opt_parser.add_option (
-     "-T",  "--timings", type = 'int',
-     help   = "get timings for individual tests" )
+     "-T",  "--timings", type = 'int', default = 1, metavar = 'T',
+     help   = "get timings for individual tests.\n" 
+              "Run test T times, giving average time")
 
 opt_parser.add_option (
      "-e",  "--exclude", 
      help   = "exclude tests containing any of TAGS" )
 
 opt_parser.add_option (
+     "-w",  "--show_output", action = 'store_true',
+     help   = "show silenced stderr/stdout on errors" )
+
+opt_parser.add_option (
      "-a",  "--all", action = 'store_true',
      help   = "dump all results not just errors eg. -da" )
 
      "-H",  "--human", action = 'store_true',
      help   = "dump results as dict ready to eval if unsure "
               "that pieced together results are correct "
-              "(subprocess mode)" ) # TODO
+              "(subprocess mode)" )
 
 opt_parser.add_option (
      "-m",  "--multi_thread", metavar = 'THREADS', type = 'int',
               "default (sys.executable): %s" % sys.executable)
 
 ################################################################################
+# Human readable output
+#
+
+COMPLETE_FAILURE_TEMPLATE = """
+======================================================================
+ERROR: all_tests_for (%(module)s.AllTestCases)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+  File "test\%(module)s.py", line 1, in all_tests_for
+subprocess completely failed with return code of %(return_code)s
+cmd:          %(cmd)s
+test_env:     %(test_env)s
+working_dir:  %(working_dir)s
+return (top 5 lines):
+%(raw_return)s
+
+"""  # Leave that last empty line else build page regex won't match
+     # Text also needs to be vertically compressed
+    
+
+RAN_TESTS_DIV = (70 * "-") + "\nRan"
+
+DOTS = re.compile("^([FE.]*)$", re.MULTILINE)
+
+def combine_results(all_results, t):
+    """
+
+    Return pieced-together results in a form fit for human consumption. Don't
+    rely on these results when piecing together subprocessed results (single
+    process mode is fine); that was the original purpose, but it proved
+    unreliable. See options.dump or options.human for reliable results.
+
+    """
+
+    all_dots = ''
+    failures = []
+
+    for module, results in sorted(all_results.items()):
+        output, return_code, raw_return = map (
+            results.get, ('output','return_code', 'raw_return')
+        )
+
+        if not output or (return_code and RAN_TESTS_DIV not in output):
+            # would this affect the original dict? TODO
+            results['raw_return'] = ''.join(raw_return.splitlines(1)[:5])
+            failures.append( COMPLETE_FAILURE_TEMPLATE % results )
+            continue
+
+        dots = DOTS.search(output).group(1)
+        all_dots += dots
+
+        if 'E' in dots or 'F' in dots:
+            failures.append( output[len(dots)+1:].split(RAN_TESTS_DIV)[0] )
+    
+    total_fails, total_errors = map(all_dots.count, 'FE')
+    total_tests = len(all_dots)
+
+    combined = [all_dots]
+    if failures: combined += [''.join(failures).lstrip('\n')[:-1]]
+    combined += ["%s %s tests in %.3fs\n" % (RAN_TESTS_DIV, total_tests, t)]
+
+    if not failures: combined += ['OK\n']
+    else: combined += [
+        'FAILED (%s)\n' % ', '.join (
+            (total_fails  and ["failures=%s" % total_fails] or []) +
+            (total_errors and ["errors=%s"  % total_errors] or [])
+        )]
+
+    return total_tests, '\n'.join(combined)
+
+################################################################################
 
 TEST_RESULTS_START = "<--!! TEST RESULTS START HERE !!-->"
 TEST_RESULTS_RE = re.compile('%s\n(.*)' % TEST_RESULTS_START, re.DOTALL | re.M)
-FILE_LINENUMBER_RE = re.compile(r'File "([^"]+)", line ([0-9]+)')
 
 def get_test_results(raw_return):
     test_results = TEST_RESULTS_RE.search(raw_return)
             "BUGGY TEST RESULTS EVAL:\n %s" % test_results.group(1)
         )
 
-def count(results, *args, **kw):
-    if kw.get('single'): results = {'single' : results}
-    for arg in args:
-        all_of = [a for a in [v.get(arg) for v in results.values()] if a]
-        if not all_of: yield 0
-        else:
-            if isinstance(all_of[0], int): the_sum = all_of
-            else: the_sum = (len(v) for v in all_of)
-            yield sum(the_sum)
-
-################################################################################
-
-def redirect_output():
-    yield sys.stderr, sys.stdout
-    sys.stderr, sys.stdout = StringIO.StringIO(), StringIO.StringIO()
-    yield sys.stderr, sys.stdout
-
-def restore_output(err, out):
-    sys.stderr, sys.stdout = err, out
-
-def StringIOContents(io):
-    io.seek(0)
-    return io.read()
-
-def merged_dict(*args):
-    merged = {}
-    for arg in args: dictionary.update(arg)        
-    return merged
-
-def many_modules_key(modules):
-    return ', '.join(modules)
-
 ################################################################################
 # ERRORS
-
-unittest._TextTestResult.monkeyedFailRepr = lambda self, flavour, errors:  [
-    (
-        "%s: %s" % (flavour, self.getDescription(e[0])),     # Description
-        e[1],                                                # TraceBack
-        FILE_LINENUMBER_RE.search(e[1]).groups(),            # Blame Info
-    )
-    for e in errors
-]
+# TODO
 
 def make_complete_failure_error(result):
     return (
         "ERROR: all_tests_for (%s.AllTestCases)" % result['module'],
         "Complete Failure (ret code: %s)" % result['return_code'],
-        (result['test_file'], '1'),
+        result['test_file'], 
+        '1',
     )
-
-def combined_errs(results):
-    for result in results.itervalues():
-        combined_errs = result['errors'] + result['failures']
-        for err in combined_errs:
-            yield err
-
-# For combined results, plural, used in subprocess mode
+    
+# For combined results, plural
 def test_failures(results):
     errors = {}
-    total, = count(results, 'num_tests')
-
+    total =  sum(v.get('num_tests', 0) for v in results.values())
     for module, result in results.items():
-        num_errors = sum(count(result, 'failures', 'errors', single = 1))
-        if num_errors is 0 and result['return_code']:
+        num_errors = (
+            len(result.get('failures', [])) + len(result.get('errors', []))
+        )
+        if num_errors == 0 and result.get('return_code'):
             result.update(RESULTS_TEMPLATE)
             result['errors'].append(make_complete_failure_error(result))
             num_errors += 1
 
     return total, errors
 
-################################################################################
-# Profiling
-#
-
-#unittest.TestCase.run
-def unittest_TestCase_run(self, result=None):
-    if result is None: result = self.defaultTestResult()
-    result.startTest(self)
-    testMethod = getattr(self, self._testMethodName)
-    try:
-        t = time.time()
-
-        for i in range(self.times_run):
-            try:
-                self.setUp()
-            except KeyboardInterrupt:
-                raise
-            except:
-                result.addError(self, self._exc_info())
-                return
-
-            ok = False
-            try:
-                testMethod()
-                ok = True
-            except self.failureException:
-                result.addFailure(self, self._exc_info())
-            except KeyboardInterrupt:
-                raise
-            except:
-                result.addError(self, self._exc_info())
-            
-            try:
-                self.tearDown()
-            except KeyboardInterrupt:
-                raise
-            except:
-                result.addError(self, self._exc_info())
-                ok = False
-    
-            if ok:
-                if not i:
-                    result.addSuccess(self)
-            else: break
-        
-        t = (time.time() -t) / self.times_run
-
-        result.timings.update({repr(self): t})
-
-    finally:
-        result.stopTest(self)
-
-# unittest.TestCase.__repr__ = lambda self: (
-#     "%s.%s"% (unittest._strclass(self.__class__), self._testMethodName)
-# )
-
-def TestResult___init__(func):
-    def wrapper(self, *args, **kw):
-        func(self, *args, **kw)
-        self.timings = {}
-    return wrapper
-
-def monkeyPatchTiming(times_runtiming):
-    unittest.TestCase.run = unittest_TestCase_run
-    unittest.TestCase.times_run = times_run
-    
-    unittest.TestResult.__init__ = TestResult___init__(
-        unittest.TestResult.__init__
-    )
-
-################################################################################
-# Exclude by tags
-#
-
-TAGS_RE = re.compile(r"\|[tT]ags:([ a-zA-Z,0-9_\n]+)\|", re.M)
-
-def get_tags(obj):
-    tags = TAGS_RE.search(getdoc(obj) or '')
-    return tags and [t.strip() for t in tags.group(1).split(',')] or []
-
-def getTestCaseNames(self, testCaseClass):
-    """
-        MonkeyPatched method from unittest.TestLoader:
-            Filters test by tags
-
-        Original __doc__:
-            
-            Return a sorted sequence of method names found within testCaseClass
-            
-    """
-
-    def test_wanted(attrname, testCaseClass=testCaseClass, prefix=self.testMethodPrefix):
-        actual_attr = getattr(testCaseClass, attrname)
-        filtered = bool([t for t in get_tags(actual_attr) if t in self.exclude])
-        return ( attrname.startswith(prefix) and callable(actual_attr)
-                 and not filtered )
-
-    testFnNames = filter(test_wanted, dir(testCaseClass))
-    
-    for baseclass in testCaseClass.__bases__:
-        for testFnName in self.getTestCaseNames(baseclass):
-            if testFnName not in testFnNames:  # handle overridden methods
-                testFnNames.append(testFnName)
-
-    if self.sortTestMethodsUsing:
-        testFnNames.sort(self.sortTestMethodsUsing)
-    
-    return testFnNames
-
-unittest.TestLoader.getTestCaseNames = getTestCaseNames
-unittest.defaultTestLoader.exclude = []
+def combined_errs(results):
+    for result in results.values():
+        combined_errs = result['errors'] + result['failures']
+        for err in combined_errs:
+            yield err
 
 ################################################################################
 # For complete failures (+ namespace saving)
 def from_namespace(ns, template):
     if isinstance(template, dict):
         return dict((i, ns.get(i, template[i])) for i in template)
-    else:
-        return dict((i, ns[i]) for i in template)
+    return dict((i, ns[i]) for i in template)
 
 RESULTS_TEMPLATE = {
     'output'     :  '',
-    'stderr'     :  '',
-    'stdout'     :  '',
     'num_tests'  :   0,
     'failures'   :  [],
     'errors'     :  [],
-    'timings'    :   {},
+    'tests'      :  {},
 }
 
 ################################################################################
 
-def run_test(modules, options):
-    ########################################################################
+def run_test(module, options):
     suite = unittest.TestSuite()
+    test_utils.fail_incomplete_tests = options.incomplete
 
-    ########################################################################
-    # Options
+    __import__(module)
+    print 'loading', module
+
+    test = unittest.defaultTestLoader.loadTestsFromName(module)
+    suite.addTest(test)
         
-    test_utils.fail_incomplete_tests = options.incomplete
-    if options.exclude:
-        unittest.defaultTestLoader.exclude = (
-            [e.strip() for e in options.exclude.split(',')]
-        )
-    
-    if options.timings: monkeyPatchTiming(options.timings)
-
-    ########################################################################
-    # load modules, filtering by tag
-    
-    for module in modules:
-        __import__(module)
-        print 'loading', module
-
-        test = unittest.defaultTestLoader.loadTestsFromName(module)
-        suite.addTest(test)
-        
-    ########################################################################
-    # redirect stderr / stdout
-    
-    (realerr, realout), (stderr, stdout) =  redirect_output()
-    # restore_output(realerr, realout)       DEBUG
-
     output = StringIO.StringIO()
     runner = unittest.TextTestRunner(stream = output)
     
-    ########################################################################
-    # run the test suite 
-    
     results = runner.run(suite)
-    
-    ########################################################################
-    # restore output and compile get results
-    
-    output, stderr, stdout = map(StringIOContents, (output, stderr, stdout))
-    restore_output(realerr, realout)
-    
+    output  = StringIOContents(output)
+
     num_tests = results.testsRun
-    failures  = results.monkeyedFailRepr('FAIL', results.failures)
-    errors    = results.monkeyedFailRepr('ERROR', results.errors)
-    if options.timings:
-        timings   = results.timings
+    failures  = results.failures
+    errors    = results.errors
+    tests     = results.tests
 
-    ########################################################################
-    # conditional adds here, profiling etc
-    
-    results = {
-        many_modules_key(modules): from_namespace(locals(), RESULTS_TEMPLATE)
-    }
-    
-    ########################################################################
+    results   = {module:from_namespace(locals(), RESULTS_TEMPLATE)}
     
     if options.subprocess:
         print TEST_RESULTS_START
     else:
         return results
 
-    ########################################################################
-    
+################################################################################
+
 if __name__ == '__main__':
     options, args = opt_parser.parse_args()
+    unittest_patch.patch(options)
     if not args: sys.exit('Called from run_tests.py, use that')
-    run_test([args[0]], options)
-    
+    run_test(args[0], options)
+
 ################################################################################
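
One small idiom worth calling out from test_runner.py is from_namespace, which run_test() uses to lift its local variables straight into the RESULTS_TEMPLATE shape. A quick standalone sketch of its behaviour (the function body is copied from the diff above; the surrounding variables are invented):

    def from_namespace(ns, template):
        if isinstance(template, dict):
            return dict((i, ns.get(i, template[i])) for i in template)
        return dict((i, ns[i]) for i in template)

    num_tests, failures = 3, []
    picked = from_namespace(locals(), {'num_tests': 0, 'failures': [], 'errors': []})
    # num_tests and failures are taken from locals(); 'errors' is missing there,
    # so it falls back to the template default [].
    print picked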

File unittest_patch.py

+################################################################################
+
+import unittest, re, time, sys, StringIO
+from inspect import getdoc
+
+################################################################################
+# Redirect stdout / stderr for the tests
+
+def redirect_output():
+    yield sys.stderr, sys.stdout
+    sys.stderr, sys.stdout = StringIO.StringIO(), StringIO.StringIO()
+    yield sys.stderr, sys.stdout
+
+def restore_output(err, out):
+    sys.stderr, sys.stdout = err, out
+
+def StringIOContents(io):
+    io.seek(0)
+    return io.read()
+
+################################################################################
+# TestCase patching
+#
+
+def TestCase_run(self, result=None):
+    if result is None: result = self.defaultTestResult()
+    result.startTest(self)
+    testMethod = getattr(self, self._testMethodName)
+    try:
+    
+    ########################################################################
+    # Pre run:
+        
+        result.tests[self.dot_syntax_name()] = {}
+        tests = result.tests[self.dot_syntax_name()]
+        (realerr, realout), (stderr, stdout) =  redirect_output()
+        # restore_output(realerr, realout)      # DEBUG
+
+        t = time.time()
+
+    ########################################################################
+
+        for i in range(self.times_run):
+            try:
+                self.setUp()
+            except KeyboardInterrupt:
+                raise
+            except:
+                result.addError(self, self._exc_info())
+                return
+
+            ok = False
+            try:
+                testMethod()
+                ok = True
+            except self.failureException:
+                result.addFailure(self, self._exc_info())
+            except KeyboardInterrupt:
+                raise
+            except:
+                result.addError(self, self._exc_info())
+            
+            try:
+                self.tearDown()
+            except KeyboardInterrupt:
+                raise
+            except:
+                result.addError(self, self._exc_info())
+                ok = False
+    
+            if ok:
+                if i == 0:
+                    result.addSuccess(self)
+            else: break
+
+    ########################################################################
+    # Post run
+
+        t = (time.time() -t) / self.times_run
+        
+        restore_output(realerr, realout)
+        
+        tests["time"]   = t
+        tests["stdout"] = StringIOContents(stdout)
+        tests["stderr"] = StringIOContents(stderr)
+
+    ########################################################################
+
+    finally:
+        result.stopTest(self)
+
+################################################################################
+# TestResult 
+#
+
+def TestResult___init__(self):
+    self.failures   = []
+    self.errors     = []
+    self.tests      = {}
+    self.testsRun   = 0
+    self.shouldStop = 0
+
+FILE_LINENUMBER_RE = re.compile(r'File "([^"]+)", line ([0-9]+)')
+
+def errorHandling(key):
+    def handling(self, test, err):        
+        traceback = self._exc_info_to_string(err, test)
+        error_file, line_number = FILE_LINENUMBER_RE.search(traceback).groups()
+        error =  (
+            test.dot_syntax_name(),
+            traceback,
+            error_file,
+            line_number,
+        )
+        getattr(self, key).append(error)
+        
+        # Append it to individual test dict for easy access
+        self.tests[test.dot_syntax_name()][key[:-1]] = error
+
+    return handling
+
+################################################################################
+
+def printErrorList(self, flavour, errors):
+    for test, err in ((e[0], e[1]) for e in errors):
+        self.stream.writeln(self.separator1)
+        self.stream.writeln("%s: %s" % (flavour, test))
+        self.stream.writeln(self.separator2)
+        self.stream.writeln("%s" % err)
+
+        # DUMP REDIRECTED STDERR / STDOUT ON ERROR / FAILURE
+        if self.show_redirected_on_errors:
+            stderr, stdout = map(self.tests[test].get, ('stderr','stdout'))
+            if stderr or stdout:
+                if stderr: self.stream.writeln("STDERR:\n%s" % stderr)
+                if stdout: self.stream.writeln("STDOUT:\n%s" % stdout)
+
+################################################################################
+# Exclude by tags
+#
+
+TAGS_RE = re.compile(r"\|[tT]ags:([ a-zA-Z,0-9_\n]+)\|", re.M)
+
+def get_tags(obj):
+    tags = TAGS_RE.search(getdoc(obj) or '')
+    return tags and [t.strip() for t in tags.group(1).split(',')] or []
+
+def getTestCaseNames(self, testCaseClass):
+    def test_wanted(attrname, testCaseClass=testCaseClass, 
+                                    prefix=self.testMethodPrefix):
+
+        actual_attr = getattr(testCaseClass, attrname)
+        filtered = bool([t for t in get_tags(actual_attr) if t in self.exclude])
+        return ( attrname.startswith(prefix) and callable(actual_attr)
+                 and not filtered )
+
+    testFnNames = filter(test_wanted, dir(testCaseClass))
+    
+    for baseclass in testCaseClass.__bases__:
+        for testFnName in self.getTestCaseNames(baseclass):
+            if testFnName not in testFnNames:  # handle overridden methods
+                testFnNames.append(testFnName)
+
+    if self.sortTestMethodsUsing:
+        testFnNames.sort(self.sortTestMethodsUsing)
+
+    return testFnNames
+
+################################################################################
+
+def patch(options):
+    # Tag exclusion
+    if options.exclude:
+        unittest.TestLoader.getTestCaseNames = getTestCaseNames
+        unittest.TestLoader.exclude = (
+            [e.strip() for e in options.exclude.split(',')] )
+
+    # Timing
+    unittest.TestCase.times_run = options.timings
+    unittest.TestCase.run = TestCase_run
+    unittest.TestCase.dot_syntax_name = lambda self: (
+        "%s.%s"% (self.__class__.__name__, self._testMethodName) )
+
+    # Error logging
+    unittest.TestResult.show_redirected_on_errors = options.show_output
+    unittest.TestResult.__init__   = TestResult___init__
+    unittest.TestResult.addError   = errorHandling('errors')
+    unittest.TestResult.addFailure = errorHandling('failures')
+
+    unittest._TextTestResult.printErrorList = printErrorList
+    
+################################################################################
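
Taken on its own, unittest_patch can be exercised without run_tests.py or test_runner.py. A rough usage sketch, assuming the old Python 2 unittest internals this module patches (_TextTestResult, TestCase._exc_info); the option values and the DemoTest case are invented:

    import unittest, optparse, StringIO
    import unittest_patch

    # Fake the option attributes that patch() reads.
    options = optparse.Values({'exclude': '', 'timings': 1, 'show_output': True})
    unittest_patch.patch(options)

    class DemoTest(unittest.TestCase):           # hypothetical test case
        def test_noisy_failure(self):
            print 'debug noise'                  # captured, not printed
            self.fail('boom')

    output = StringIO.StringIO()
    runner = unittest.TextTestRunner(stream=output)
    result = runner.run(unittest.defaultTestLoader.loadTestsFromTestCase(DemoTest))

    # Per-test capture lives on the (patched) result object; printErrorList also
    # dumps it into `output`, because show_output is set.
    print result.tests['DemoTest.test_noisy_failure']['stdout']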