Commits

Anonymous committed b1f9b5c

Added a fail_incomplete test suite

  • Parent commits b980a5f

Files changed (4)

run_tests.py

 import sys, os, re, unittest, subprocess, time, optparse
 import pygame.threads 
 
-from test_runner import run_test, TEST_RESULTS_RE, TEST_RESULTS_START
+from test_runner import run_test, TEST_RESULTS_RE, TEST_RESULTS_START,\
+                        prepare_test_env
+
 from pprint import pformat
 
-# async_sub imported if needed when run in subprocess mode
-
-main_dir = os.path.split(os.path.abspath(sys.argv[0]))[0]
-test_subdir = os.path.join(main_dir, 'test')
+main_dir, test_subdir = prepare_test_env()
 fake_test_subdir = os.path.join(test_subdir, 'run_tests__tests')
 test_runner_py = os.path.join(main_dir, "test_runner.py")
 
-sys.path.insert(0, test_subdir)
 import test_utils
 
 ################################### CONSTANTS ##################################
 if args:    
     test_modules = [
         m.endswith('_test') and m or ('%s_test' % m) for m in args
-    ]    
+    ]
 
 ################################################################################
 # Single process mode
 
     def sub_test(module):
         print 'loading', module
-        cmd = [options.python, test_runner_py, module ] + sys.argv[1:]
+        
+        pass_on_args = [a for a in sys.argv[1:] if a not in args]
+        cmd = [options.python, test_runner_py, module ] + pass_on_args
 
         return module, (cmd, test_env, working_dir), proc_in_time_or_kill (
             cmd,
         cmd, test_env, working_dir = cmd
 
         test_results = TEST_RESULTS_RE.search(raw_return)
-        if test_results: 
+        if test_results:
             try:     results.update(eval(test_results.group(1)))
             except:  raise Exception("BUGGY EVAL:\n %s" % test_results.group(1))
 
         print combined
     else:
         print TEST_RESULTS_START
-        print pformat(fails)
+        print pformat(options.all and results or fails)
 
 ################################################################################
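
The module-name normalization above (m.endswith('_test') and m or ...) and the
new dump line (pformat(options.all and results or fails)) both rely on the old
"X and A or B" conditional idiom from pre-2.5 Python. A small standalone
illustration of how it behaves with the new --all option, using made-up values
rather than real suite results, including the idiom's usual caveat:

    # X and A or B picks A when X is truthy, otherwise B (pre-2.5 ternary).
    m = 'key'
    assert (m.endswith('_test') and m or ('%s_test' % m)) == 'key_test'

    results  = {'key_test': {'num_tests': 6}}   # everything that ran
    fails    = {}                               # only failures/errors
    dump_all = True                             # i.e. --all was given
    assert (dump_all and results or fails) is results
    dump_all = None                             # optparse default: --all absent
    assert (dump_all and results or fails) is fails

    # Caveat of the idiom: when A is itself falsy, B wins even though X is true.
    assert (True and {} or {'fallback': 1}) == {'fallback': 1}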

test/run_tests__tests/incomplete/fake_2_test.py

+import unittest, test_utils
+
+class KeyModuleTest(unittest.TestCase):
+    def test_get_focused(self):
+        self.assert_(True) 
+
+    def test_get_mods(self):
+        self.assert_(True) 
+
+    def test_get_pressed(self):
+        self.assert_(test_utils.test_not_implemented()) 
+
+    def test_name(self):
+        self.assert_(True) 
+
+    def test_set_mods(self):
+        self.assert_(test_utils.test_not_implemented()) 
+
+    def test_set_repeat(self):
+        self.assert_(True) 
+
+if __name__ == '__main__':
+    unittest.main()

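fake_2_test.py mixes passing stubs with two asserts on
test_utils.test_not_implemented(), while fake_3_test.py below passes everything
outright; the contrast between the two is presumably what the fail_incomplete
suite exercises. The helper itself is not part of this commit; a minimal sketch
of how such a marker could work (the flag name and behaviour are assumptions,
not pygame's actual test_utils code):

    # Hypothetical stand-in for test_utils.test_not_implemented().
    fail_incomplete_tests = False      # a runner could flip this before loading

    def test_not_implemented():
        # Stub tests do self.assert_(test_not_implemented()): they pass by
        # default, and fail only when incomplete tests are asked to fail.
        return not fail_incomplete_tests
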
test/run_tests__tests/incomplete/fake_3_test.py

+import unittest
+
+class KeyModuleTest(unittest.TestCase):
+    def test_get_focused(self):
+        self.assert_(True) 
+
+    def test_get_mods(self):
+        self.assert_(True) 
+
+    def test_get_pressed(self):
+        self.assert_(True) 
+
+    def test_name(self):
+        self.assert_(True) 
+
+    def test_set_mods(self):
+        self.assert_(True) 
+
+    def test_set_repeat(self):
+        self.assert_(True) 
+
+if __name__ == '__main__':
+    unittest.main()

test_runner.py

 from pprint import pformat
 
 ################################################################################
+
+def prepare_test_env():
+    main_dir = os.path.split(os.path.abspath(sys.argv[0]))[0]
+    test_subdir = os.path.join(main_dir, 'test')
+    sys.path.insert(0, test_subdir)
+    return main_dir, test_subdir
+
+main_dir, test_subdir = prepare_test_env()
+import test_utils
+
+################################################################################
 # Set the command line options
 #
 # options are shared with run_tests.py so make sure not to conflict
 
 opt_parser.add_option (
      "-d",  "--dump", action = 'store_true',
-     help   = "dump results as dict ready to eval" )
+     help   = "dump failures/errors as dict ready to eval" )
+
+opt_parser.add_option (
+     "-a",  "--all", action = 'store_true',
+     help   = "dump all results not just errors eg. -da" )
 
 opt_parser.add_option (
      "-H",  "--human", action = 'store_true',
     io.seek(0)
     return io.read()
     
-unittest._TextTestResult.monkey = lambda self, errors: [
+unittest._TextTestResult.monkeyRepr = lambda self, errors: [
     (self.getDescription(e[0]), e[1]) for e in errors
 ]
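
Renaming the monkey-patched helper to monkeyRepr makes its job clearer:
unittest keeps failures and errors as (test, formatted_traceback) pairs, and
the helper maps each test object to a printable description before the results
dict is dumped. The same class-attribute patching pattern in isolation, on a
plain TestResult rather than the runner's _TextTestResult (illustration only,
not the test_runner code):

    import unittest

    class _Dummy(unittest.TestCase):
        def test_boom(self):
            self.fail('boom')

    # Assigning a function to the class makes it a method on every instance.
    unittest.TestResult.monkeyRepr = lambda self, errors: [
        (str(e[0]), e[1]) for e in errors      # (test, traceback) pairs
    ]

    result = unittest.TestResult()
    _Dummy('test_boom').run(result)
    summary = result.monkeyRepr(result.failures)
    assert 'boom' in summary[0][1]             # traceback text is preserved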
 
             options.subprocess and modules[0] or 'all_tests':
             {
                 'num_tests' : results.testsRun,
-                'failures'  : results.monkey(results.failures),
-                'errors'    : results.monkey(results.errors),
+                'failures'  : results.monkeyRepr(results.failures),
+                'errors'    : results.monkeyRepr(results.errors),
                 'output'    : captured,
                 'stderr'    : err,
                 'stdout'    : out,