Commits

akalias committed c5fc973

run_tests.py bugfix

  • Parent commits b1f9b5c


Files changed (4)

File run_tests.py

 
 opt_parser.set_usage("""
 
-Runs all the test/xxxx_test.py tests.
+Runs all or some of the test/xxxx_test.py tests.
+
+$ run_tests.py sprite threads -sd
+
+Runs the sprite and threads module tests isolated in subprocesses, dumping
+info for all failing tests in the form of a dict.
 
 """)
 
 test_env = {"PYTHONPATH": test_subdir}
 os.chdir(working_dir)
 
-test_modules = []
-for f in sorted(os.listdir(test_subdir)):
-    for match in TEST_MODULE_RE.findall(f):
-        if ((options.subprocess and match in SUBPROCESS_IGNORE) 
-             or match in IGNORE): continue
-        test_modules.append(match)
-if args:    
+if args:
     test_modules = [
         m.endswith('_test') and m or ('%s_test' % m) for m in args
     ]
+else:
+    if options.subprocess: ignore = SUBPROCESS_IGNORE
+    else: ignore = IGNORE
+
+    test_modules = []
+    for f in sorted(os.listdir(test_subdir)):
+        for match in TEST_MODULE_RE.findall(f):
+            if match in ignore: continue
+            test_modules.append(match)
 
 ################################################################################
 # Single process mode
 #
 
 if not options.subprocess:    
-    single_results = run_test (
-        test_modules,
-        options = options
-    )
-    if options.dump: print pformat(single_results)    #TODO
+    single_results = run_test(test_modules, options = options)
+    #TODO: make consistent with subprocess mode
+    if options.dump: print pformat(single_results)
     else: print single_results['output']
 
 ################################################################################
         )
 
         if not output or (return_code and RAN_TESTS_DIV not in output):
-            # would this effect the original? TODO
+            # would this affect the original dict? TODO
             results['raw_return'] = ''.join(raw_return.splitlines(1)[:5])
             failures.append( COMPLETE_FAILURE_TEMPLATE % results )
             all_dots += 'E'
         all_of = [a for a in [v.get(arg) for v in results.values()] if a]
         if not all_of: yield 0
         else:
-            yield sum (
-            isinstance(all_of[0], int) and all_of or (len(v) for v in all_of)
-        )
+            if isinstance(all_of[0], int): the_sum = all_of
+            else: the_sum = (len(v) for v in all_of)
+            yield sum(the_sum)
 
 def test_failures(results):
     total,   = count(results, 'num_tests')

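As a reference for the counting change above, here is the aggregation behaviour in isolation: for each requested field, count() yields one total across all per-module result dicts, summing integer values directly and taking lengths of container values. Only the yield logic and the 'num_tests' key appear in this diff; the enclosing loop, the signature, and the sample data (including the 'failures' key) below are inferred for illustration.

    def count(results, *args):
        # One total per requested field, across every module's results dict.
        for arg in args:
            all_of = [a for a in [v.get(arg) for v in results.values()] if a]
            if not all_of:
                yield 0
            else:
                # Integer fields are summed directly; container fields
                # contribute their lengths.
                if isinstance(all_of[0], int): the_sum = all_of
                else: the_sum = (len(v) for v in all_of)
                yield sum(the_sum)

    # Illustrative data only: two module result dicts with made-up values.
    results = {
        'sprite_test':  {'num_tests': 20, 'failures': ['test_collide']},
        'threads_test': {'num_tests': 16, 'failures': []},
    }
    total, failed = count(results, 'num_tests', 'failures')
    assert (total, failed) == (36, 1)
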
File test/run_tests__tests/print_stderr/fake_3_test.py

-import unittest
+import unittest, sys
 
 class KeyModuleTest(unittest.TestCase):
     def test_get_focused(self):

File test/run_tests__tests/run_tests__test.py

                            if os.path.isdir(os.path.join(main_dir, x))
                            and x not in IGNORE ]
 
+
+################################################################################
+
+def assert_on_results(suite, single, sub):
+    test = globals().get('%s_test' % suite)
+    if callable(test):
+        test(suite, single, sub)
+        print "assertions on %s OK" % suite
+
+def incomplete_test(suite, *args):
+    for results in args:
+        assert 'self.assert_(test_utils.test_not_implemented())' in results
+
+# Don't modify tests in the suites below. These assertions are in place to make
+# sure that tests are actually being run.
+
+def all_ok_test(suite, *args):
+    for results in args:
+        assert "Ran 36 tests" in results      # some tests are running
+        assert "OK" in results                # OK
+
+def failures1_test(suite, *args):
+    for results in args: 
+        assert "FAILED (failures=2)" in results
+        assert "Ran 18 tests" in results
+
 ################################################################################
 # Test that output is the same in single process and subprocess modes 
 #
 
-base_cmd = [sys.executable, 'run_tests.py']
+base_cmd = [sys.executable, 'run_tests.py', '-i']
 
 cmd = base_cmd + ['-f']
 sub_cmd = base_cmd + ['-s', '-f']
 
     failed = normed_single != normed_subs
     if failed:
-        print '%s suite comparison FAILED\n' % suite    
+        print '%s suite comparison FAILED\n' % suite
     else:
         passes += 1
         print '%s suite comparison OK' % suite
     
+    assert_on_results(suite, single, subs)
+
     if verbose or failed:
         print "difflib.Differ().compare(single, suprocessed):\n"
         print ''.join ( list(
 passes += 1
 print "OK"
 
-print "\n%s/%s passes" % (passes, len(test_suite_dirs) + 1)
+print "\n%s/%s suites pass" % (passes, len(test_suite_dirs) + 1)
 
 print "\n-h for help"
 

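The per-suite assertion hooks added above are dispatched by naming convention: assert_on_results() looks in globals() for a callable named '<suite>_test' and, if found, calls it with the suite name plus the single-process and subprocess outputs. A minimal standalone sketch of that convention follows; the 'timeout' suite name and its output strings are hypothetical, not part of the test suites in this commit.

    def assert_on_results(suite, single, sub):
        # Resolve an optional per-suite hook by name; skip quietly if absent.
        test = globals().get('%s_test' % suite)
        if callable(test):
            test(suite, single, sub)

    def timeout_test(suite, *args):
        # Hypothetical hook for a suite directory named 'timeout'.
        for results in args:
            assert "Ran 1 test" in results

    # Calls timeout_test(); a suite with no matching hook is a no-op.
    assert_on_results('timeout', "Ran 1 test in 0.10s\nOK",
                                 "Ran 1 test in 0.20s\nOK")
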
File test_runner.py

 
 opt_parser.add_option (
      "-H",  "--human", action = 'store_true',
-     help   = "dump results as dict ready to eval if unsure" 
-              " (subprocess mode)" ) # TODO
+     help   = "dump results as dict ready to eval if unsure "
+              "that pieced together results are correct "
+              "(subprocess mode)" ) # TODO
 
 opt_parser.add_option (
      "-m",  "--multi_thread", metavar = 'THREADS', type = 'int',
     if isinstance(modules, str): modules = [modules]
     suite = unittest.TestSuite()
 
-    if not options.fake:
-        import test_utils
-        test_utils.fail_incomplete_tests = options.incomplete
 
+    #TODO: ability to pass module.TestCase etc (names) from run_tests.py
     for module in modules:
         __import__(module)
         print 'loading', module
-
-        # filter test by tags based on options
+
+        # TODO: based on optparse options
+        # filter tests by tags
+        # decorate tests with profiling wrappers etc
+
         test = unittest.defaultTestLoader.loadTestsFromName(module)
         suite.addTest(test)
 
 
     captured = StringIO.StringIO()
     runner = unittest.TextTestRunner(stream = captured)
+
+    import test_utils
+    test_utils.fail_incomplete_tests = options.incomplete
+
     results = runner.run(suite)
-
     captured, err, out = map(StringIOContents, (captured, err, out))
     restore_output(realerr, realout)
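
For context on the capture step rearranged above: run_test() routes the unittest runner's report into an in-memory stream instead of the real stderr, so the text can be dumped or pieced together later. A minimal sketch of that pattern, using the same Python 2 idioms as the module; StringIOContents() here is a simplified stand-in for the helper test_runner.py defines, and the redirect/restore of sys.stderr and sys.stdout is omitted.

    import unittest, StringIO

    class SanityTest(unittest.TestCase):
        def test_truth(self):
            self.assert_(True)

    def StringIOContents(stream):
        # Stand-in helper: drain an in-memory stream to a string.
        return stream.getvalue()

    suite = unittest.TestSuite()
    suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(SanityTest))

    # Send the runner's report to an in-memory stream instead of stderr.
    captured = StringIO.StringIO()
    runner = unittest.TextTestRunner(stream = captured)
    results = runner.run(suite)

    output = StringIOContents(captured)
    assert "Ran 1 test" in output and results.wasSuccessful()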