Commits

Ronny Pfannschmidt committed b0feedf

inline testrunner

Comments (0)

Files changed (12)

 greenlet = [svn]http://codespeak.net/svn/greenlet/trunk/c
-testrunner = [svn]http://codespeak.net/svn/pypy/build/testrunner
 lib_pypy/pyrepl = [svn]http://codespeak.net/svn/pyrepl/trunk/pyrepl/pyrepl
 80037 greenlet
 80409 lib_pypy/pyrepl
-80409 testrunner

testrunner/runner.py

+import sys, os, signal, thread, Queue, time
+import py
+import subprocess, optparse
+
+if sys.platform == 'win32':
+    PROCESS_TERMINATE = 0x1
+    try:
+        import win32api, pywintypes
+    except ImportError:
+        def _kill(pid, sig):
+            import ctypes
+            winapi = ctypes.windll.kernel32
+            proch = winapi.OpenProcess(PROCESS_TERMINATE, 0, pid)
+            winapi.TerminateProcess(proch, 1) == 1
+            winapi.CloseHandle(proch)
+    else:
+        def _kill(pid, sig):
+            try:
+                proch = win32api.OpenProcess(PROCESS_TERMINATE, 0, pid)
+                win32api.TerminateProcess(proch, 1)
+                win32api.CloseHandle(proch)
+            except pywintypes.error, e:
+                pass
+
+    SIGKILL = SIGTERM = 0
+    READ_MODE = 'rU'
+    WRITE_MODE = 'wb'
+else:
+    def _kill(pid, sig):
+        try:
+            os.kill(pid, sig)
+        except OSError:
+            pass
+
+    SIGKILL = signal.SIGKILL
+    SIGTERM = signal.SIGTERM
+    READ_MODE = 'r'
+    WRITE_MODE = 'w'
+
# Synthetic exit codes produced by run()/worker(), chosen to be far
# outside the range of real process exit statuses and signal numbers:
EXECUTEFAILED = -1001   # exception raised inside execute_test() itself
RUNFAILED  = -1000      # the child process could not be started at all
TIMEDOUT = -999         # the child was killed after exceeding the timeout
+
def busywait(p, timeout):
    """Wait for subprocess `p` to finish, polling with growing delays.

    Returns p's exit status, or None if `timeout` seconds elapse first.
    The sleep interval grows geometrically (capped at 7.2s) so long
    waits stay cheap while short processes are noticed quickly.
    """
    start = time.time()
    pause = 0.5
    while 1:
        time.sleep(pause)
        status = p.poll()
        if status is not None:
            return status
        if time.time() - start >= timeout:
            return None
        pause = min(pause * 1.15, 7.2)
+
+def run(args, cwd, out, timeout=None):
+    f = out.open('w')
+    try:
+        try:
+            p = subprocess.Popen(args, cwd=str(cwd), stdout=f, stderr=f)
+        except Exception, e:
+            f.write("Failed to run %s with cwd='%s' timeout=%s:\n"
+                    " %s\n"
+                    % (args, cwd, timeout, e))
+            return RUNFAILED
+
+        if timeout is None:
+            return p.wait()
+        else:
+            returncode = busywait(p, timeout)
+            if returncode is not None:
+                return returncode
+            # timeout!
+            _kill(p.pid, SIGTERM)
+            if busywait(p, 10) is None:
+                _kill(p.pid, SIGKILL)
+            return TIMEDOUT
+    finally:
+        f.close()
+
def dry_run(args, cwd, out, timeout=None):
    """Pretend to run `args`: record the would-be invocation in `out`.

    Always returns 0, i.e. a dry run never counts as a failure.
    """
    log = out.open('w')
    try:
        log.write("run %s with cwd='%s' timeout=%s\n" % (args, cwd, timeout))
    finally:
        log.close()
    return 0
+
def getsignalname(n):
    """Best-effort mapping of a signal number to its SIG* name.

    Falls back to the string 'signal <n>' when no matching SIG*
    attribute exists in the signal module.
    """
    for attr, value in signal.__dict__.items():
        if value == n and attr.startswith('SIG'):
            return attr
    return 'signal %d' % (n,)
+
def execute_test(cwd, test, out, logfname, interp, test_driver,
                 do_dry_run=False, timeout=None,
                 _win32=(sys.platform=='win32')):
    """Invoke the test driver on `test`, writing a resultlog to `logfname`.

    Builds the command line from `interp` + `test_driver` plus the
    resultlog options, then delegates to run() (or dry_run() when
    `do_dry_run` is set).  Returns the child's exit status.
    """
    cmdline = interp + test_driver
    cmdline += ['-p', 'resultlog', '--resultlog=%s' % logfname, test]
    cmdline = list(map(str, cmdline))

    # On Windows a relative interpreter path must be resolved against
    # `cwd` by hand, since the child starts in a different directory.
    first = cmdline[0]
    if (_win32 and not os.path.isabs(first) and
        ('\\' in first or '/' in first)):
        cmdline[0] = os.path.join(str(cwd), first)

    runfunc = dry_run if do_dry_run else run
    return runfunc(cmdline, cwd, out, timeout=timeout)
+
def should_report_failure(logdata):
    """Decide whether a py.test exit code of 1 needs an extra log entry.

    Exit code 1 normally means some tests failed "regularly"; then the
    resultlog already contains F/E/P lines and nothing more should be
    reported.  When no such line is present, py.test probably crashed
    and the failure has to be surfaced explicitly.
    """
    regular_markers = ('F ', 'E ', 'P ')
    for line in logdata.splitlines():
        if line.startswith(regular_markers):
            return False
    return True
+
+def interpret_exitcode(exitcode, test, logdata=""):
+    extralog = ""
+    if exitcode:
+        failure = True
+        if exitcode != 1 or should_report_failure(logdata):
+            if exitcode > 0:
+                msg = "Exit code %d." % exitcode
+            elif exitcode == TIMEDOUT:
+                msg = "TIMEOUT"
+            elif exitcode == RUNFAILED:
+                msg = "Failed to run interp"
+            elif exitcode == EXECUTEFAILED:
+                msg = "Failed with exception in execute-test"
+            else:
+                msg = "Killed by %s." % getsignalname(-exitcode)
+            extralog = "! %s\n %s\n" % (test, msg)
+    else:
+        failure = False
+    return failure, extralog
+
def worker(num, n, run_param, testdirs, result_queue):
    # Worker loop: repeatedly pop a test directory from the shared
    # `testdirs` list, run it, and push ('start', ...) / ('done', ...)
    # events onto `result_queue`; a final None marks this worker as
    # finished.  `num` is this worker's index and `n` the total worker
    # count; `num` is bumped by `n` each round so per-run file names
    # stay unique across all workers.
    sessdir = run_param.sessdir
    root = run_param.root
    get_test_driver = run_param.get_test_driver
    interp = run_param.interp
    dry_run = run_param.dry_run
    timeout = run_param.timeout
    cleanup = run_param.cleanup
    # xxx cfg thread start
    while 1:
        try:
            # list.pop is atomic under the GIL, so no explicit lock
            test = testdirs.pop(0)
        except IndexError:
            result_queue.put(None) # done
            return
        result_queue.put(('start', test))
        basename = py.path.local(test).purebasename
        logfname = sessdir.join("%d-%s-pytest-log" % (num, basename))
        one_output = sessdir.join("%d-%s-output" % (num, basename))
        num += n

        try:
            test_driver = get_test_driver(test)
            exitcode = execute_test(root, test, one_output, logfname,
                                    interp, test_driver, do_dry_run=dry_run,
                                    timeout=timeout)

            cleanup(test)
        except:
            # report any unexpected error as EXECUTEFAILED rather than
            # letting the worker thread die silently
            print "execute-test for %r failed with:" % test
            import traceback
            traceback.print_exc()
            exitcode = EXECUTEFAILED

        if one_output.check(file=1):
            output = one_output.read(READ_MODE)
        else:
            output = ""
        if logfname.check(file=1):
            logdata = logfname.read(READ_MODE)
        else:
            logdata = ""

        failure, extralog = interpret_exitcode(exitcode, test, logdata)

        if extralog:
            logdata += extralog

        result_queue.put(('done', test, failure, logdata, output))
+
# Indirection point so tests can replace thread spawning with direct calls.
invoke_in_thread = thread.start_new_thread
+
def start_workers(n, run_param, testdirs):
    """Spawn `n` worker threads consuming `testdirs`; return their queue.

    The returned Queue receives the ('start', ...) / ('done', ...) /
    None events emitted by worker().
    """
    results = Queue.Queue()
    for worker_num in range(n):
        invoke_in_thread(worker, (worker_num, n, run_param, testdirs,
                                  results))
    return results
+
+
def execute_tests(run_param, testdirs, logfile, out):
    # Drive a full session: start parallel_runs workers over `testdirs`,
    # then consume their events, echoing progress to `out` and appending
    # machine-readable result data to `logfile`.  Returns True if any
    # test directory failed.
    sessdir = py.path.local.make_numbered_dir(prefix='usession-testrunner-',
                                              keep=4)
    run_param.sessdir = sessdir

    run_param.startup()

    N = run_param.parallel_runs
    failure = False

    # announce the planned work up front
    for testname in testdirs:
        out.write("-- %s\n" % testname)
    out.write("-- total: %d to run\n" % len(testdirs))

    result_queue = start_workers(N, run_param, testdirs)

    done = 0
    started = 0

    worker_done = 0
    while True:
        res = result_queue.get()
        if res is None:
            # one worker ran out of work; stop once all N have reported
            worker_done += 1
            if worker_done == N:
                break
            continue

        if res[0] == 'start':
            started += 1
            out.write("++ starting %s [%d started in total]\n" % (res[1],
                                                                  started))
            continue
        
        # ('done', test, failure, logdata, output)
        testname, somefailed, logdata, output = res[1:]
        done += 1
        failure = failure or somefailed

        heading = "__ %s [%d done in total] " % (testname, done)
        
        # pad the heading with underscores up to 79 columns
        out.write(heading + (79-len(heading))*'_'+'\n')

        out.write(output)
        if logdata:
            logfile.write(logdata)

    run_param.shutdown()

    return failure
+
+
class RunParam(object):
    # Bag of session parameters.  Config files given via --config are
    # exec'd with an instance's __dict__ as namespace, so any attribute
    # or method below can be overridden per run.
    dry_run = False
    interp = [os.path.abspath(sys.executable)]
    # prefer py/bin/py.test if present, else a top-level pytest.py
    pytestpath = os.path.abspath(os.path.join('py', 'bin', 'py.test'))
    if not os.path.exists(pytestpath):
        pytestpath = os.path.abspath(os.path.join('pytest.py'))
        assert os.path.exists(pytestpath)
    test_driver = [pytestpath]

    parallel_runs = 1
    timeout = None
    cherrypick = None
    
    def __init__(self, root):
        self.root = root
        # expose `self` inside exec'd config files, whose overriding
        # functions do not take a self argument (see main())
        self.self = self

    def startup(self):
        # session-start hook; overridable from config files
        pass

    def shutdown(self):
        # session-end hook; overridable from config files
        pass

    def get_test_driver(self, testdir):
        # return the driver command (list) used to run `testdir`
        return self.test_driver

    def is_test_py_file(self, p):
        # is path `p` a test_*.py file?
        name = p.basename
        return name.startswith('test_') and name.endswith('.py')

    def reltoroot(self, p):
        # path of `p` relative to the root, with '/' separators
        rel = p.relto(self.root)
        return rel.replace(os.sep, '/')

    def collect_one_testdir(self, testdirs, reldir, tests):
        # record `reldir` as one schedulable unit; `tests` lists its
        # test files (unused here but available to overrides)
        testdirs.append(reldir)
        return

    def collect_testdirs(self, testdirs, p=None):
        # Recursively walk below the root; the first non-root directory
        # level containing test_*.py files becomes one test unit.
        if p is None:
            p = self.root
            
        reldir = self.reltoroot(p)
        entries = [p1 for p1 in p.listdir() if p1.check(dotfile=0)]
        entries.sort()

        if p != self.root:
            for p1 in entries:
                if self.is_test_py_file(p1):
                    self.collect_one_testdir(testdirs, reldir,
                                   [self.reltoroot(t) for t in entries
                                    if self.is_test_py_file(t)])
                    return

        for p1 in entries:
            if p1.check(dir=1, link=0):
                self.collect_testdirs(testdirs, p1)

    def cleanup(self, testdir):
        # per-test-directory cleanup hook; overridable from config files
        pass
+
+
def main(args):
    # Command-line entry point: parse options, apply config files,
    # collect test directories and run them.  Exits with status 1 when
    # any test failed, 2 on usage errors.
    parser = optparse.OptionParser()
    parser.add_option("--logfile", dest="logfile", default=None,
                      help="accumulated machine-readable logfile")
    parser.add_option("--output", dest="output", default='-',
                      help="plain test output (default: stdout)")
    parser.add_option("--config", dest="config", default=[],
                      action="append",
                      help="configuration python file (optional)")
    parser.add_option("--root", dest="root", default=".",
                      help="root directory for the run")
    parser.add_option("--parallel-runs", dest="parallel_runs", default=0,
                      type="int",
                      help="number of parallel test runs")
    parser.add_option("--dry-run", dest="dry_run", default=False,
                      action="store_true",
                      help="dry run"),
    # NOTE(review): the trailing comma above makes this statement a
    # 1-tuple; harmless, but looks accidental
    parser.add_option("--timeout", dest="timeout", default=None,
                      type="int",
                      help="timeout in secs for test processes")
        
    opts, args = parser.parse_args(args)

    if opts.logfile is None:
        print "no logfile specified"
        sys.exit(2)

    logfile = open(opts.logfile, WRITE_MODE)
    if opts.output == '-':
        out = sys.stdout
    else:
        out = open(opts.output, WRITE_MODE)

    root = py.path.local(opts.root)

    testdirs = []

    run_param = RunParam(root)
    # the config files are python files whose run overrides the content
    # of the run_param instance namespace
    # in that code function overriding method should not take self
    # though a self and self.__class__ are available if needed
    for config_py_file in opts.config:
        config_py_file = os.path.expanduser(config_py_file)
        if py.path.local(config_py_file).check(file=1):
            print >>out, "using config", config_py_file
            execfile(config_py_file, run_param.__dict__)

    # either run only the cherry-picked subdirectories, or everything
    if run_param.cherrypick:
        for p in run_param.cherrypick:
            run_param.collect_testdirs(testdirs, root.join(p))
    else:
        run_param.collect_testdirs(testdirs)

    # command-line options beat config-file values when given
    if opts.parallel_runs:
        run_param.parallel_runs = opts.parallel_runs
    if opts.timeout:
        run_param.timeout = opts.timeout
    run_param.dry_run = opts.dry_run

    if run_param.dry_run:
        print >>out, run_param.__dict__
    
    res = execute_tests(run_param, testdirs, logfile, out)

    if res:
        sys.exit(1)

testrunner/scratchbox_runner.py

+
+""" This is a very hackish runner for cross compilation toolchain scratchbox.
+Later on we might come out with some general solution
+"""
+
+import os
+
def args_for_scratchbox(cwd, args):
    """Prefix `args` with a scratchbox login that chdirs to `cwd`."""
    login_prefix = ['/scratchbox/login', '-d', str(cwd)]
    return login_prefix + args
+
def run_scratchbox(args, cwd, out, timeout=None):
    """Like runner.run(), but executed inside the scratchbox toolchain."""
    wrapped = args_for_scratchbox(cwd, args)
    return run(wrapped, cwd, out, timeout)
+
def dry_run_scratchbox(args, cwd, out, timeout=None):
    """Like runner.dry_run(), but showing the scratchbox invocation."""
    wrapped = args_for_scratchbox(cwd, args)
    return dry_run(wrapped, cwd, out, timeout)
+
import runner
# XXX hack hack hack
# Swap runner's process launchers for the scratchbox-wrapping versions;
# keep references to the originals so the wrappers above can delegate
# to them at call time.
dry_run = runner.dry_run
run = runner.run

runner.dry_run = dry_run_scratchbox
runner.run = run_scratchbox

if __name__ == '__main__':
    import sys
    runner.main(sys.argv)

testrunner/test/__init__.py

Empty file added.

testrunner/test/examples/normal/example.py

+
# Fixture test file for the runner's own tests, which depend on the
# exact mix of outcomes below (107 outcomes, 6 failures per copy).

def test_one():
    assert 1 == 10/10

def test_two():
    assert 2 == 3                 # deliberate failure

def test_three():
    assert "hello" == "world"     # deliberate failure

def test_many():
    # 100 generated sub-tests, all passing
    for i in range(100):
        yield test_one,

class TestStuff:

    def test_final(self):
        crash                     # deliberate NameError

testrunner/test/examples/normal/example_importerror.py

# Fixture: fails at import/collection time with ZeroDivisionError.
print 1/0

testrunner/test/examples/normal/failingsetup.py

+
# Fixture: setup_module always raises, so test_bar errors during setup.
def setup_module(mod):
    raise RuntimeError

def test_bar(self):
    assert True

testrunner/test/examples/normal/failingsetup_tricky.py

+
# Fixture: like failingsetup, but the test is a generator, exercising
# the failing-setup path for yielded sub-tests.
def setup_module(mod):
    raise RuntimeError

def test_goo(self):
    yield (lambda: None)

testrunner/test/examples/stall/example.py

+
+
def test_hanging():
    # Fixture: loops forever to exercise the runner's timeout handling.
    while True:
        pass

testrunner/test/test_runner.py

+import py, sys, os, signal, cStringIO, tempfile
+
+import runner
+import pypy
+
# Path to pypy's test_all.py, used as the test driver script below.
pytest_script = py.path.local(pypy.__file__).dirpath('test_all.py')
+
+
def test_busywait():
    # timers = [simulated elapsed sleep time,
    #           elapsed time at which poll() starts succeeding,
    #           number of time() calls made]
    class FakeProcess:
        def poll(self):
            if timers[0] >= timers[1]:
                return 42
            return None
    class FakeTime:
        def sleep(self, delay):
            timers[0] += delay
        def time(self):
            timers[2] += 1
            return 12345678.9 + timers[0]
    p = FakeProcess()
    prevtime = runner.time
    try:
        # replace the time module that runner.busywait uses
        runner.time = FakeTime()
        #
        timers = [0.0, 0.0, 0]
        returncode = runner.busywait(p, 10)
        assert returncode == 42 and 0.0 <= timers[0] <= 1.0
        #
        timers = [0.0, 3.0, 0]
        returncode = runner.busywait(p, 10)
        assert returncode == 42 and 3.0 <= timers[0] <= 5.0 and timers[2] <= 10
        #
        # long wait: the growing delay must keep the poll count low
        timers = [0.0, 500.0, 0]
        returncode = runner.busywait(p, 1000)
        assert returncode == 42 and 500.0<=timers[0]<=510.0 and timers[2]<=100
        #
        timers = [0.0, 500.0, 0]
        returncode = runner.busywait(p, 100)    # get a timeout
        assert returncode == None and 100.0 <= timers[0] <= 110.0
        #
    finally:
        runner.time = prevtime
+
def test_should_report_failure():
    """Exit code 1 is only reported when no regular F/E/P line exists."""
    srf = runner.should_report_failure
    # empty log, only passes, only skips: must be reported
    assert srf("")
    assert srf(". Abc\n. Def\n")
    assert srf("s Ghi\n")
    # any F/E/P line means the resultlog already explains the failure
    assert not srf(". Abc\nF Def\n")
    assert not srf(". Abc\nE Def\n")
    assert not srf(". Abc\nP Def\n")
    assert not srf("F Def\n. Ghi\n. Jkl\n")
+
+
class TestRunHelper(object):
    # End-to-end tests for runner.run() using real subprocesses; output
    # is captured in a fresh temp file per test method.

    def setup_method(self, meth):
        h, self.fn = tempfile.mkstemp()
        os.close(h)

    def teardown_method(self, meth):
        os.unlink(self.fn)

    def test_run(self):
        res = runner.run([sys.executable, "-c", "print 42"], '.',
                         py.path.local(self.fn))
        assert res == 0
        out = py.path.local(self.fn).read('r')
        assert out == "42\n"

    def test_error(self):
        res = runner.run([sys.executable, "-c", "import sys; sys.exit(3)"], '.', py.path.local(self.fn))
        assert res == 3

    def test_signal(self):
        # a child killed by signal N yields exit status -N
        if sys.platform == 'win32':
            py.test.skip("no death by signal on windows")
        res = runner.run([sys.executable, "-c", "import os; os.kill(os.getpid(), 9)"], '.', py.path.local(self.fn))
        assert res == -9

    def test_timeout(self):
        res = runner.run([sys.executable, "-c", "while True: pass"], '.', py.path.local(self.fn), timeout=3)
        assert res == -999

    def test_timeout_lock(self):
        # a child blocked on a lock must still be killable
        res = runner.run([sys.executable, "-c", "import threading; l=threading.Lock(); l.acquire(); l.acquire()"], '.', py.path.local(self.fn), timeout=3)
        assert res == -999

    def test_timeout_syscall(self):
        # a child blocked inside a system call must still be killable
        res = runner.run([sys.executable, "-c", "import socket; s=s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM); s.bind(('', 0)); s.recv(1000)"], '.', py.path.local(self.fn), timeout=3)
        assert res == -999        

    def test_timeout_success(self):
        # finishing before the timeout must not be treated as a failure
        res = runner.run([sys.executable, "-c", "print 42"], '.',
                         py.path.local(self.fn), timeout=2)
        assert res == 0
        out = py.path.local(self.fn).read('r')
        assert out == "42\n"        
+
+
class TestExecuteTest(object):
    # Tests for execute_test()/interpret_exitcode() with runner.run
    # replaced by a stub that records its arguments and returns a
    # controllable exit code.

    def setup_class(cls):
        cls.real_run = (runner.run,)
        cls.called = []
        cls.exitcode = [0]
        
        def fake_run(args, cwd, out, timeout):
            cls.called = (args, cwd, out, timeout)
            return cls.exitcode[0]
        runner.run = fake_run

    def teardown_class(cls):
        runner.run = cls.real_run[0]

    def test_explicit(self):
        res = runner.execute_test('/wd', 'test_one', 'out', 'LOGFILE',
                                  interp=['INTERP', 'IARG'],
                                  test_driver=['driver', 'darg'],
                                  timeout='secs')

        # fixed: execute_test inserts '-p resultlog' before the
        # --resultlog option; the old expectation omitted it
        expected = ['INTERP', 'IARG',
                    'driver', 'darg',
                    '-p', 'resultlog',
                    '--resultlog=LOGFILE',
                    'test_one']

        assert self.called == (expected, '/wd', 'out', 'secs')        
        assert res == 0

    def test_explicit_win32(self):
        res = runner.execute_test('/wd', 'test_one', 'out', 'LOGFILE',
                                  interp=['./INTERP', 'IARG'],
                                  test_driver=['driver', 'darg'],
                                  timeout='secs',
                                  _win32=True
                                  )

        # on win32 a relative interpreter path is joined with cwd
        expected = ['/wd' + os.sep + './INTERP', 'IARG',
                    'driver', 'darg',
                    '-p', 'resultlog',
                    '--resultlog=LOGFILE',
                    'test_one']

        assert self.called == (expected, '/wd', 'out', 'secs')        
        assert res == 0

    def test_error(self):
        # the stub's exit code must be passed through unchanged
        self.exitcode[:] = [1]
        res = runner.execute_test('/wd', 'test_one', 'out', 'LOGFILE',
                                  interp=['INTERP', 'IARG'],
                                  test_driver=['driver', 'darg'])
        assert res == 1


        self.exitcode[:] = [-signal.SIGSEGV]
        res = runner.execute_test('/wd', 'test_one', 'out', 'LOGFILE',
                                  interp=['INTERP', 'IARG'],
                                  test_driver=['driver', 'darg'])
        assert res == -signal.SIGSEGV

    def test_interpret_exitcode(self):
        failure, extralog = runner.interpret_exitcode(0, "test_foo")
        assert not failure
        assert extralog == ""

        # exit code 1 with no F/E/P lines: report it
        failure, extralog = runner.interpret_exitcode(1, "test_foo", "")
        assert failure
        assert extralog == """! test_foo
 Exit code 1.
"""

        # exit code 1 with a regular failure line: nothing extra
        failure, extralog = runner.interpret_exitcode(1, "test_foo", "F Foo\n")
        assert failure
        assert extralog == ""

        failure, extralog = runner.interpret_exitcode(2, "test_foo")
        assert failure
        assert extralog == """! test_foo
 Exit code 2.
"""

        failure, extralog = runner.interpret_exitcode(-signal.SIGSEGV,
                                                      "test_foo")
        assert failure
        assert extralog == """! test_foo
 Killed by SIGSEGV.
"""
+
class RunnerTests(object):
    # Integration tests for execute_tests(), shared between the real
    # threaded runner (TestRunner) and the synchronous variant
    # (TestRunnerNoThreads, with_thread = False).
    with_thread = True

    def setup_class(cls):
        cls.real_invoke_in_thread = (runner.invoke_in_thread,)
        if not cls.with_thread:
            # run workers in-line instead of spawning threads
            runner.invoke_in_thread = lambda func, args: func(*args)
        
        cls.udir = py.path.local.make_numbered_dir(prefix='usession-runner-',
                                              keep=3)
        cls.manydir = cls.udir.join('many').ensure(dir=1)

        cls.udir.join("conftest.py").write("pytest_plugins = 'resultlog'\n")

        # copy the fixture files into test_dir under test_* names
        def fill_test_dir(test_dir, fromdir='normal'):       
            for p in py.path.local(__file__).dirpath(
                'examples', fromdir).listdir("*.py"):
                p.copy(test_dir.join('test_'+p.basename))


        test_normal_dir0 = cls.manydir.join('one', 'test_normal').ensure(dir=1)
        cls.one_test_dir = cls.manydir.join('one')

        fill_test_dir(test_normal_dir0)
        

        # 'two' holds the same fixtures twice, one copy nested one
        # directory deeper, to exercise recursive collection
        test_normal_dir1 = cls.manydir.join('two', 'test_normal1').ensure(dir=1)
        test_normal_dir2 = cls.manydir.join('two', 'pkg',
                                         'test_normal2').ensure(dir=1)
        cls.two_test_dir = cls.manydir.join('two')

        fill_test_dir(test_normal_dir1)
        fill_test_dir(test_normal_dir2)

        cls.test_stall_dir = cls.udir.join('stall').ensure(dir=1)
        test_stall_dir0 = cls.test_stall_dir.join('zero').ensure(dir=1)
        fill_test_dir(test_stall_dir0, 'stall')

    def teardown_class(cls):
        runner.invoke_in_thread = cls.real_invoke_in_thread[0]

    def test_one_dir(self):
        test_driver = [pytest_script]

        log = cStringIO.StringIO()
        out = cStringIO.StringIO()

        run_param = runner.RunParam(self.one_test_dir)
        run_param.test_driver = test_driver
        run_param.parallel_runs = 3        
        
        res = runner.execute_tests(run_param, ['test_normal'], log, out)

        assert res

        out = out.getvalue()
        assert out
        assert '\r\n' not in out
        assert '\n' in out

        log = log.getvalue()
        assert '\r\n' not in log
        assert '\n' in log        
        log_lines = log.splitlines()

        assert log_lines[0] == ". test_normal/test_example.py:test_one"
        nfailures = 0
        noutcomes = 0
        for line in log_lines:
            if line[0] != ' ':
                noutcomes += 1
                if line[0] != '.':
                    nfailures += 1

        # the 'normal' fixture dir yields 107 outcomes, 6 failures
        assert noutcomes == 107
        assert nfailures == 6

    def test_one_dir_dry_run(self):
        test_driver = [pytest_script]

        log = cStringIO.StringIO()
        out = cStringIO.StringIO()

        run_param = runner.RunParam(self.one_test_dir)
        run_param.test_driver = test_driver
        run_param.parallel_runs = 3
        run_param.dry_run = True
        
        res = runner.execute_tests(run_param, ['test_normal'], log, out)

        assert not res

        # a dry run never writes to the resultlog
        assert log.getvalue() == ""

        out_lines = out.getvalue().splitlines()

        assert len(out_lines) == 5

        assert out_lines[2].startswith("++ starting")
        assert out_lines[4].startswith("run [")
        for line in out_lines[2:]:
            assert "test_normal" in line

    def test_many_dirs(self):
        test_driver = [pytest_script]

        log = cStringIO.StringIO()
        out = cStringIO.StringIO()

        cleanedup = []
        def cleanup(testdir):
            cleanedup.append(testdir)

        run_param = runner.RunParam(self.manydir)
        run_param.test_driver = test_driver
        run_param.parallel_runs = 3
        run_param.cleanup = cleanup

        testdirs = []
        run_param.collect_testdirs(testdirs)
        alltestdirs = testdirs[:]
        
        res = runner.execute_tests(run_param, testdirs, log, out)

        assert res

        assert out.getvalue()

        log_lines = log.getvalue().splitlines()

        nfailures = 0
        noutcomes = 0
        for line in log_lines:
            if line[0] != ' ':
                noutcomes += 1
                if line[0] != '.':
                    nfailures += 1

        # three copies of the 'normal' fixture dir were collected
        assert noutcomes == 3*107
        assert nfailures == 3*6

        # the cleanup hook must have run once per test directory
        assert set(cleanedup) == set(alltestdirs)

    def test_timeout(self):
        test_driver = [pytest_script]

        log = cStringIO.StringIO()
        out = cStringIO.StringIO()

        run_param = runner.RunParam(self.test_stall_dir)
        run_param.test_driver = test_driver
        run_param.parallel_runs = 3
        run_param.timeout = 3

        testdirs = []
        run_param.collect_testdirs(testdirs)
        res = runner.execute_tests(run_param, testdirs, log, out)
        assert res

        log_lines = log.getvalue().splitlines()
        assert log_lines[1] == ' TIMEOUT'

    def test_run_wrong_interp(self):
        log = cStringIO.StringIO()
        out = cStringIO.StringIO()

        run_param = runner.RunParam(self.one_test_dir)
        # a nonexistent interpreter makes Popen fail -> RUNFAILED
        run_param.interp = ['wrong-interp']
        run_param.parallel_runs = 3

        testdirs = []
        run_param.collect_testdirs(testdirs)
        res = runner.execute_tests(run_param, testdirs, log, out)
        assert res

        log_lines = log.getvalue().splitlines()
        assert log_lines[1] == ' Failed to run interp'

    def test_run_bad_get_test_driver(self):
        test_driver = [pytest_script]
        
        log = cStringIO.StringIO()
        out = cStringIO.StringIO()

        run_param = runner.RunParam(self.one_test_dir)
        run_param.parallel_runs = 3
        # a raising get_test_driver must surface as EXECUTEFAILED
        def boom(testdir):
            raise RuntimeError("Boom")
        run_param.get_test_driver = boom

        testdirs = []
        run_param.collect_testdirs(testdirs)
        res = runner.execute_tests(run_param, testdirs, log, out)
        assert res

        log_lines = log.getvalue().splitlines()
        assert log_lines[1] == ' Failed with exception in execute-test'
+
+
class TestRunnerNoThreads(RunnerTests):
    # Same suite as RunnerTests but with workers called synchronously,
    # plus deterministic collection tests that need single-threading.
    with_thread = False

    def test_collect_testdirs(self):
        res = []
        seen = []
        run_param = runner.RunParam(self.one_test_dir)
        real_collect_one_testdir = run_param.collect_one_testdir

        # wrap collect_one_testdir to record what gets collected
        def witness_collect_one_testdir(testdirs, reldir, tests):
            seen.append((reldir, sorted(map(str, tests))))
            real_collect_one_testdir(testdirs, reldir, tests)

        run_param.collect_one_testdir = witness_collect_one_testdir
        
        run_param.collect_testdirs(res)

        assert res == ['test_normal']
        assert len(seen) == 1
        reldir, tests = seen[0]
        assert reldir == 'test_normal'
        for test in tests:
            assert test.startswith('test_normal/')

        run_param.collect_one_testdir = real_collect_one_testdir
        res = []
        run_param = runner.RunParam(self.two_test_dir)
        
        # nested package dirs are collected as separate units
        run_param.collect_testdirs(res)

        assert sorted(res) == ['pkg/test_normal2', 'test_normal1']        
+
+
class TestRunner(RunnerTests):
    # Same suite as RunnerTests, run with real worker threads.
    pass
+

testrunner/test/test_scratchbox_runner.py

+import scratchbox_runner
+
def test_scratchbox():
    """The login wrapper must come first, followed by the original args."""
    wrapped = scratchbox_runner.args_for_scratchbox('x/y', ['a', 'b'])
    assert wrapped == ['/scratchbox/login', '-d', 'x/y', 'a', 'b']