Commits

Anonymous committed 85f4f4f

Merged run_tests.py functionality into run_tests_sub.py, added optparse cmdline options. Added buildpage + run_tests__tests directory.

Comments (0)

Files changed (14)

+################################################################################
+"""
+
+Modification of http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440554
+
+"""
+
+#################################### IMPORTS ###################################
+
 import os
 import subprocess
 import errno
 import time
 import sys
-
-PIPE = subprocess.PIPE
+import unittest
+import tempfile
 
 if subprocess.mswindows:
     # sys.path.append('async_libs.zip')
     from win32pipe import PeekNamedPipe
 
     import msvcrt
+
+    def _call_proc(cmd):
+        return subprocess.Popen (
+            cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell = 1,
+        )
+
+    win32_kill_commands = (
+        ('pskill', 'pskill -t %s'),
+        ('taskkill /?', 'taskkill /F /T /PID %s'),  # /? so no err code
+    )
+
+    for test_cmd, kill_cmd in win32_kill_commands:
+        if _call_proc(test_cmd).wait() != 1:
+            os.kill = lambda pid: _call_proc(kill_cmd % pid)
+            break
+        else:
+            os.kill = None
+
+    if os.kill is None:
+        raise SystemExit('No way of killing unruly processes. Try installing '
+                         'sysinternals pskill and placing on %PATH%.')
+
 else:
     import select
     import fcntl
 
+################################### CONSTANTS ##################################
+
+PIPE = subprocess.PIPE
+
+################################################################################
+
+
 class Popen(subprocess.Popen):
     def recv(self, maxsize=None):
         return self._recv('stdout', maxsize)
                 if not conn.closed:
                     fcntl.fcntl(conn, fcntl.F_SETFL, flags)
 
-if __name__ == '__main__':
+################################################################################
+
+def proc_in_time_or_kill(cmd, time_out):
+    proc = Popen (
+        cmd, shell = True, bufsize = -1,
+        stdin = subprocess.PIPE, stdout = subprocess.PIPE, 
+        stderr = subprocess.STDOUT, universal_newlines = 1
+    )
+
+    ret_code = None
+    response = []
+
+    t = time.time()
+    while ret_code is None and ((time.time() - t) < time_out):
+        ret_code = proc.poll()
+        response += [proc.read_async(wait=0.1, e=0)]
+
+    if ret_code is None:
+        os.kill(proc.pid)
+        ret_code = '"Process timed out (time_out = %s secs)"' % time_out
+
+    return ret_code, ''.join(response)
+
+################################################################################
+
+class AsyncTest(unittest.TestCase):
+    def test_proc_in_time_or_kill(self):
+        temp_dir   = tempfile.mkdtemp()
+        temp_file  = os.path.join(temp_dir, 'xxxxxx.py')
+        fh = open(temp_file, 'w')
+        try:
+            fh.write('while True: print "GST"')
+        finally:
+            fh.close()
+
+        ret_code, response = proc_in_time_or_kill('python %s' % temp_file, time_out=3)
+
+        self.assert_(
+            ret_code.startswith('"Process timed out') and
+            "GST" in response
+        )    
+
+################################################################################
+
+def _example():
     if sys.platform == 'win32':
         shell, commands, tail = ('cmd', ('echo "hello"', 'echo "HELLO WORLD"'), '\r\n')
     else:
         print a.read_async(),
     a.send_all('exit' + tail)
     print a.read_async(e=0)
-    a.wait()
+    a.wait()
+
+################################################################################
+    
+if __name__ == '__main__':
+    if 1:
+        unittest.main()
+    else:
+        _example()
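
The file above ends with proc_in_time_or_kill, which wraps the asynchronous Popen: it polls the child process for a return code, collects its output, and falls back to os.kill once time_out seconds have passed. A minimal usage sketch, assuming this file is the async_sub module imported by the test runner below (the command string is only illustrative):

    from async_sub import proc_in_time_or_kill

    # Hypothetical long-running command; killed if still alive after 5 seconds.
    ret_code, output = proc_in_time_or_kill('python some_slow_script.py', time_out=5)

    if isinstance(ret_code, str):   # timed out: ret_code is the message string
        print 'killed:', ret_code
    else:                           # exited on its own: ret_code is the exit code
        print 'exit code:', ret_code
    print output
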
+#!/usr/bin/env python
+
+"""
+
+Test runner for pygame unittests:
+
+By default, runs all test/xxxx_test.py files in a single process.
+
+Option to run tests in subprocesses using subprocess and async_sub. Each test
+is polled for a return code, and if it has not returned after TIME_OUT seconds
+the process is killed with os.kill.
+
+On win32, os.kill is defined using subprocess.Popen to call either pskill or
+taskkill, whichever is found on the system %PATH%. If neither is available,
+the script will raise SystemExit.
+
+taskkill is shipped with Windows from XP onwards.
+pskill is available from the SysInternals website.
+
+Dependencies:
+    async_sub.py:
+        Requires the win32 extensions when run on Windows. It may be possible
+        to get away with shipping just win32file.pyd and win32pipe.pyd, zipped
+        to about 35 kbytes.
+"""
+
+#################################### IMPORTS ###################################
+
+import sys, os, re, unittest, subprocess, time, optparse
+import pygame.threads, async_sub
+
+main_dir = os.path.split(os.path.abspath(sys.argv[0]))[0]
+test_subdir = os.path.join(main_dir, 'test')
+fake_test_subdir = os.path.join(test_subdir, 'run_tests__tests')
+
+sys.path.insert(0, test_subdir)
+
+import test_utils
+
+################################### CONSTANTS ##################################
+
+# If an xxxx_test.py takes longer than TIME_OUT seconds it will be killed
+TIME_OUT = 30
+
+# Any tests in IGNORE will not be run
+IGNORE = (
+    "scrap_test",
+    "fake_time_out_test",
+)
+
+# Subprocess mode has less need to worry about interference between tests
+SUBPROCESS_IGNORE = (
+    "scrap_test",
+)
+
+################################################################################
+
+COMPLETE_FAILURE_TEMPLATE = """
+======================================================================
+ERROR: all_tests_for (%s.AllTestCases)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+  File "test\%s.py", line 1, in all_tests_for
+
+subprocess completely failed with return code of %s
+
+"""  # Leave that last empty line else build page regex won't match
+
+RAN_TESTS_DIV = (70 * "-") + "\nRan"
+
+DOTS = re.compile("^([FE.]+)$", re.MULTILINE)
+
+TEST_MODULE_RE = re.compile(r'^(.+_test)\.py$')
+
+################################################################################
+# Set the command line options
+#
+
+opt_parser = optparse.OptionParser()
+opt_parser.add_option(
+     "-v",  "--verbose", action = 'store_true',
+     help   = "Be verbose in output (only single process mode)" )
+
+opt_parser.add_option (
+     "-i",  "--incomplete", action = 'store_true',
+     help   = "Fail incomplete tests (only single process mode)" )
+
+opt_parser.add_option (
+     "-s",  "--subprocess", action = 'store_true',
+     help   = "Run tests in subprocesses" )
+
+opt_parser.add_option (
+     "-t",  "--threaded", metavar = 'THREADS', type = 'int',
+     help   = "Run subprocessed tests in x THREADS" )
+
+opt_parser.add_option (
+     "-f",  "--fake", metavar = "DIR",
+     help   = "Run fake tests in %s"  % fake_test_subdir)
+
+options, args = opt_parser.parse_args()
+
+################################################################################
+# Change to working directory and compile a list of test modules
+# If options.fake, then compile list of fake xxxx_test.py from run_tests__tests
+#
+
+if options.fake:
+    test_subdir = os.path.join(fake_test_subdir, options.fake )
+    sys.path.append(test_subdir)
+
+os.chdir(main_dir)
+
+test_modules = []
+for f in os.listdir(test_subdir):
+    for match in TEST_MODULE_RE.findall(f):
+        test_modules.append(match)
+
+################################################################################
+# Run all the tests in one process 
+# unittest.TextTestRunner().run(unittest.TestSuite())
+# 
+
+if not options.subprocess:
+    suite = unittest.TestSuite()
+    runner = unittest.TextTestRunner()
+        
+    for module in [m for m in test_modules if m not in IGNORE]:
+        print 'loading ' + module
+        __import__( module )
+        test = unittest.defaultTestLoader.loadTestsFromName( module )
+        suite.addTest( test )
+    
+    test_utils.fail_incomplete_tests = options.incomplete
+    if options.verbose:
+        runner.verbosity = 2
+    
+    runner.run( suite )
+    
+    sys.exit()
+
+    ###########################
+    # SYS.EXIT() FLOW CONTROL #
+    ###########################
+
+################################################################################
+# Runs an individual xxxx_test.py test suite in a subprocess
+#
+
+def run_test(cmd):
+    module = os.path.basename(cmd).split('.')[0]
+    print 'loading %s' % module
+    ret_code, response = async_sub.proc_in_time_or_kill(cmd, time_out=TIME_OUT)
+    return module, ret_code, response
+
+################################################################################
+# Run all the tests in subprocesses
+#
+
+test_cmd = ('python %s/' % test_subdir) + '%s.py'
+# test_cmd += flags and options to pass on
+
+test_cmds = [ test_cmd % m for m in test_modules if 
+                         m not in SUBPROCESS_IGNORE ]
+
+t = time.time()
+
+if options.threaded:
+    test_results = pygame.threads.tmap (
+        run_test, test_cmds,
+        stop_on_error = False,
+        num_workers = options.threaded
+    )
+else:
+    test_results = map(run_test, test_cmds)
+
+t = time.time() - t
+
+################################################################################
+# Combine subprocessed TextTestRunner() results to mimic a single run
+# Puts complete failures in a form the build page will pick up
+
+all_dots = ''
+failures = []
+complete_failures = 0
+
+for module, ret_code, ret in test_results:
+    if ret_code and ret_code != 1:                                    # TODO: ??
+        failures.append (
+            COMPLETE_FAILURE_TEMPLATE % (module, module, ret_code)
+        )
+        complete_failures += 1
+        continue
+
+    dots = DOTS.search(ret)
+    if not dots: continue                        # in case of empty xxxx_test.py
+    else: dots = dots.group(1)
+
+    all_dots += dots
+
+    if 'E' in dots or 'F' in dots:
+        failure = ret[len(dots):].split(RAN_TESTS_DIV)[0]
+        failures.append (
+            failure.replace( "(__main__.", "(%s." % module)
+        )
+
+total_fails, total_errors = all_dots.count('F'), all_dots.count('E')
+total_tests = len(all_dots)
+
+print all_dots
+if failures: print ''.join(failures).lstrip('\n')[:-1]
+print "%s %s tests in %.3fs\n" % (RAN_TESTS_DIV, total_tests, t)
+
+if not failures:
+    print 'OK'
+else:
+    print 'FAILED (%s)' % ', '.join (
+        (total_fails  and ["failures=%s" % total_fails] or []) +
+        (total_errors and ["errors=%s"  % total_errors] or [])
+    )
+
+################################################################################
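
The result-combining step above hinges on the DOTS regex picking out the line of '.', 'F' and 'E' characters that unittest's TextTestRunner writes before its summary. A small sketch of that extraction on a made-up chunk of runner output:

    import re

    DOTS = re.compile("^([FE.]+)$", re.MULTILINE)

    # Made-up TextTestRunner output captured from one subprocess
    ret = "..F.E.\nFAIL: test_blit (__main__.AllTestCases)\n"

    dots = DOTS.search(ret).group(1)                      # -> "..F.E."
    print dots.count('F'), dots.count('E'), len(dots)     # -> 1 1 6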

test/run_tests__tests/all_ok/fake_1_test.py

+import unittest
+
+class KeyModuleTest(unittest.TestCase):
+    def test_get_focused(self):
+        self.assert_(True) 
+
+    def test_get_mods(self):
+        self.assert_(True) 
+
+    def test_get_pressed(self):
+        self.assert_(True) 
+
+    def test_name(self):
+        self.assert_(True) 
+
+    def test_set_mods(self):
+        self.assert_(True) 
+
+    def test_set_repeat(self):
+        self.assert_(True) 
+
+if __name__ == '__main__':
+    unittest.main()

test/run_tests__tests/all_ok/fake_2_test.py

+import unittest
+
+class KeyModuleTest(unittest.TestCase):
+    def test_get_focused(self):
+        self.assert_(True) 
+
+    def test_get_mods(self):
+        self.assert_(True) 
+
+    def test_get_pressed(self):
+        self.assert_(True) 
+
+    def test_name(self):
+        self.assert_(True) 
+
+    def test_set_mods(self):
+        self.assert_(True) 
+
+    def test_set_repeat(self):
+        self.assert_(True) 
+
+if __name__ == '__main__':
+    unittest.main()

test/run_tests__tests/all_ok/fake_3_test.py

+import unittest
+
+class KeyModuleTest(unittest.TestCase):
+    def test_get_focused(self):
+        self.assert_(True) 
+
+    def test_get_mods(self):
+        self.assert_(True) 
+
+    def test_get_pressed(self):
+        self.assert_(True) 
+
+    def test_name(self):
+        self.assert_(True) 
+
+    def test_set_mods(self):
+        self.assert_(True) 
+
+    def test_set_repeat(self):
+        self.assert_(True) 
+
+if __name__ == '__main__':
+    unittest.main()

test/run_tests__tests/all_ok/fake_4_test.py

+import unittest
+
+class KeyModuleTest(unittest.TestCase):
+    def test_get_focused(self):
+        self.assert_(True) 
+
+    def test_get_mods(self):
+        self.assert_(True) 
+
+    def test_get_pressed(self):
+        self.assert_(True) 
+
+    def test_name(self):
+        self.assert_(True) 
+
+    def test_set_mods(self):
+        self.assert_(True) 
+
+    def test_set_repeat(self):
+        self.assert_(True) 
+
+if __name__ == '__main__':
+    unittest.main()

test/run_tests__tests/all_ok/fake_5_test.py

+import unittest
+
+class KeyModuleTest(unittest.TestCase):
+    def test_get_focused(self):
+        self.assert_(True) 
+
+    def test_get_mods(self):
+        self.assert_(True) 
+
+    def test_get_pressed(self):
+        self.assert_(True) 
+
+    def test_name(self):
+        self.assert_(True) 
+
+    def test_set_mods(self):
+        self.assert_(True) 
+
+    def test_set_repeat(self):
+        self.assert_(True) 
+
+if __name__ == '__main__':
+    unittest.main()

test/run_tests__tests/all_ok/fake_6_test.py

+import unittest
+
+class KeyModuleTest(unittest.TestCase):
+    def test_get_focused(self):
+        self.assert_(True) 
+
+    def test_get_mods(self):
+        self.assert_(True) 
+
+    def test_get_pressed(self):
+        self.assert_(True) 
+
+    def test_name(self):
+        self.assert_(True) 
+
+    def test_set_mods(self):
+        self.assert_(True) 
+
+    def test_set_repeat(self):
+        self.assert_(True) 
+
+if __name__ == '__main__':
+    unittest.main()

test/test_test_.py

+while True:
+    pass
+    

test/util/buildpage/callproc.py

+import subprocess
+import os
+import sys
+
+def ExecuteAssertSuccess(cmd, *args, **keywords):
+    retcode, output = GetReturnCodeAndOutput(cmd, *args, **keywords)
+    if retcode != 0:
+        if isinstance(cmd, str):
+            cmd_line = cmd
+        else:
+            cmd_line = " ".join(cmd)
+        raise Exception("calling: "+cmd_line+" failed with output:\n"+output)
+    return output
+
+def GetReturnCodeAndOutput(cmd, dir=None, env=None, bufsize=-1, lineprintdiv=1):
+    if isinstance(cmd, str):
+        print "executing:",cmd
+    else:           
+        print "executing:"," ".join(cmd)
+        if sys.platform == "darwin":
+            cmd = " ".join(cmd)
+    proc = subprocess.Popen(cmd, cwd=dir, env=env, shell=True, bufsize=bufsize, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    response = ""
+    finished = False
+    numlines = 0
+    while not finished or proc.poll() == None:
+        line = proc.stdout.readline()
+        if line == "":
+            finished = True
+        else:
+            numlines += 1
+            if numlines % lineprintdiv == 0:
+                sys.stdout.write(".")
+            response += line.replace("\r\n", "\n").replace("\r", "\n")
+    sys.stdout.write("\n")
+    return proc.wait(), response
+
+def InteractiveGetReturnCodeAndOutput(cmd, input_string, dir=None, env=None, bufsize=-1):
+    if isinstance(cmd, str):
+        print "executing:",cmd
+    else:           
+        print "executing:"," ".join(cmd)
+        if sys.platform == "darwin":
+            cmd = " ".join(cmd)
+    proc = subprocess.Popen(cmd, cwd=dir, env=env, shell=True, bufsize=bufsize, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    print "---------------"
+    response = proc.communicate(input_string)[0]
+    return proc.wait(), response
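
For reference, a brief sketch of how the two main helpers above are used by the other buildpage scripts (the svn command here is only illustrative):

    import callproc

    # Returns (return_code, captured_output); prints a progress dot per line read.
    ret_code, output = callproc.GetReturnCodeAndOutput(["svn", "info"], dir=".")

    # Same call, but raises Exception (with the captured output) on a non-zero return code.
    info = callproc.ExecuteAssertSuccess(["svn", "info"], ".")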

test/util/buildpage/config/build_config.ini.template

+[DEFAULT]
+python_path=C:/Python25
+make_package=bdist_howtomakepackage
+package_mask=*.ext
+test_dir_subpath=Lib/site-packages
+
+[build_env]

test/util/buildpage/config/upload.ini.template

+[DEFAULT]
+host=website.com
+user=user
+remote_path=path/to/builds
+scp=scp %(local_path)s %(user)s@%(host)s:%(remote_path)s/%(remote_file)s

test/util/buildpage/update_build.py

+import os
+import sys
+import re
+import callproc
+import time
+import glob
+import ConfigParser
+import shutil
+import upload_results
+
+def write_file_lines(filename, line_list):
+    file_obj = file(filename, "w")
+    for line in line_list:
+        if not isinstance(line, str):
+            line = str(line)
+        file_obj.write(line)
+        file_obj.write("\n")
+    file_obj.close()
+    
+def re_sub_file(file_path, match, replace):
+    content = file(file_path, "r").read()
+    content, count = re.subn(match, replace, content)
+    assert(count > 0)
+    output = file(file_path, "w")
+    output.write(content)
+    output.close()
+
+def assert_path_exists(path, description):
+    if not os.path.exists(path):
+        raise Exception("ERROR: can't find "+description+" at : "+path)
+
+def cleardir(path_to_clear):
+    for root, dirs, files in os.walk(path_to_clear, topdown=False):
+        for name in files:
+            os.remove(os.path.join(root, name))
+        for name in dirs:
+            os.rmdir(os.path.join(root, name))
+
+def GetAndBrandLatestFromSVN(src_path):
+    output = callproc.ExecuteAssertSuccess(["svn","co","svn://seul.org/svn/pygame/trunk",src_path])
+    
+    rev_match = re.search(r"(At|Checked out) revision ([0-9]+)\.", output)
+    latest_rev = int(rev_match.group(2))
+    
+    callproc.ExecuteAssertSuccess(["svn","revert",src_path,"-R"])
+    
+    version_source = src_path + '/lib/version.py'
+    re_sub_file(version_source, r"(ver\s*=\s*)'([0-9]+\.[0-9]+\.[0-9]+[^']*)'", r"\1'\2-svn"+str(latest_rev)+"'")
+    
+    return latest_rev        
+
+def AppendBlameInfoToErrorsByFile(src_root, errors_by_file, line_func = lambda x: int(x[0])):
+    for error_file in errors_by_file:
+        print "blame for",error_file
+        ret_code, blame_output = callproc.GetReturnCodeAndOutput(["svn", "blame", error_file], src_root, lineprintdiv=100)
+        if ret_code == 0:
+            blame_lines = blame_output.split('\n')
+            for error in errors_by_file[error_file]:
+                line = line_func(error)
+                line_match = re.match(r"\s*([0-9]+)\s+([^ ]+)\s([^\r\n]*)", blame_lines[line - 1])
+                rev = line_match.group(1)
+                user = line_match.group(2)
+                line = line_match.group(3)
+                error.append(user)
+                error.append(line)
+                error.append(rev)
+
+def GetBuildWarningsHTML(src_path, build_output):
+    warnings_by_file = {}
+    warning_matches = re.findall(r"^([^\(\s]+\.c)(?:\(|:)([0-9]+)(?:\)|:) ?:? warning:? ([^\r\n]+)[\r\n]", build_output, re.MULTILINE)
+    if len(warning_matches) > 0:
+        print "WARNING - found",len(warning_matches),"warnings"
+        for warning_match in warning_matches:
+            warning_file, line, message = warning_match
+            if warning_file not in warnings_by_file:
+                warnings_by_file[warning_file] = []
+            warnings_by_file[warning_file].append([line, message])
+
+        AppendBlameInfoToErrorsByFile(src_path, warnings_by_file)
+                
+        web_friendly_warnings = []
+        for warning_file in warnings_by_file:
+            for warning in warnings_by_file[warning_file]:
+                file_location = os.path.split(warning_file)[1] + ":" + warning[0] + " last rev: " + warning[-1] + ":" + warning[-3]
+                code_line = warning[-2].replace("<", "&lt;").replace(">", "&gt;").replace(" ", "&nbsp;")
+                web_friendly_warnings.append(file_location + "<br>warning:" + warning[1] + '<br><code>' + code_line + '</code>')                
+        return "<hr>".join(web_friendly_warnings)
+    else:
+        print "no warnings found in:"
+        print build_output
+        return ""
+    
+script_path = os.path.split(sys.argv[0])[0]
+print 'executing pygamebuilder from:',script_path
+if script_path != "": 
+    os.chdir(script_path)
+print "-------------------------"
+
+if not os.path.exists("./source"):
+    os.mkdir("./source")
+src_path = './source/pygame'
+latest_rev = GetAndBrandLatestFromSVN(src_path)
+
+if not os.path.exists("./output"):
+    os.mkdir("./output")
+    
+if len(sys.argv) > 1:
+    config_file_list = ["./config/build_" + arg + ".ini" for arg in sys.argv[1:]]
+else:
+    config_file_list = glob.glob("./config/build_*.ini")
+
+for config_file in config_file_list:
+  
+    config_data = ConfigParser.SafeConfigParser()
+    config_data.read([config_file])
+    platform_id = os.path.split(config_file)[1].replace(".ini", "").replace("build_", "")
+
+    last_rev_filename = "./output/last_rev_"+platform_id+".txt"
+
+    assert(config_data.has_option("DEFAULT", "python_path"))
+    python_path = config_data.get("DEFAULT", "python_path")
+    assert_path_exists(python_path, "expected python version")
+
+    print "-------------------------"
+    print "building",platform_id,"with python at",python_path
+    try:
+        previous_rev = int(file(last_rev_filename, "r").read())
+    except:
+        print "WARNING: could not find last rev built"
+        previous_rev = 0
+    
+    if latest_rev == previous_rev:
+        print "exiting - already built rev",latest_rev
+    else:
+        print "building",latest_rev,"(last built %d)" % previous_rev
+        valid_build_attempt = True
+        
+        build_env = {}
+        for option in config_data.options("build_env"):
+            build_env[option] = config_data.get("build_env", option)
+            
+        ret_code, output = callproc.InteractiveGetReturnCodeAndOutput([python_path, "config.py"], "Y\nY\nY\n", src_path, build_env)
+        print output
+        if ret_code != 0:
+            print "ERROR running config.py!"
+            assert(ret_code == 0)
+    
+        dist_path = src_path + "/dist"
+        if os.path.exists(dist_path):
+            cleardir(dist_path)
+
+        package_command = config_data.get("DEFAULT", "make_package")
+        ret_code, output = callproc.GetReturnCodeAndOutput([python_path, "setup.py", package_command], src_path, build_env)
+        if ret_code == 0:
+            build_warnings = GetBuildWarningsHTML(src_path, output)
+            
+            package_mask = config_data.get("DEFAULT", "package_mask")
+            installer_dist_path = glob.glob(dist_path+"/"+package_mask)[0]
+            print "got installer at:", installer_dist_path
+            installer_filename = os.path.split(installer_dist_path)[1]
+            installer_path = "./output/"+installer_filename
+            shutil.move(installer_dist_path, installer_path)
+            
+            temp_install_path = os.path.join(os.getcwd(), "install_test")
+            if os.path.exists(temp_install_path):
+                cleardir(temp_install_path)
+            else:
+                os.mkdir(temp_install_path)
+
+            test_subpath = config_data.get("DEFAULT", "test_dir_subpath")
+            temp_install_pythonpath = os.path.join(temp_install_path, test_subpath)
+            os.makedirs(temp_install_pythonpath)
+            
+            test_env = {"PYTHONPATH":temp_install_pythonpath}
+            install_env = build_env.copy()
+            install_env.update(test_env)
+
+            print "installing to:",temp_install_path
+            callproc.ExecuteAssertSuccess([python_path, "setup.py", "install", "--prefix", temp_install_path], src_path, install_env)
+        
+            print "running tests..."
+            ret_code, output = callproc.GetReturnCodeAndOutput([python_path, "run_tests.py"], src_path, test_env)
+            error_match = re.search("FAILED \([^\)]+=([0-9]+)\)", output)
+            if ret_code != 0 or error_match != None:
+                errors_by_file = {}
+                error_matches = re.findall(r"^((?:ERROR|FAIL): [^\n]+)\n+-+\n+((?:[^\n]+\n)+)\n", output, re.MULTILINE)
+                if len(error_matches) > 0:
+                    print "TESTS FAILED - found",len(error_matches),"errors"
+                    for error_match in error_matches:
+                        message, traceback = error_match
+                        trace_top_match = re.search(r'File "([^"]+)", line ([0-9]+)', traceback)
+                        error_file, line = trace_top_match.groups()
+                        if error_file not in errors_by_file:
+                            errors_by_file[error_file] = []
+                        errors_by_file[error_file].append([line, message, traceback])
+                    AppendBlameInfoToErrorsByFile(src_path, errors_by_file)
+                    
+                    for error_file in errors_by_file:
+                        print "test failures in:", error_file
+                        for error in errors_by_file[error_file]:
+                            print error
+                            
+                    build_result = "Build Successful, Tests FAILED"                            
+                    web_friendly_errors = []
+                    for error_file in errors_by_file:
+                        for error in errors_by_file[error_file]:
+                            file_location = os.path.split(error_file)[1] + ":" + error[0] + " last rev: " + error[-1] + ":" + error[-3] 
+                            web_friendly_errors.append(file_location + "<br>" + error[1])                
+                    build_errors = "<hr>".join(web_friendly_errors)
+                else:
+                    build_result = "Build Successful, Invalid Test Results"
+                    build_errors = output.replace("\n", "<br>")                            
+                    print "ERROR - tests failed! could not parse output:"
+                    print output
+            else:   
+                print "success! uploading..."
+                result_filename = "./output/prebuilt_%s.txt" % platform_id
+                write_file_lines(result_filename, [str(latest_rev), time.strftime("%Y-%m-%d %H:%M"), "uploading"])
+                upload_results.scp(result_filename)
+                upload_results.scp(installer_path)
+                write_file_lines(result_filename, [str(latest_rev), time.strftime("%Y-%m-%d %H:%M"), installer_filename])
+                upload_results.scp(result_filename)
+                build_result = "Build Successful, Tests Passed"                            
+                tests_run = re.findall(r"^loading ([^\r\n]+)$", output, re.MULTILINE)
+                
+                test_text = [test + " passed" for test in tests_run] 
+                build_errors = "<br>".join(test_text)
+        else:
+            error_matches = re.findall(r"^([^\(\s]+\.c)(?:\(|:)([0-9]+)(?:\)|:) ?:? error:? ([^\r\n]+)[\r\n]", output, re.MULTILINE)
+            if len(error_matches) > 0:
+                print "FAILED - found",len(error_matches),"errors"
+                errors_by_file = {}
+                for error_match in error_matches:
+                    error_file, line, message = error_match
+                    if error_file not in errors_by_file:
+                        errors_by_file[error_file] = []
+                    errors_by_file[error_file].append([line, message])
+
+                AppendBlameInfoToErrorsByFile(src_path, errors_by_file)
+                        
+                for error_file in errors_by_file:
+                    print "errors in:",error_file
+                    for error in errors_by_file[error_file]:
+                        print error
+
+                build_result = "Build FAILED, Tests not run"                            
+                web_friendly_errors = []
+                for error_file in errors_by_file:
+                    for error in errors_by_file[error_file]:
+                        file_location = os.path.split(error_file)[1] + ":" + error[0] + " last rev: " + error[-1] + ":" + error[-3] 
+                        web_friendly_errors.append(file_location + "<br>ERROR:" + error[1])                
+                build_errors = "<hr>".join(web_friendly_errors)
+            else:
+
+                link_error_matches = re.findall(r"^([^\(\s]+)\.obj : error ([^\r\n]+)[\r\n]", output, re.MULTILINE)
+                if len(link_error_matches) > 0:
+                    build_result = "Link FAILED, Tests not run"                           
+                    print "FAILED - found",len(link_error_matches),"errors"
+                    build_errors = ""
+                    for error_match in link_error_matches:
+                        source_name, message = error_match
+                        build_errors += source_name + " : " + message + "<br>"
+                        
+                else:                
+                    exception_match = re.search(r"^Traceback \(most recent call [a-z]+\):[\r\n]+(.+[^\r\n]+Error:[^\r\n]+)", output, re.MULTILINE | re.DOTALL)
+                    if exception_match != None:
+                        build_result = "Build FAILED, Tests not run"                            
+                        build_errors = exception_match.group(1).replace("\n", "<br>")
+                            
+                    else:
+                        build_result = "Build FAILED, Tests not run"                           
+                        build_errors = ""
+                        error_matches = re.findall(r"^error: ([^\r\n]+)", output, re.MULTILINE)
+                        for error_match in error_matches:
+                            build_errors += error_match + "<br>"
+    
+                        print "FAILED - unrecognized errors in:"
+                        print output
+                    
+            build_warnings = GetBuildWarningsHTML(src_path, output)
+                    
+        if valid_build_attempt:            
+            result_filename = "./output/buildresults_%s.txt" % platform_id
+            write_file_lines(result_filename, [latest_rev, time.strftime("%Y-%m-%d %H:%M"), build_result, build_errors, build_warnings])
+            upload_results.scp(result_filename)
+            file(last_rev_filename, "w").write(str(latest_rev))
+            print "COMPLETED build of",latest_rev
+            print "-------------------------"
+        else:
+            print "FAILED build attempt of",latest_rev
+    
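
The compiler warning and error regexes in the script above appear intended to accept both MSVC-style lines ("surface.c(42) : warning ...") and gcc-style lines ("surface.c:42: warning: ..."). A quick standalone check of the warning pattern against a made-up gcc line:

    import re

    WARNING_RE = re.compile(
        r"^([^\(\s]+\.c)(?:\(|:)([0-9]+)(?:\)|:) ?:? warning:? ([^\r\n]+)[\r\n]",
        re.MULTILINE)

    sample = "src/surface.c:42: warning: unused variable 'x'\n"
    print WARNING_RE.findall(sample)
    # -> [('src/surface.c', '42', "unused variable 'x'")]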

test/util/buildpage/upload_results.py

+import os
+import callproc
+import ConfigParser
+
+def scp(local_path, remote_file = None):
+    if remote_file == None:
+        remote_file = os.path.split(local_path)[1]
+    config_file = "./config/upload.ini"
+    config_data = ConfigParser.SafeConfigParser()
+    config_data.read([config_file])
+
+    file_vars = {"local_path":local_path, "remote_file":remote_file}
+    command = config_data.get("DEFAULT", "scp", vars = file_vars)
+    callproc.ExecuteAssertSuccess(command)
+
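
Finally, a sketch of how upload_results.scp resolves its command line: local_path and remote_file are interpolated into the scp template read from upload.ini (the filename used here is purely illustrative):

    import upload_results

    # With the upload.ini.template values shown earlier this expands roughly to:
    #   scp ./output/results_rev1234.txt user@website.com:path/to/builds/results_rev1234.txt
    upload_results.scp("./output/results_rev1234.txt")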