Commits

Richard Mills  committed 1b6224a Merge

Automerge.

  • Parent commits 84c433e, a6832b9

Files changed (36)

File docs/user_manual/Makefile

+# Sarat: Simple makefile to make pdf
+# If references are added, uncomment bibtex etc.
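+# Usage: make (build quick_guide.pdf), make clean (remove auxiliary files),
+#        make allclean (also remove pdf/ps/dvi output)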
+
+FILE=quick_guide
+all: $(FILE)
+$(FILE):
+	pdflatex $(FILE)
+	pdflatex $(FILE)
+	# bibtex $(FILE)
+	# bibtex $(FILE)
+	# pdflatex $(FILE)
+	# pdflatex $(FILE)
+	# dvips $(FILE).dvi
+	# dvipdf $(FILE).dvi
+	# ps2pdf $(FILE).ps
+clean:
+	rm -f *~ *.log *.aux *.lof *.lot *.toc *.bbl *.blg *.loa *.out
+allclean:
+	rm -f *~ *.log *.aux *.lof *.lot *.toc *.bbl *.blg *.loa *.out *.dvi *.ps *.pdf 

File docs/user_manual/installation.tex

 Thus for compiling with the supercritical CO$_2$ option use: {\tt make ssco2=1 pflotran}.
 See the PFLOTRAN \verb|makefile| for additional options.
 
-\subsection{Parallel I/O using Sarat Sreepathi's PARALLELIO\_LIB with PFLOTRAN}
+\subsection{Parallel I/O using Sarat Sreepathi's SCORPIO library with PFLOTRAN}
 \label{parallelio}
 
-The parallel I/O library enables a scalable general purpose parallel I/O capability for HPC by taking advantage of existing parallel I/O libraries, such as HDF5 which are being widely used by scientific applications, and modifying these algorithms to better scale to larger number of processors. The library has been tested with MPICH-3.0.2 and OpenMPI-1.6.
+The SCORPIO parallel I/O library provides a scalable, general-purpose parallel I/O capability for HPC applications. It builds on existing parallel I/O libraries, such as HDF5, that are widely used by scientific applications, and modifies their algorithms to scale better to large numbers of processors. The library has been tested with MPICH-3.0.2 and OpenMPI-1.6.
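+
+Conceptually, each read group designates one process to read a section of the input file and broadcast it to the rest of its group. A minimal sketch of this strategy (illustrative mpi4py code, not SCORPIO's actual API; {\footnotesize\tt read\_input\_section} is a hypothetical stand-in for the real HDF5 read):
+\footnotesize
+\begin{Verbatim}
+from mpi4py import MPI   # assumption: mpi4py is available
+
+def read_input_section():
+    # stand-in for the real HDF5 read performed by one rank per group
+    return {"HDF5_READ_GROUP_SIZE": 512}
+
+comm = MPI.COMM_WORLD
+group_size = 512   # cf. HDF5_READ_GROUP_SIZE
+# split COMM_WORLD into read groups of group_size consecutive ranks
+group = comm.Split(color=comm.rank // group_size, key=comm.rank)
+if group.rank == 0:
+    data = read_input_section()
+else:
+    data = None
+# rank 0 of each group broadcasts the section to the other group members
+data = group.bcast(data, root=0)
+\end{Verbatim}
+\normalsize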
 
 \normalsize
-Values for the variables {\footnotesize\tt HDF5\_READ\_GROUP\_SIZE} and {\footnotesize\tt HDF5\_WRITE\_GROUP\_SIZE} must be set in the input file. 
-Typical values are to set the
-write group size equal to the number of processes on a compute node (typically 16 or 32). 
+It is recommended that values for the variables {\footnotesize\tt HDF5\_READ\_GROUP\_SIZE} and \linebreak {\footnotesize\tt HDF5\_WRITE\_GROUP\_SIZE} be set in the input file. 
+If they are unset, {\footnotesize\tt HDF5\_READ\_GROUP\_SIZE} defaults to the total number of MPI ranks and {\footnotesize\tt HDF5\_WRITE\_GROUP\_SIZE} defaults to 1.
+
+Typically, the write group size should equal the number of processes on a compute node (usually 16 or 32). 
 %If that still results in a penalty, decrease it further. 
 A much higher read group size is preferred, e.g. use 512 when running on 512 cores so that one process reads the input file and broadcasts relevant sections.
 Put {\footnotesize\tt HDF5\_WRITE\_GROUP\_SIZE} under the {\footnotesize\tt OUTPUT} keyword:
 END
 \end{Verbatim}
 \normalsize
-and {\tt HDF5\_READ\_GROUP\_SIZE} in the main body of the input file:
+and {\footnotesize\tt HDF5\_READ\_GROUP\_SIZE} in the main body of the input file:
 \footnotesize
 \begin{Verbatim}
 HDF5_READ_GROUP_SIZE 1024
 \end{Verbatim}
 \normalsize
-Number of MPI tasks should be an exact multiple of {\footnotesize\tt HDF\_READ\_GROUP\_SIZE}.
+% (Sarat: Not necessary) Number of MPI tasks should be an exact multiple of {\footnotesize\tt HDF\_READ\_GROUP\_SIZE}.
 
-For more details on the I/O library, please see Appendix A (Figs. A4, A5) in Sarat Sreepathi's <admin@sarats.com> \href{http://www.lib.ncsu.edu/resolver/1840.16/8317}{dissertation}.
+For more details on the SCORPIO library, please see Appendix A of Sarat Sreepathi's \href{http://www.lib.ncsu.edu/resolver/1840.16/8317}{dissertation} (contact: admin@sarats.com).
 
-Instructions for downloading and installing the parallel I/O library for use with PFLOTRAN is provided below. Note that this software is separate from PFLOTRAN and under a LGPL.
+Instructions for downloading and installing the SCORPIO library for use with PFLOTRAN are provided below. Note that this software is separate from PFLOTRAN and is distributed under the LGPL.
 \begin{enumerate}
 
-\item Download parallelio\_lib source code for building the PARALLELIO library:
+\item Download source code for building the SCORPIO library:
 
-{\small\tt svn co http://ascem-io.secure-water.org/ascem-io DIRNAME}
+{\small\tt svn co http://ascem-io.secure-water.org/ascem-io/scorpio DIRNAME}
 
-where {\footnotesize\tt DIRNAME} is the installation directory (Default: {\footnotesize\tt ascem-io}). 
+where {\footnotesize\tt DIRNAME} is the installation directory (Default: {\footnotesize\tt scorpio}). 
 
 The username and password are:
 %\href{https://bitbucket.org/pflotran/pflotran-dev/wiki/Documentation/Strata}{Strata}
 
 password: {\footnotesize\tt gr0undw@t3r}
 
-\item Compile PARALLELIO library:
-
-To compile the parallel library first modify the Makefile by commenting out
-{\footnotesize\tt MACHINE=cygnus} on line 61, and adding the lines below following the {\footnotesize\tt endif} statement (line 98):
-\footnotesize
-\begin{Verbatim}
-ifeq ($(MACHINE),jaguarpf)
-	CC=cc
-	FC=ftn
-	LINKER=ftn
-	CFLAGS+= -O3 
-	FFLAGS+= -O3 
-	LDFLAGS+= -lhdf5 -lz
-endif
-
-CC=${PETSC_DIR}/${PETSC_ARCH}/bin/mpicc
-FC=${PETSC_DIR}/${PETSC_ARCH}/bin/mpif90
-LINKER=${FC}
-CFLAGS+= -I${PETSC_DIR}/${PETSC_ARCH}/include 
-FFLAGS+= -O3 
-LDFLAGS+= -lparallelio -Wl,-L${PETSC_DIR}/${PETSC_ARCH}/lib -lhdf5 -lz 
-\end{Verbatim}
-
-%\normalsize
-%It may also be necessary to remove tabs from the file {\tt piof.h} located in the {\tt src} directory.
-
-\footnotesize
-{\tt cd DIRNAME/src}
-
-{\tt make} \hfill {\it (compile with {\tt mpicc})}
-\normalsize
-
-This will build the library {\footnotesize\tt libparallelio.a}.
-
-\item Compile PFLOTRAN:
-
-Define environmental variable: {\tt PARALLELIO\_LIB} giving path to PARALLELIO library:
-
-\footnotesize
-{\tt export PARALLELIO\_LIB=\$PWD} \hfill {\it (bash shell)}
+\item Compile the SCORPIO library:
+
+First, set the environment variable {\footnotesize\tt SCORPIO\_DIR} to the directory where you wish to install the library files.
+Make sure that you have write permission for that location; e.g., use something like {\footnotesize\tt \$\{HOME\}/soft/scorpio}.
+
+\footnotesize
+{\tt export SCORPIO\_DIR=<your-iolib-install-dir>} \hfill {\it (bash shell)}
 \normalsize
 
 or
 
 \footnotesize
-{\tt setenv PARALLELIO\_LIB \$PWD} \hfill {\it (tcsh/csh shell)}
+{\tt setenv SCORPIO\_DIR <your-iolib-install-dir>} \hfill {\it (tcsh/csh shell)}
 
-{\tt cd PFLOTRAN\_DIR/src/pflotran}
+To compile the library, first check that the Makefile has the right settings for your machine.
+Typically the default configuration suffices, so you can simply follow the instructions below.
+Advanced users may edit the {\footnotesize\tt pflotran\_machine} section of the Makefile as desired.
+% So just uncomment the line {\small\tt MACHINE=pflotran\_machine}.
+% \footnotesize
+% \begin{Verbatim}
+% 66 # Note: PFLOTRAN USERS: Make sure to uncomment line below 
+%    OR use make MACHINE=pflotran_machine
+% 67 MACHINE=pflotran_machine
+% ...
+% 73 ifeq ($(MACHINE),pflotran_machine)
+% 74     CC=${PETSC_DIR}/${PETSC_ARCH}/bin/mpicc
+% 75     FC=${PETSC_DIR}/${PETSC_ARCH}/bin/mpif90
+% 76     LINKER=${FC}
+% 77     CFLAGS+= -I${PETSC_DIR}/${PETSC_ARCH}/include -O3
+% 78     FFLAGS+= -I${PETSC_DIR}/${PETSC_ARCH}/include -O3 
+% 79     LDFLAGS+= -Wl,-L${PETSC_DIR}/${PETSC_ARCH}/lib -lhdf5 -lz 
+% 80 endif
+% \end{Verbatim}
 
-{\tt make have\_parallelio\_lib=1 pflotran}
+\footnotesize
+{\tt cd DIRNAME/src}
+
+{\tt make MACHINE=pflotran\_machine} \hfill {\it (compile with {\tt mpicc})}
+
+{\tt make install} \hfill {\it (install to {\tt \$SCORPIO\_DIR})}
+\normalsize
+
+This will build the library {\footnotesize\tt libscorpio.a} and copy the corresponding files into the {\footnotesize\tt SCORPIO\_DIR/lib} and {\footnotesize\tt SCORPIO\_DIR/include} directories.
+
+\item Compile PFLOTRAN:
+
+Please ensure that the environment variable {\footnotesize\tt SCORPIO\_DIR} points to \linebreak {\footnotesize\tt <your-iolib-install-dir>}:
+
+{\footnotesize\tt cd PFLOTRAN\_DIR/src/pflotran}
+
+{\footnotesize\tt make scorpio=1 pflotran}
 \normalsize
 \end{enumerate}
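+To verify the build, the resulting executable can be run on an input deck. A sketch, assuming the {\footnotesize\tt -input\_prefix} convention adopted by the regression harness in this commit; {\footnotesize\tt mytest} is a placeholder prefix for an input file {\footnotesize\tt mytest.in}:
+\footnotesize
+\begin{Verbatim}
+mpirun -np 4 ./pflotran -input_prefix mytest
+\end{Verbatim}
+\normalsize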
 
 \scriptsize
 \begin{verbatim}
-README
 -------------------------------------------------------------------------------
-ASCEM-IO
+SCORPIO
 Scalable Parallel I/O module for Environmental Management Applications
 -------------------------------------------------------------------------------
 This library provides software that read/write data sets from/to parallel file 
 -------------------------------------------------------------------------------
 COPYRIGHT AND LICENSE 
 -------------------------------------------------------------------------------
-ASCEM-IO is distrubuted under the terms of the GNU Lesser General Public 
+SCORPIO is distributed under the terms of the GNU Lesser General Public 
 License (LGPL). The copyright is held jointly by North Carolina State University 
 and Pacific Northwest National Laboratory. 
 
 HDF5 libraries (preferably with parallel(MPI) support)
 Optional: Fortran (for Fortran example)
 
-After downloading ASCEM-IO and gathering details of HDF5 installation, 
-the following commands can be used to build and install ASCEM-IO: 
+After downloading SCORPIO and gathering details of HDF5 installation, 
+the following commands can be used to build and install SCORPIO: 
 
-	cd <ASCEM-IO check out directory>/src
+	cd <SCORPIO check out directory>/src
 	make CC=<C-compiler> HDF5_INCLUDE_DIR=<location of the HDF5 include directory>
-	make ASCEMIO_INSTALL_DIR=<user defined install location> install
+	make SCORPIO_INSTALL_DIR=<user defined install location> install
 
 In this case, CC refers to C compiler with MPI support, e.g., mpicc.
 

File regression_tests/Makefile

 
 TEST_OPTIONS =
 
-# make VERBOSE=true check
-ifdef VERBOSE
-	TEST_OPTIONS += --verbose
-endif
-
 # make PERFORMANCE=true check
 ifdef PERFORMANCE
 	TEST_OPTIONS += --check-performance
 endif
 
-# tests that are run to verify pflotran is built correctly
-STANDARD_TESTS = \
-	ascem-geochemistry \
-	ascem-geochemistry-parallel \
-	default-flow \
-	default-transport \
-	default-geochemistry \
-	default-parallel \
-	default-discretization-umesh \
-	ngee-biogeochemistry
+ifdef BACKTRACE
+	TEST_OPTIONS += --backtrace
+endif
 
-# regression tests for developers changing pflotran
-DEV_TESTS = \
-	cu-leaching
+#
+# standard tests that are run to verify pflotran is built correctly
+#
+STANDARD_CFG = \
+	ascem/batch/batch.cfg \
+	ascem/1d/1d-calcite/1d-calcite.cfg \
+	default/543/543.cfg \
+	default/anisothermal/anisothermal.cfg \
+	default/column/column.cfg \
+	default/infiltrometer/infiltrometer.cfg \
+	default/condition/condition.cfg \
+	default/multicontinuum/multicontinuum.cfg \
+	ngee/ngee.cfg \
+	shortcourse/copper_leaching/cu_leaching.cfg
 
-check : $(STANDARD_TESTS)
+STANDARD_PARALLEL_CFG = \
+	ascem/1d/1d-calcite/1d-calcite.cfg \
+	default/543/543.cfg
 
-test : $(STANDARD_TESTS) $(DEV_TESTS)
+ifneq ($(strip $(PARMETIS_LIB)),)
+	STANDARD_CFG += \
+		default/discretization/discretization.cfg
 
-ascem-geochemistry : 
+	STANDARD_PARALLEL_CFG += \
+		default/discretization/discretization.cfg
+else
+$(info ********************************************************)
+$(info   PFloTran does not appear to be compiled with Parmetis.)
+$(info   Skipping unstructured mesh tests.)
+$(info ********************************************************)
+endif
+
+#
+# domain specific problems
+#
+GEOCHEMISTRY_CFG = \
+	ascem/1d/1d-calcite/1d-calcite.cfg \
+	ascem/batch/batch.cfg \
+	default/543/543.cfg \
+	default/anisothermal/anisothermal.cfg \
+	default/column/column.cfg \
+	default/multicontinuum/multicontinuum.cfg \
+	ngee/ngee.cfg \
+	shortcourse/1D_Calcite/calcite.cfg \
+	shortcourse/copper_leaching/cu_leaching.cfg
+
+FLOW_CFG = \
+	default/543/543.cfg \
+	default/condition/condition.cfg \
+	default/infiltrometer/infiltrometer.cfg \
+	shortcourse/1D_variably_saturated_flow/vsat_flow.cfg \
+	shortcourse/copper_leaching/cu_leaching.cfg
+
+TRANSPORT_CFG = \
+	default/543/543.cfg \
+	default/column/column.cfg \
+	default/multicontinuum/multicontinuum.cfg
+
+MESH_CFG = \
+	default/discretization/discretization.cfg
+
+
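+# 'make check' / 'make test' run the standard serial and parallel suites;
+# 'make geochemistry|flow|transport|mesh' run the domain-specific suites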
+check : standard standard_parallel
+
+test : standard standard_parallel
+
+standard :
 	-$(PYTHON) $(TEST_MANAGER) -e $(PFLOTRAN) $(TEST_OPTIONS) \
-		-r ascem --suite geochemistry 
+		 --suite standard --config-files $(STANDARD_CFG)
 
-ascem-geochemistry-parallel :
+standard_parallel :
 ifneq ($(strip $(MPIEXEC)),)
 	-$(PYTHON) $(TEST_MANAGER) -e $(PFLOTRAN) $(TEST_OPTIONS) \
-		--mpiexec $(MPIEXEC) -r ascem/1d --suite parallel
+		--mpiexec $(MPIEXEC)  --suite standard_parallel \
+	--config-files $(STANDARD_PARALLEL_CFG)
 else
 	@echo "********************************************************"
 	@echo "  Could not find mpiexec."
 	@echo "********************************************************"
 endif
 
-default-discretization-umesh :
+geochemistry :
+	-$(PYTHON) $(TEST_MANAGER) -e $(PFLOTRAN) $(TEST_OPTIONS) \
+		--config-files $(GEOCHEMISTRY_CFG) --suite geochemistry 
+
+flow :
+	-$(PYTHON) $(TEST_MANAGER) -e $(PFLOTRAN) $(TEST_OPTIONS) \
+		--config-files $(FLOW_CFG) --suite flow
+
+transport :
+	-$(PYTHON) $(TEST_MANAGER) -e $(PFLOTRAN) $(TEST_OPTIONS) \
+		--config-files $(TRANSPORT_CFG) --suite transport
+
+mesh :
 ifneq ($(strip $(PARMETIS_LIB)),)
 	-$(PYTHON) $(TEST_MANAGER) -e $(PFLOTRAN) $(TEST_OPTIONS) \
-		-r default/discretization --suite mesh
+		--config-files $(MESH_CFG) --suite mesh
 else
 	@echo "********************************************************"
 	@echo "  PFloTran does not appear to be compiled with Parmetis."
 	@echo "********************************************************"
 endif
 
-default-flow : 
-	-$(PYTHON) $(TEST_MANAGER) -e $(PFLOTRAN) $(TEST_OPTIONS) \
-		-r default --suite flow 
-
-default-transport : 
-	-$(PYTHON) $(TEST_MANAGER) -e $(PFLOTRAN) $(TEST_OPTIONS) \
-		-r default --suite transport 
-
-default-geochemistry : 
-	-$(PYTHON) $(TEST_MANAGER) -e $(PFLOTRAN) $(TEST_OPTIONS) \
-		-r default --suite geochemistry 
-
-default-parallel :
-ifneq ($(strip $(MPIEXEC)),)
-	-$(PYTHON) $(TEST_MANAGER) -e $(PFLOTRAN) $(TEST_OPTIONS) \
-		--mpiexec $(MPIEXEC) -r default --suite parallel
-else
-	@echo "********************************************************"
-	@echo "  Could not find mpiexec."
-	@echo "  Skipping parallel tests."
-	@echo "********************************************************"
-endif
-
-cu-leaching :
-	-$(PYTHON) $(TEST_MANAGER) -e $(PFLOTRAN) $(TEST_OPTIONS) \
-		-c shortcourse/copper_leaching/cu_leaching.cfg --test cu_leaching
-
 ngee-biogeochemistry : 
 	-$(PYTHON) $(TEST_MANAGER) -e $(PFLOTRAN) $(TEST_OPTIONS) \
 		-r ngee --suite biogeochemistry 

File regression_tests/ascem/1d/1d-calcite/1d-calcite.cfg

-[executable]
-input arg = -pflotranin
-input suffix = in
-output arg = -output_prefix
-
 [suites]
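+# each suite maps to a space-delimited list of test names run for that suite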
 geochemistry = 1d-calcite
 parallel = 1d-calcite-np2
+standard = 1d-calcite
+standard_parallel = 1d-calcite-np2
 
 [default-test-criteria]
 # default criteria for all tests, can be overwritten by specific tests

File regression_tests/ascem/batch/batch.cfg

-[executable]
-# maybe just hard code this in python?
-input arg = -pflotranin
-input suffix = in
-output arg = -output_prefix
-
-# by default we will assume input file = test-name.in
-# by default we will assume output file = test-name.regression.gold (or something)
-
 [suites]
 geochemistry = carbonate-unit-activity carbonate-debye-huckel-activity
                ca-carbonate-unit-activity ca-carbonate-debye-huckel-activity
                calcite-kinetics calcite-kinetics-volume-fractions 
                general-reaction ion-exchange-valocchi surface-complexation-1
 
+standard = carbonate-unit-activity carbonate-debye-huckel-activity
+           ca-carbonate-unit-activity ca-carbonate-debye-huckel-activity
+           calcite-kinetics calcite-kinetics-volume-fractions 
+           general-reaction ion-exchange-valocchi surface-complexation-1
+
 [default-test-criteria]
 # default criteria for all tests, can be overwritten by specific tests
 time = 50 percent

File regression_tests/default/543/543.cfg

 geochemistry = 543_hanford_srfcplx_base 543_hanford_srfcplx_param 543_hanford_srfcplx_mr
 parallel = 543_flow-np8 543_flow_and_tracer-np8 543_hanford_srfcplx_param-np8
 
+standard = 543_flow 
+           543_flow_and_tracer
+           543_hanford_srfcplx_base
+           543_hanford_srfcplx_param 
+           543_hanford_srfcplx_mr
+standard_parallel = 543_flow-np8
+                    543_flow_and_tracer-np8
+                    543_hanford_srfcplx_param-np8
+
 [default-test-criteria]
 # default criteria for all tests, can be overwritten by specific tests
 time = 500 percent

File regression_tests/default/anisothermal/anisothermal.cfg

 [suites]
-#flow = 
-#transport =
 geochemistry = thc_1d
-#parallel =
+standard = thc_1d
+
 
 [default-test-criteria]
 # default criteria for all tests, can be overwritten by specific tests

File regression_tests/default/column/column.cfg

 [suites]
-#flow =
 transport = tracer
 geochemistry = solute_KD ABC_general_KD ABC_microbial valocchi_ionx
-#parallel = 
+standard = tracer
+           solute_KD
+           ABC_general_KD
+           ABC_microbial
+           valocchi_ionx
 
 [default-test-criteria]
 # default criteria for all tests, can be overwritten by specific tests

File regression_tests/default/condition/condition.cfg

 [suites]
+standard = datum_dataset 543_datum_dataset 543_timeseries
 flow = datum_dataset 543_datum_dataset 543_timeseries
 #transport =
 #geochemistry =

File regression_tests/default/discretization/discretization.cfg

 [suites]
 mesh = mixed_implicit mixed_explicit
-#flow =
-#transport =
-#geochemistry =
 parallel = mixed_implicit-np4 mixed_explicit-np4
+standard = mixed_implicit mixed_explicit
+standard_parallel = mixed_implicit-np4 mixed_explicit-np4
 
 [default-test-criteria]
 # default criteria for all tests, can be overwritten by specific tests

File regression_tests/default/infiltrometer/infiltrometer.cfg

 [suites]
 flow = 16m
-#transport =
-#geochemistry = 
-#parallel =
+standard = 16m
 
 [default-test-criteria]
 # default criteria for all tests, can be overwritten by specific tests

File regression_tests/default/multicontinuum/multicontinuum.cfg

 [suites]
+standard = tracer_1D reaction_log10
 #flow = 
 transport = tracer_1D
 geochemistry = reaction_log10

File regression_tests/ngee/ngee.cfg

 [suites]
 #flow = 
 #transport =
+geochemistry = CLM-CN
 biogeochemistry = CLM-CN
+standard = CLM-CN
 #parallel =
 
 [default-test-criteria]

File regression_tests/regression-tests.py

 
 import argparse
 from collections import deque
+import datetime
 import math
 import os
 import pprint
 else:
     import configparser as config_parser
 
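+# indices into the status lists: per-test lists hold [STATUS, WARNING, ERROR];
+# the per-file list adds NUM_TESTS as a running count of tests run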
+STATUS = 0
+WARNING = 1
+ERROR = 2
+NUM_TESTS = 3
 
 class RegressionTest(object):
     """
         self._pprint = pprint.PrettyPrinter(indent=2)
         self._txtwrap = textwrap.TextWrapper(width=78, subsequent_indent=4*" ")
         self._debug = False
-        self._verbose = False
         self._executable = None
-        self._input_arg = "-pflotranin"
+        self._input_arg = "-input_prefix"
         self._input_suffix = "in"
-        self._output_arg = "-output_prefix"
         self._np = None
         self._timeout = 60.0
         self._check_performance = False
         message += "    executable args :\n"
         message += "        input arg : {0}\n".format(self._input_arg)
         message += "        input suffix : {0}\n".format(self._input_suffix)
-        message += "        output arg : {0}\n".format(self._output_arg)
         message += "    test criteria :\n"
         for k in self._tolerance:
             message += "        {0} : {1} [{2}]\n".format(
     def name(self):
         return self._test_name
 
-    def run(self, mpiexec, executable, dry_run, verbose):
+    def run(self, mpiexec, executable, dry_run, status, testlog):
         """
         Build up the run command, including mpiexec, np, pflotran,
         input file, output file. Then run the job as a subprocess.
             command.append("-np")
             if self._np is None:
                 self._np = '1'
-                if verbose:
-                    message = self._txtwrap.fill(
-                        "WARNING : mpiexec specified for test '{0}', "
-                        "but the test section does not specify the number "
-                        "of parallel jobs! Running test as "
-                        "serial.".format(self.name()))
-                    print(message)
+                message = self._txtwrap.fill(
+                    "WARNING : mpiexec specified for test '{0}', "
+                    "but the test section does not specify the number "
+                    "of parallel jobs! Running test as "
+                    "serial.".format(self.name()))
+                print(message, file=testlog)
+                status[WARNING] = 1
             command.append(self._np)
         else:
             if self._np is not None:
                                 "provided.".format(self.name()))
 
         command.append(executable)
-        input_file_name = self.name() + '.' + self._input_suffix
         if self._input_arg != None:
             command.append(self._input_arg)
-            command.append(input_file_name)
-        if self._output_arg != None:
-            command.append(self._output_arg)
             command.append(self.name())
 
         if os.path.isfile(self.name() + ".regression"):
             os.rename(self.name() + ".stdout",
                       self.name() + ".stdout.old")
 
-        status = -1
         if dry_run:
             print("\n    {0}".format(" ".join(command)))
         else:
-            if verbose:
-                print("    {0}".format(" ".join(command)))
+            print("    cd {0}".format(os.getcwd()), file=testlog)
+            print("    {0}".format(" ".join(command)), file=testlog)
             run_stdout = open(self.name() + ".stdout", 'w')
             start = time.time()
             proc = subprocess.Popen(command,
                     message = self._txtwrap.fill(
                         "ERROR: job '{0}' has exceeded timeout limit of "
                         "{1} seconds.".format(self.name(), self._timeout))
-                    print(''.join(['\n', message, '\n']))
-            status = abs(proc.returncode)
+                    print(''.join(['\n', message, '\n']), file=testlog)
+            pflotran_status = abs(proc.returncode)
             run_stdout.close()
         # pflotran returns 0 on an error (e.g. can't find an input
         # file), 86 on success. 59 for timeout errors?
-        if status != self._PFLOTRAN_SUCCESS:
+        if pflotran_status != self._PFLOTRAN_SUCCESS:
             message = self._txtwrap.fill(
-                "WARNING : {name} : pflotran return an error "
+                "FAIL : {name} : pflotran return an error "
                 "code ({status}) indicating the simulation may have "
                 "failed. Please check '{name}.out' and '{name}.stdout' "
                 "for error messages.".format(
-                    name=self.name(), status=status))
-            print("".join(['\n', message, '\n']))
-        return status
+                    name=self.name(), status=pflotran_status))
+            print("".join(['\n', message, '\n']), file=testlog)
+            status[STATUS] = 1
 
-    def check(self, verbose):
+    def check(self, status, testlog):
         """
         Test the output from the run against the known "gold standard"
         output and determine if the test succeeded or failed.
         We return zero on success, one on failure so that the test
         manager can track how many tests succeeded and failed.
         """
-        self._verbose = verbose
         gold_filename = self.name() + ".regression.gold"
         if not os.path.isfile(gold_filename):
             message = self._txtwrap.fill(
                 "ERROR: could not find regression test gold file "
                 "'{0}'. If this is a new test, please create "
                 "it with '--new-test'.".format(gold_filename))
-            print("".join(['\n', message, '\n']))
-            return 1
+            print("".join(['\n', message, '\n']), file=testlog)
+            status[ERROR] = 1
+            return
         else:
             with open(gold_filename, 'rU') as gold_file:
                 gold_output = gold_file.readlines()
                 "ERROR: could not find regression test file '{0}'."
                 " Please check the standard output file for "
                 "errors.".format(current_filename))
-            print("".join(['\n', message, '\n']))
-            return 1
+            print("".join(['\n', message, '\n']), file=testlog)
+            status[ERROR] = 1
+            return
         else:
             with open(current_filename, 'rU') as current_file:
                 current_output = current_file.readlines()
 
-        if verbose:
-            print("    diff {0} {1}".format(gold_filename, current_filename))
+        print("    diff {0} {1}".format(gold_filename, current_filename), file=testlog)
 
         gold_sections = self._get_sections(gold_output)
         current_sections = self._get_sections(current_output)
         for s in gold_sections:
             if s not in current_sections:
                 self._num_failed += 1
-                if self._verbose:
-                    print("    FAIL: section '{0}' is in the gold output, but "
-                          "not the current output.".format(s))
+                print("    FAIL: section '{0}' is in the gold output, but "
+                      "not the current output.".format(s), file=testlog)
 
         # look for sections that are in current but not gold
         for s in current_sections:
             if s not in gold_sections:
                 self._num_failed += 1
-                if self._verbose:
-                    print("    FAIL: section '{0}' is in the current output, "
-                          "but not the gold output.".format(s))
+                print("    FAIL: section '{0}' is in the current output, "
+                      "but not the gold output.".format(s), file=testlog)
 
         # compare common sections
         for s in gold_sections:
             if s in current_sections:
                 self._num_failed += self._compare_sections(gold_sections[s],
-                                                           current_sections[s])
+                                                           current_sections[s], testlog)
 
-        status = 0
         if self._num_failed > 0:
-            status = 1
-        return status
+            status[STATUS] = 1
 
-    def update(self, verbose):
+
+    def update(self, status, testlog):
         """
         Update the gold standard test results to the current
         output. Both the current regression output and a gold file
         must exist.
         """
-        status = 0
         gold_name = self.name() + ".regression.gold"
         current_name = self.name() + ".regression"
 
         # verify that the gold file exists
         if not os.path.isfile(gold_name):
-            raise Exception("ERROR: test '{0}' results can not be updated "
-                            "because a gold file does not "
-                            "exist!".format(self.name()))
+            print("ERROR: test '{0}' results can not be updated "
+                  "because a gold file does not "
+                  "exist!".format(self.name()), file=testlog)
+            status[ERROR] = 1
 
         # verify that the regression file exists
         if not os.path.isfile(current_name):
-            raise Exception("ERROR: test '{0}' results can not be updated "
-                            "because no regression file "
-                            "exists!".format(self.name()))
-
+            print("ERROR: test '{0}' results can not be updated "
+                  "because no regression file "
+                  "exists!".format(self.name()), file=testlog)
+            status[ERROR] = 1
         try:
-            if verbose:
-                print("  updating test '{0}'... ".format(self.name()),
-                      end='')
+            print("  updating test '{0}'... ".format(self.name()),
+                  end='', file=testlog)
             os.rename(current_name, gold_name)
-            if verbose:
-                print("done")
+            print("done", file=testlog)
         except Exception as e:
-            status = 1
             message = str(e)
-            message += "\nERROR : Could not rename '{0}' to '{1}'. "
+            message += "\nFAIL : Could not rename '{0}' to '{1}'. "
             message += "Please rename the file manually!".format(current_name,
                                                                  gold_name)
             message += "    mv {0} {1}".format(current_name, gold_name)
-            print(message)
-            # should we rethrow this exception, or continue?
-            #raise Exception(message)
-        return status
+            print(message, file=testlog)
+            status[STATUS] = 1
 
-    def new_test(self, verbose):
+
+
+    def new_test(self, status, testlog):
         """
         A new test does not have a gold standard regression test. We
         will check to see if a gold standard file exists (an error),
         then create the gold file by copying the current regression
         file to gold.
         """
-        status = 0
         gold_name = self.name() + ".regression.gold"
         current_name = self.name() + ".regression"
 
 
         # check that the regression file was created.
         if not os.path.isfile(current_name):
-            raise Exception("ERROR: could not create new gold file for "
-                            "test '{0}' because no regression file "
-                            "exists!".format(self.name()))
+            print("ERROR: could not create new gold file for "
+                  "test '{0}' because no regression file "
+                  "exists!".format(self.name()), file=testlog)
+            status[ERROR] = 1
 
         try:
-            if verbose:
-                print("  creating gold file '{0}'... ".format(self.name()),
-                      end='')
+            print("  creating gold file '{0}'... ".format(self.name()),
+                  end='', file=testlog)
 
             os.rename(current_name, gold_name)
-            if verbose:
-                print("done")
+            print("done", file=testlog)
         except Exception as e:
-            status = 1
             message = str(e)
-            message += "\nERROR : Could not rename '{0}' to '{1}'. "
+            message += "\nFAIL : Could not rename '{0}' to '{1}'. "
             message += "Please rename the file manually!".format(current_name,
                                                                  gold_name)
             message += "    mv {0} {1}".format(current_name, gold_name)
-            print(message)
-            # should we rethrow this exception, or continue?
-            #raise Exception(message)
-        return status
+            print(message, file=testlog)
+            status[STATUS] = 1
+
 
     def _get_sections(self, output):
         """
 
         return sections
 
-    def _compare_sections(self, gold_section, current_section):
+    def _compare_sections(self, gold_section, current_section, testlog):
         name = gold_section['name']
         data_type = gold_section['type']
         section_status = 0
             # solution blocks contain platform dependent performance
             # metrics. We skip them unless they are explicitly
             # requested.
-            if self._verbose:
-                print("    Skipping {0} : {1}".format(data_type, name))
+            print("    Skipping {0} : {1}".format(data_type, name), file=testlog)
         else:
             # if key in gold but not in current --> failed test
             for k in gold_section:
                 if k not in current_section:
                     section_status += 1
-                    if self._verbose:
-                        print("    FAIL: key '{0}' in section '{1}' found in gold "
-                              "output but not current".format(
-                                k, gold_section['name']))
+                    print("    FAIL: key '{0}' in section '{1}' found in gold "
+                          "output but not current".format(
+                            k, gold_section['name']), file=testlog)
 
             # if key in current but not gold --> failed test
             for k in current_section:
                 if k not in gold_section:
                     section_status += 1
                     print("    FAIL: key '{0}' in section '{1}' found in current "
-                          "output but not gold".format(k, current_section['name']))
+                          "output but not gold".format(k, current_section['name']), file=testlog)
 
             # now compare the keys that are in both...
             for k in gold_section:
                     current = current_section[k].split()
                     if len(gold) != len(current):
                         section_status += 1
-                        if self._verbose:
-                            print("    FAIL: {0} : {1} : vector lengths not "
-                                  "equal. gold {2}, current {3}".format(
-                                    name, k, len(gold), len(current)))
+                        print("    FAIL: {0} : {1} : vector lengths not "
+                              "equal. gold {2}, current {3}".format(
+                                name, k, len(gold), len(current)), file=testlog)
                     else:
                         for i in range(len(gold)):
                             try:
                                 status = self._compare_values(name_str, data_type,
-                                                              gold[i], current[i])
+                                                              gold[i], current[i], testlog)
                                 section_status += status
                             except Exception as e:
                                 section_status += 1
-                                if self._verbose:
-                                    print("ERROR: {0} : {1}.\n  {2}".format(
-                                            self.name(), k, str(e)))
+                                print("ERROR: {0} : {1}.\n  {2}".format(
+                                        self.name(), k, str(e)), file=testlog)
 
 
-        if self._verbose and False:
-            print("    {0} : status : {1}".format(name, section_status))
+        if self._debug:
+            print("    {0} : status : {1}".format(name, section_status), file=testlog)
         return section_status
 
-    def _compare_values(self, name, key, previous, current):
+    def _compare_values(self, name, key, previous, current, testlog):
         """
         NOTE(bja): previous and current come into this function as
         strings. We don't know if they should be floats or ints (or
                                                           name, key))
         if delta > tolerance:
             status = 1
-            if self._verbose:
-                print("    FAIL: {0} : {1} > {2} [{3}]".format(
-                        name, delta, tolerance,
-                        tolerance_type))
+            print("    FAIL: {0} : {1} > {2} [{3}]".format(
+                    name, delta, tolerance,
+                    tolerance_type), file=testlog)
         elif self._debug:
             print("    PASS: {0} : {1} <= {2} [{3}]".format(
                     name, delta, tolerance,
         if "input suffix" in executable_args:
             self._input_suffix = executable_args["input suffix"]
 
-        if "output arg" in executable_args:
-            self._output_arg = executable_args["output arg"]
-
     def _set_test_data(self, default_criteria, test_data, timeout, check_performance):
         """
         Set the test criteria for different categories of variables.
 
     def __init__(self):
         self._debug = False
-        self._verbose = False
-        self._num_failed = 0
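+        # [failures, warnings, errors, tests run] accumulated for this config file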
+        self._file_status = 4*[0]
         self._config_filename = None
         self._executable_args = None
         self._default_test_criteria = None
         return len(self._tests)
 
     def generate_tests(self, config_file, user_suites, user_tests,
-                       timeout, check_performance):
+                       timeout, check_performance, testlog):
         self._read_config_file(config_file)
         self._validate_suites()
         user_suites, user_tests = self._validate_user_lists(user_suites,
-                                                            user_tests)
+                                                            user_tests, testlog)
         self._create_tests(user_suites, user_tests, timeout, check_performance)
 
-    def run_tests(self, mpiexec, executable, verbose,
-                  dry_run, update, new_test, check_only):
+    def run_tests(self, mpiexec, executable,
+                  dry_run, update, new_test, check_only, testlog):
         """
         Run the tests specified in the config file.
 
         """
         if self.num_tests() > 0:
             if new_test:
-                self._run_new(mpiexec, executable, dry_run, verbose)
+                self._run_new(mpiexec, executable, dry_run, testlog)
             elif update:
-                self._run_update(mpiexec, executable, dry_run, verbose)
+                self._run_update(mpiexec, executable, dry_run, testlog)
             elif check_only:
-                self._check_only(dry_run, verbose)
+                self._check_only(dry_run, testlog)
             else:
-                self._run_check(mpiexec, executable, dry_run, verbose)
+                self._run_check(mpiexec, executable, dry_run, testlog)
         else:
-            self._num_failed = self.NO_TESTS_RUN
+            self._file_status[STATUS] = self.NO_TESTS_RUN
 
-    def _run_check(self, mpiexec, executable, dry_run, verbose):
+    def _run_check(self, mpiexec, executable, dry_run, testlog):
         if dry_run:
             print("Dry run:")
-        else:
-            print("Running tests from '{0}':".format(self._config_filename))
-        print(50 * '-')
+        print("Running tests from '{0}':".format(self._config_filename), file=testlog)
+        print(50 * '-', file=testlog)
 
         for t in self._tests:
-            self._test_header(t.name(), verbose)
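+            # per-test status flags: [STATUS, WARNING, ERROR]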
+            status = 3*[0]
+            self._test_header(t.name(), testlog)
 
-            t.run(mpiexec, executable, dry_run, verbose)
+            t.run(mpiexec, executable, dry_run, status, testlog)
 
-            status = 0
             if not dry_run:
-                status = t.check(verbose)
+                t.check(status, testlog)
 
-            self._num_failed += status
+            self._add_to_file_status(status)
 
-            self._test_summary(t.name(), status, verbose, dry_run,
-                               "passed", "failed")
+            self._test_summary(t.name(), status, dry_run,
+                               "passed", "failed", testlog)
 
-        self._print_file_summary(dry_run, "passed", "failed")
+        self._print_file_summary(dry_run, "passed", "failed", testlog)
 
-    def _check_only(self, dry_run, verbose):
+    def _check_only(self, dry_run, testlog):
         if dry_run:
             print("Dry run:")
-        else:
-            print("Diffing tests from '{0}':".format(self._config_filename))
-        print(50 * '-')
+        print("Checking existing test results from '{0}':".format(self._config_filename), file=testlog)
+        print(50 * '-', file=testlog)
 
         for t in self._tests:
-            self._test_header(t.name(), verbose)
+            status = 3*[0]
+            self._test_header(t.name(), testlog)
 
-            status = 0
             if not dry_run:
-                status = t.check(verbose)
+                t.check(status, testlog)
+                
+            self._add_to_file_status(status)
 
-            self._num_failed += status
+            self._test_summary(t.name(), status, dry_run,
+                               "passed", "failed", testlog)
 
-            self._test_summary(t.name(), status, verbose, dry_run,
-                               "passed", "failed")
+        self._print_file_summary(dry_run, "passed", "failed", testlog)
 
-        self._print_file_summary(dry_run, "passed", "failed")
-
-    def _run_new(self, mpiexec, executable, dry_run, verbose):
+    def _run_new(self, mpiexec, executable, dry_run, testlog):
         if dry_run:
             print("Dry run:")
-        else:
-            print("New tests from '{0}':".format(self._config_filename))
-        print(50 * '-')
+
+        print("New tests from '{0}':".format(self._config_filename), file=testlog)
+        print(50 * '-', file=testlog)
 
         for t in self._tests:
-            self._test_header(t.name(), verbose)
+            status = 3*[0]
+            self._test_header(t.name(), testlog)
 
-            t.run(mpiexec, executable, dry_run, verbose)
+            t.run(mpiexec, executable, dry_run, status, testlog)
 
-            status = 0
             if not dry_run:
-                status = t.new_test(verbose)
-            self._num_failed += status
-            self._test_summary(t.name(), status, verbose, dry_run,
-                               "created", "error creating new test files.")
+                t.new_test(status, testlog)
+            self._add_to_file_status(status)
+            self._test_summary(t.name(), status, dry_run,
+                               "created", "error creating new test files.", testlog)
 
-        self._print_file_summary(dry_run, "created", "could not be created")
+        self._print_file_summary(dry_run, "created", "could not be created", testlog)
 
-    def _run_update(self, mpiexec, executable, dry_run, verbose):
+    def _run_update(self, mpiexec, executable, dry_run, testlog):
         if dry_run:
             print("Dry run:")
-        else:
-            print("Updating tests from '{0}':".format(self._config_filename))
-        print(50 * '-')
+        print("Updating tests from '{0}':".format(self._config_filename), file=testlog)
+        print(50 * '-', file=testlog)
 
         for t in self._tests:
-            self._test_header(t.name(), verbose)
-            t.run(mpiexec, executable, dry_run, verbose)
+            status = 3*[0]
+            self._test_header(t.name(), testlog)
+            t.run(mpiexec, executable, dry_run, status, testlog)
 
-            status = 0
             if not dry_run:
-                status = t.update(verbose)
-            self._num_failed += status
-            self._test_summary(t.name(), status, verbose, dry_run,
-                               "updated", "error updating test.")
+                t.update(status, testlog)
+            self._add_to_file_status(status)
+            self._test_summary(t.name(), status, dry_run,
+                               "updated", "error updating test.", testlog)
 
-        self._print_file_summary(dry_run, "updated", "could not be updated")
+        self._print_file_summary(dry_run, "updated", "could not be updated", testlog)
 
-    def _test_header(self, name, verbose):
-        if verbose:
-            print(40 * '-')
-        print("{0}... ".format(name), end='')
-        if verbose:
-            print()
+    def _test_header(self, name, testlog):
+        print(40 * '-', file=testlog)
+        print("{0}... ".format(name), file=testlog)
 
-    def _test_summary(self, name, status, verbose, dry_run,
-                      success_message, fail_message):
-        if status == 0:
-            if not dry_run:
-                if verbose:
-                    print("{0}... {1}.".format(name, success_message))
-                else:
-                    print(" {0}.".format(success_message))
+    def _test_summary(self, name, status, dry_run,
+                      success_message, fail_message, testlog):
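+        # emit a one-character progress marker to stdout
+        # (. = pass, F = fail, W = warning, E = error, S = skipped)
+        # and write the full message to the testlog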
+        if dry_run:
+            print("S", end='', file=sys.stdout)
+            print(" skipped.", file=testlog)
+        else:
+            if (status[STATUS] == 0 and
+                status[WARNING] == 0 and
+                status[ERROR] == 0):
+                print(".", end='', file=sys.stdout)
+                print("{0}... {1}.".format(name, success_message), file=testlog)
+            elif status[STATUS] != 0:
+                print("F", end='', file=sys.stdout)
+                print("{0}... {1}.".format(name, fail_message), file=testlog)
+            elif status[WARNING] != 0:
+                print("W", end='', file=sys.stdout)
+            elif status[ERROR] != 0:
+                print("E", end='', file=sys.stdout)
             else:
-                print(" skipped.")
-        else:
-            if verbose:
-                print("{0}... {1}.".format(name, fail_message))
-            else:
-                print(" {0}.".format(fail_message))
+                print("?", end='', file=sys.stdout)
 
-    def _print_file_summary(self, dry_run, success_message, fail_message):
+        sys.stdout.flush()
+
+
+    def _print_file_summary(self, dry_run, success_message, fail_message, testlog):
         # print a summary of the results for this config file
-        print(50 * '-')
-        if self._num_failed > 0:
+        print(50 * '-', file=testlog)
+        if self._file_status[STATUS] > 0:
             print("{0} : {1} of {2} tests {3}".format(
-                    self._config_filename, self._num_failed, len(self._tests),
-                    fail_message))
+                    self._config_filename, self._file_status[STATUS],
+                    self._file_status[NUM_TESTS], fail_message), file=testlog)
         else:
             if not dry_run:
                 print("{0} : {1} tests {2}".format(self._config_filename,
-                                                   len(self._tests),
-                                                   success_message))
+                                                   self._file_status[NUM_TESTS],
+                                                   success_message), file=testlog)
             else:
-                print("{0} : no tests run.".format(self._config_filename))
+                print("{0} : no tests run.".format(self._config_filename), file=testlog)
+
+    def _add_to_file_status(self, status):
+        self._file_status[STATUS] += status[STATUS]
+        self._file_status[WARNING] += status[WARNING]
+        self._file_status[ERROR] += status[ERROR]
+        self._file_status[NUM_TESTS] += 1
 
     def run_status(self):
-        return self._num_failed
+        return self._file_status
 
     def display_available_tests(self):
         print("Available tests: ")
                             "configuration file '{0}' : {1}".format(
                     self._config_filename, invalid_tests))
 
-    def _validate_user_lists(self, user_suites, user_tests):
+    def _validate_user_lists(self, user_suites, user_tests, testlog):
         """
         Check that the list of suites or tests passed from the command
         line are valid.
                         "WARNING : {0} : Skipping requested suite '{1}' (not "
                         "present, misspelled or empty).".format(
                             self._config_filename, s))
-                    print(message)
+                    print(message, file=testlog)
 
             u_tests = []
             for t in user_tests:
                     message = self._txtwrap.fill(
                         "WARNING : {0} : Skipping test '{1}' (not present or "
                         "misspelled).".format(self._config_filename, t))
-                    print(message)
+                    print(message, file=testlog)
 
         return u_suites, u_tests
 
     parser.add_argument('--advanced', action='store_true',
                         help="enable advanced options for developers")
 
-    parser.add_argument('-c', '--config-file', nargs=1, default=None,
+    parser.add_argument('-c', '--config-files', nargs="+", default=None,
                         help='test configuration file to use')
 
     parser.add_argument('--check-only', action='store_true', default=False,
                         'option, with the current output becoming the new '
                         'gold standard')
 
-    parser.add_argument('-v', '--verbose', action='store_true',
-                        help='verbose output')
-
     options = parser.parse_args()
     return options
 
                                 "directory.".format(base_dir))
 
     # add the explicitly listed config files
-    if options.config_file is not None:
-        for f in options.config_file:
+    if options.config_files is not None:
+        for f in options.config_files:
             if not os.path.isabs(f):
                 f = os.path.abspath(f)
             if os.path.isfile(f):
 #                            "'{0}'".format(mpiexec))
     return mpiexec
 
+def summary_report_by_file(report, outfile):
+    print(70 * '-', file=outfile)
+    print("Regression test file summary:", file=outfile)
+    for t in report:
+        if report[t][STATUS] > 0:
+            print("    {0}... {1} tests failed".format(t, report[t][STATUS]), file=outfile)
+        elif report[t][STATUS] == 0:
+            print("    {0}... all tests passed".format(t), file=outfile)
+        elif report[t][STATUS] == RegressionTestManager.NO_TESTS_RUN:
+            print("    {0}... no tests were run.".format(t), file=outfile)
+        else:
+            print("    {0}... could not be run.".format(t), file=outfile)
+        if report[t][WARNING] > 0:
+            print("    {0}... {1} test warnings".format(t, report[t][WARNING]), file=outfile)
+        if report[t][ERROR] > 0:
+            print("    {0}... {1} test errors".format(t, report[t][ERROR]), file=outfile)
+
+    print("\n", file=outfile)
+
+
+def summary_report(run_time, report, outfile):
+    print(70 * '-', file=outfile)
+    print("Regression test summary:", file=outfile)
+    print("    Total run time: {0:4g} [s]".format(run_time), file=outfile)
+    num_run = 0
+    num_failures = 0
+    num_errors = 0
+    num_warnings = 0
+    for t in report:
+        num_run += report[t][NUM_TESTS]
+        num_failures += report[t][STATUS]
+        num_errors += report[t][ERROR]
+        num_warnings += report[t][WARNING]
+
+    print("    Tests run : {0}".format(num_run), file=outfile)
+    if num_failures > 0:
+        print("    Tests failed : {0}".format(num_failures), file=outfile)
+    else:
+        print("    All tests passed.", file=outfile)
+        
+    if num_errors > 0:
+        print("    Errors : {0}".format(num_errors), file=outfile)
+
+    if num_warnings > 0:
+        print("    Warnings : {0}".format(num_warnings), file=outfile)
+
+    print("\n", file=outfile)
+    return num_failures
+
+def setup_testlog():
+    filename = "pflotran-tests-{0}.testlog".format(
+        datetime.datetime.today().strftime("%Y-%m-%d_%H-%M-%S"))
+    testlog = open(filename, 'w')
+    print("  Test log file : {0}".format(filename))
+    print("PFloTran Regression Test Log", file=testlog)
+    print("System Info :", file=testlog)
+    print("    platform : {0}".format(sys.platform), file=testlog)
+    # TODO(bja): it would be nice to print misc compiler, petsc and pflotran info here....
+
+    return testlog
 
 def main(options):
     txtwrap = textwrap.TextWrapper(width=78, subsequent_indent=4*" ")
     mpiexec = check_for_mpiexec(options)
     config_file_list = generate_config_file_list(options)
 
+    print("Running pflotran regression tests :")
+
+    testlog = setup_testlog()
+    
+
     # loop through config files, cd into the appropriate directory,
     # read the appropriate config file and run the various tests.
     start = time.time()
             # a single test throws an exception in a large batch of
             # tests, we can recover and at least try running the other
             # config files.
-            print(80 * '=')
+            print(80 * '=', file=testlog)
 
             # get the absolute path of the directory
             test_dir = os.path.dirname(f)
                                         options.suites,
                                         options.tests,
                                         options.timeout,
-                                        options.check_performance)
+                                        options.check_performance,
+                                        testlog)
 
             if options.debug:
                 print(70 * '-')
 
             test_manager.run_tests(mpiexec,
                                    executable,
-                                   options.verbose,
                                    options.dry_run,
                                    options.update,
                                    options.new_tests,
-                                   options.check_only)
+                                   options.check_only,
+                                   testlog)
 
             report[filename] = test_manager.run_status()
         except Exception as e:
                        "probably an error with commandline options, the "
                        "configuration file, or an internal error.  The "
                        "error is:\n{1}".format(f, str(e)))
-            print(''.join(['\n', message, '\n']))
+            print(''.join(['\n', message, '\n']), file=testlog)
             if options.backtrace:
                 traceback.print_exc()
-            report[filename] = -1
+            print('F', end='', file=sys.stdout)
+            report[filename] = [0, 0, 1, 0]  # [failures, warnings, errors, tests run]
 
     stop = time.time()
     status = 0
     if not options.dry_run and not options.update:
-        print(70 * '-')
-        print("Regression test summary:")
-        print("    Total run time: {0:4g} [s]".format(stop - start))
-        for t in report:
-            status += report[t]
-            if report[t] > 0:
-                print("    {0}... {1} tests failed".format(t, report[t]))
-            elif report[t] == 0:
-                print("    {0}... all tests passed".format(t))
-            elif report[t] == RegressionTestManager.NO_TESTS_RUN:
-                print("    {0}... no tests were run.".format(t, report[t]))
-            else:
-                print("    {0}... could not be run.".format(t, report[t]))
-        print("\n\n")
+        print("")
+        run_time = stop - start
+        summary_report_by_file(report, testlog)
+        summary_report(run_time, report, testlog)
+        status = summary_report(run_time, report, sys.stdout)
 
     if options.update:
         message = txtwrap.fill(
             "gold standard test results in your revision control commit message!\n")
         print(''.join(['\n', message, '\n']))
 
+
+    testlog.close()
+
     return status
 
 if __name__ == "__main__":

File regression_tests/shortcourse/1D_Calcite/calcite.cfg

-[executable]
-# maybe just hard code this in python?
-input arg : -pflotranin
-input suffix : in
-output arg : -output_prefix
-
-# by default we will assume input file = test-name.in
-# by default we will assume output file = test-name.regression.gold (or something)
-
 [suites]
 geochemistry = calcite_tran_only calcite_flow_and_tran calcite_vsat_flow_and_tran
 

File regression_tests/shortcourse/copper_leaching/cu_leaching.cfg

 [suites]
 flow = cu_leaching
+geochemistry = cu_leaching
+standard = cu_leaching
 
 [cu_leaching]
 timeout = 120.0

File src/pflotran/constraint.F90

   PetscInt, parameter, public :: CONSTRAINT_MINERAL = 5
   PetscInt, parameter, public :: CONSTRAINT_GAS = 6
   PetscInt, parameter, public :: CONSTRAINT_CHARGE_BAL = 7
-  PetscInt, parameter, public :: CONSTRAINT_TOTAL_SORB_AQ_BASED = 8
   PetscInt, parameter, public :: CONSTRAINT_TOTAL_SORB = 9
   PetscInt, parameter, public :: CONSTRAINT_SUPERCRIT_CO2 = 10
 
                 aq_species_constraint%constraint_type(icomp) = &
                   CONSTRAINT_TOTAL_SORB
               case('S')
-                aq_species_constraint%constraint_type(icomp) = &
-                  CONSTRAINT_TOTAL_SORB_AQ_BASED
+                option%io_buffer = '"S" constraint type no longer ' // &
+                  'supported as of March 4, 2013.'
+                call printErrMsg(option)
               case('P','PH')
                 aq_species_constraint%constraint_type(icomp) = CONSTRAINT_PH
               case('L','LOG')
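
For reference, these one-letter codes are the per-species constraint types read from the CONCENTRATIONS list of a CONSTRAINT block in the input deck. A minimal sketch (species names, values, and block layout are illustrative; only the 'P' type letter is confirmed by the select block above):

    CONSTRAINT initial
      CONCENTRATIONS
        H+    1.d-8  P   ! pH constraint ('P'/'PH' above)
        Ca++  1.d-4  T   ! total component concentration (assumed letter)
      /
    END

A deck that still uses the retired 'S' (aqueous-based total sorbed) type now aborts with the error message added above.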

File src/pflotran/dataset.F90

   endif
 #else
 
-#ifdef PARALLELIO_LIB
+#ifdef SCORPIO
       option%io_buffer='In DataLoad: HDF5ReadDataset() not supported with ' // &
-        ' PARALLELIO_LIB'
+        'SCORPIO'
 #else
     if(.not.associated(dataset%dataset_map)) then
       call HDF5ReadDataset(dataset,option)
   
 #if defined(PETSC_HAVE_HDF5)
 
-#ifdef PARALLELIO_LIB
-  option%io_buffer='HDF5GroupExists() not supported with PARALLELIO_LIB'
-  call printErrMsg(option)
-#else
   DatasetIsCellIndexed = &
     .not.HDF5GroupExists(dataset%filename,dataset%h5_dataset_name,option)
-#endif
+
 #endif
  
 end function DatasetIsCellIndexed

File src/pflotran/discretization.F90

             call printErrMsg(option)
 #else
 
-#ifdef PARALLELIO_LIB
+#ifdef SCORPIO
             call UGridReadHDF5PIOLib(un_str_grid,discretization%filename,option)
 #else
             call UGridReadHDF5(un_str_grid,discretization%filename,option)
 #endif
-! #ifdef PARALLELIO_LIB
+! #ifdef SCORPIO
 
 #endif
 !#if !defined(PETSC_HAVE_HDF5)

File src/pflotran/hdf5.F90

 
   PetscBool, public :: trick_hdf5 = PETSC_FALSE
 
-#if defined(PARALLELIO_LIB)
-  include "piof.h"  
+#if defined(SCORPIO)
+  include "scorpiof.h"  
 #endif
 
 #if defined(PETSC_HAVE_HDF5)
   
   implicit none
 
-#if defined(PARALLELIO_LIB)
+#if defined(SCORPIO)
 
 ! Using Parallel IO library
  
 
   read_block_size = HDF5_READ_BUFFER_SIZE
 
-  call parallelIO_get_dataset_size( num_cells_in_file, file_id, dataset_name, &
+  call scorpio_get_dataset_size( num_cells_in_file, file_id, dataset_name, &
           option%ioread_group_id, ierr)
   !>>>> get size of dataset call h5sget_simple_extent_npoints_f(file_space_id,num_cells_in_file,hdf5_err)
   !if (dataset_size > 0 .and. num_cells_in_file /= dataset_size) then
     call PetscLogEventBegin(logging%event_h5dread_f,ierr)
 
     ! rank_mpi = 1 ! This is in fact number of dimensions
-    call parallelIO_read_same_sub_dataset(cell_ids_i4, PIO_INTEGER, rank_mpi, dims, & 
+    call scorpio_read_same_sub_dataset(cell_ids_i4, SCORPIO_INTEGER, rank_mpi, dims, & 
             offset, file_id, dataset_name, option%ioread_group_id, ierr)
 
     !call h5dread_f(data_set_id,HDF_NATIVE_INTEGER,cell_ids_i4,dims,hdf5_err, &
 ! End of Default & Glenn's HDF5 Broadcast Mechanism
 
 #endif
-! PARALLELIO_LIB
+! SCORPIO
 
 end subroutine HDF5MapLocalToNaturalIndices
    
   PetscInt :: num_indices
   PetscReal, pointer :: real_array(:)
   
-#if defined(PARALLELIO_LIB)    
+#if defined(SCORPIO)    
   integer:: file_space_id
   integer:: memory_space_id
   integer:: data_set_id
 
   call PetscLogEventBegin(logging%event_read_real_array_hdf5,ierr)
                           
-#if defined(PARALLELIO_LIB)    
+#if defined(SCORPIO)    
   read_block_size = HDF5_READ_BUFFER_SIZE
   ! should be a rank=1 data space (i.e., one dimensional dataset)
-  call parallelIO_get_dataset_size( num_reals_in_file, file_id, dataset_name, &
+  call scorpio_get_dataset_size( num_reals_in_file, file_id, dataset_name, &
           option%ioread_group_id, ierr)
 !???? get size of dataset  call h5sget_simple_extent_npoints_f(file_space_id,num_reals_in_file,hdf5_err)
 #if 0
         call PetscLogEventBegin(logging%event_h5dread_f,ierr)
 
         ! rank_mpi = 1 ! This is in fact number of dimensions
-        call parallelIO_read_same_sub_dataset(real_buffer, PIO_DOUBLE, rank_mpi, dims, & 
+        call scorpio_read_same_sub_dataset(real_buffer, SCORPIO_DOUBLE, rank_mpi, dims, & 
                 offset, file_id, dataset_name, option%ioread_group_id, ierr)
 
         !call h5dread_f(data_set_id,H5T_NATIVE_DOUBLE,real_buffer,dims, &
     offset(1) = 0
     length(1) = dims(1)
       call PetscLogEventBegin(logging%event_h5dread_f,ierr)
-      call parallelIO_read_same_sub_dataset(real_buffer, PIO_DOUBLE, rank_mpi, dims, & 
+      call scorpio_read_same_sub_dataset(real_buffer, SCORPIO_DOUBLE, rank_mpi, dims, & 
               offset, file_id, dataset_name, option%ioread_group_id, ierr)
       call PetscLogEventEnd(logging%event_h5dread_f,ierr)                              
   endif
   
 #else
-!PARALLELIO_LIB is not defined
+!SCORPIO is not defined
 
   read_block_size = HDF5_READ_BUFFER_SIZE
   call h5dopen_f(file_id,dataset_name,data_set_id,hdf5_err)
   call h5sclose_f(file_space_id,hdf5_err)
   call h5dclose_f(data_set_id,hdf5_err)
 #endif
-!PARALLELIO_LIB
+!SCORPIO
 
   call PetscLogEventEnd(logging%event_read_real_array_hdf5,ierr)
                           
   
   implicit none
 
-#if defined(PARALLELIO_LIB)
+#if defined(SCORPIO)
   type(option_type) :: option
   character(len=MAXWORDLENGTH) :: dataset_name
   PetscInt :: dataset_size
   length = 0
   num_integers_in_file = 0
 
-  call parallelIO_get_dataset_size( num_integers, file_id, dataset_name, &
+  call scorpio_get_dataset_size( num_integers, file_id, dataset_name, &
           option%ioread_group_id, ierr)
   num_integers_in_file = int(num_integers) 
 #if 0  
            offset(1) = integer_count
            length(1) = dims(1)
            call PetscLogEventBegin(logging%event_h5dread_f,ierr)                              
-           call parallelIO_read_same_sub_dataset(integer_buffer_i4, PIO_INTEGER, rank_mpi, dims, & 
+           call scorpio_read_same_sub_dataset(integer_buffer_i4, SCORPIO_INTEGER, rank_mpi, dims, & 
                 offset, file_id, dataset_name, option%ioread_group_id, ierr)
            !call h5dread_f(data_set_id,HDF_NATIVE_INTEGER,integer_buffer_i4,dims, &
                           !hdf5_err,memory_space_id,file_space_id,prop_id)   
        offset(1) = integer_count
        length(1) = dims(1)
        call PetscLogEventBegin(logging%event_h5dread_f,ierr)                              
-       call parallelIO_read_same_sub_dataset(integer_buffer_i4, PIO_INTEGER, rank_mpi, dims, & 
+       call scorpio_read_same_sub_dataset(integer_buffer_i4, SCORPIO_INTEGER, rank_mpi, dims, & 
                 offset, file_id, dataset_name, option%ioread_group_id, ierr)
        !call h5dread_f(data_set_id,HDF_NATIVE_INTEGER,integer_buffer_i4,dims, &
                       !hdf5_err,memory_space_id,file_space_id,prop_id)   
   call PetscLogEventEnd(logging%event_read_int_array_hdf5,ierr)
 
 #else
-! PARALLELIO_LIB is not defined
+! SCORPIO is not defined
 
   type(option_type) :: option
   character(len=MAXWORDLENGTH) :: dataset_name
 ! Default & Glenn's HDF5 Broadcast Mechanism (uses HDF5 Independent I/O mode)
                           
 #endif
-! PARALLELIO_LIB
+! SCORPIO
 
 end subroutine HDF5ReadIntegerArray
 
   character(len=*) :: name
   PetscReal :: array(:)
 
-#if defined(PARALLELIO_LIB_WRITE)    
+#if defined(SCORPIO_WRITE)    
   integer:: file_id
   integer:: data_type
   integer:: file_space_id
   integer, pointer :: int_array_i4(:)
   PetscReal, pointer :: double_array(:)
 
-#if defined(PARALLELIO_LIB_WRITE)
+#if defined(SCORPIO_WRITE)
 
 !  write(option%io_buffer,'(" Writing dataset block: ", a, " type - ", i, ".")') trim(name), data_type
 !  call printMsg(option)
       call PetscLogEventBegin(logging%event_h5dwrite_f,ierr)         
        !write(option%io_buffer, &
        !   '(a," Writing double dataset1: dimensions: ",i9,i9,i9, " Data type and ndims: ",i9, i9)') & 
-       !trim(name), dims(1), dims(2), dims(3), PIO_DOUBLE, rank_mpi
+       !trim(name), dims(1), dims(2), dims(3), SCORPIO_DOUBLE, rank_mpi
        !call printMsg(option)   
-       call parallelIO_write_dataset_block(double_array, PIO_DOUBLE, rank_mpi, &
+       call scorpio_write_dataset_block(double_array, SCORPIO_DOUBLE, rank_mpi, &
               dims, length, start, file_id, name, &
               option%iowrite_group_id, ierr)
       !call h5dwrite_f(data_set_id,data_type,double_array,dims, &
       call PetscLogEventBegin(logging%event_h5dwrite_f,ierr)                              
        !write(option%io_buffer, &
        !   '(a," Writing integer dataset1: dimensions: ",i9,i9,i9, " Data type and ndims: ",i9, i9)') & 
-       !trim(name), dims(1), dims(2), dims(3), PIO_INTEGER, rank_mpi
+       !trim(name), dims(1), dims(2), dims(3), SCORPIO_INTEGER, rank_mpi
        !call printMsg(option)   
-      call parallelIO_write_dataset_block(int_array_i4, PIO_INTEGER, rank_mpi, &
+      call scorpio_write_dataset_block(int_array_i4, SCORPIO_INTEGER, rank_mpi, &
               dims, length, start, file_id, name, &
               option%iowrite_group_id, ierr)
       !!call h5dwrite_f(data_set_id,data_type,int_array_i4,dims, &
   call PetscLogEventEnd(logging%event_write_struct_dataset_hdf5,ierr)
 
 #else
-! PARALLELIO_LIB_WRITE is not defined  
+! SCORPIO_WRITE is not defined  
 
 ! Default HDF5 Write
 
   call PetscLogEventEnd(logging%event_write_struct_dataset_hdf5,ierr)
                           
 #endif
-! PARALLELIO_LIB_WRITE vs previous
+! SCORPIO_WRITE vs previous
 
 end subroutine HDF5WriteStructuredDataSet
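
For writes, the commit routes structured blocks through scorpio_write_dataset_block on the write group. Condensed from the call sites above (declarations omitted; argument roles inferred):

    call scorpio_write_dataset_block(double_array, SCORPIO_DOUBLE, rank_mpi, &
            dims, length, start, file_id, name, &
            option%iowrite_group_id, ierr)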
       
   
   implicit none
 
-#if defined(PARALLELIO_LIB)
+#if defined(SCORPIO)
 
   type(grid_type) :: grid
   type(option_type) :: option
   PetscInt, pointer :: indices(:)
   PetscInt :: num_indices
     
-#if defined(PARALLELIO_LIB)    
+#if defined(SCORPIO)    
   integer :: file_id
   integer:: file_space_id
   integer:: memory_space_id
   istart = iend - grid%nlmax
   
   ! should be a rank=1 data space
-  call parallelIO_get_dataset_size( num_data_in_file, file_id, dataset_name, &
+  call scorpio_get_dataset_size( num_data_in_file, file_id, dataset_name, &
           option%ioread_group_id, ierr)
   globaldims(1) = num_data_in_file 
 !???? get size of dataset  call h5sget_simple_extent_npoints_f(file_space_id,num_data_in_file,hdf5_err)
     length(1) = iend-istart
     call PetscLogEventBegin(logging%event_h5dread_f,ierr)                              
                                
-    call parallelIO_read_dataset(indices_i4(1:iend-istart), PIO_INTEGER, rank_mpi, globaldims, dims, & 
-            file_id, dataset_name, option%ioread_group_id, NONUNIFORM_CONTIGUOUS_READ, ierr)
+    call scorpio_read_dataset(indices_i4(1:iend-istart), SCORPIO_INTEGER, rank_mpi, globaldims, dims, & 
+            file_id, dataset_name, option%ioread_group_id, SCORPIO_NONUNIFORM_CONTIGUOUS_READ, ierr)
     !call h5dread_f(data_set_id,HDF_NATIVE_INTEGER,indices_i4(1:iend-istart), &
                    !dims,hdf5_err,memory_space_id,file_space_id,prop_id)                     
     call PetscLogEventEnd(logging%event_h5dread_f,ierr)                              
   call PetscLogEventEnd(logging%event_read_indices_hdf5,ierr)
 
 #else
-! PARALLELIO_LIB is not defined
+! SCORPIO is not defined
 
 ! Default HDF5 Mechanism 
 
 ! End of Default HDF5 Mechanism  
   
 #endif
-! PARALLELIO_LIB
+! SCORPIO
   
 end subroutine HDF5ReadIndices
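
The SCORPIO read routines in this file share one access pattern: open the file on a read group, query the dataset extent, read each rank's slice, close. A condensed sketch using only the SCORPIO entry points and constants that appear in this commit (declarations and error handling omitted; argument roles inferred from the call sites, which append CHAR(0) because the library expects C-style strings):

    ! open collectively on the read group
    filename = trim(filename) // CHAR(0)
    call scorpio_open_file(filename, option%ioread_group_id, &
                           SCORPIO_FILE_READONLY, file_id, ierr)

    ! extent of a rank-1 (one-dimensional) dataset
    call scorpio_get_dataset_size(num_data_in_file, file_id, dataset_name, &
                                  option%ioread_group_id, ierr)
    globaldims(1) = num_data_in_file

    ! each rank reads its contiguous portion; rank_mpi is the number of
    ! dimensions (1 here) and dims holds the local extent
    rank_mpi = 1
    call scorpio_read_dataset(real_buffer, SCORPIO_DOUBLE, rank_mpi, &
            globaldims, dims, file_id, dataset_name, &
            option%ioread_group_id, SCORPIO_NONUNIFORM_CONTIGUOUS_READ, ierr)

    call scorpio_close_file(file_id, option%ioread_group_id, ierr)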
 
 #include "finclude/petscvec.h"
 #include "finclude/petscvec.h90"
 
-#if defined(PARALLELIO_LIB)
+#if defined(SCORPIO)
  
   type(discretization_type) :: discretization
   type(grid_type) :: grid
   
   ! should be a rank=1 data space
 
-  call parallelIO_get_dataset_size( num_data_in_file, file_id, dataset_name, &
+  call scorpio_get_dataset_size( num_data_in_file, file_id, dataset_name, &
           option%ioread_group_id, ierr)
   globaldims(1) = num_data_in_file
 !???? get size   call h5sget_simple_extent_npoints_f(file_space_id,num_data_in_file,hdf5_err)
     if (data_type == H5T_NATIVE_DOUBLE) then
       call PetscLogEventBegin(logging%event_h5dread_f,ierr)                              
     
-      call parallelIO_read_dataset(real_buffer, PIO_DOUBLE, rank_mpi, globaldims, dims, & 
-            file_id, dataset_name, option%ioread_group_id, NONUNIFORM_CONTIGUOUS_READ, ierr)
+      call scorpio_read_dataset(real_buffer, SCORPIO_DOUBLE, rank_mpi, globaldims, dims, & 
+            file_id, dataset_name, option%ioread_group_id, SCORPIO_NONUNIFORM_CONTIGUOUS_READ, ierr)
       !call h5dread_f(data_set_id,H5T_NATIVE_DOUBLE,real_buffer,dims, &
                      !hdf5_err,memory_space_id,file_space_id,prop_id)
       call PetscLogEventEnd(logging%event_h5dread_f,ierr)                              
       allocate(integer_buffer_i4(iend-istart))
       call PetscLogEventBegin(logging%event_h5dread_f,ierr)                              
 
-      call parallelIO_read_dataset(integer_buffer_i4, PIO_INTEGER, rank_mpi, globaldims, dims, & 
-            file_id, dataset_name, option%ioread_group_id, NONUNIFORM_CONTIGUOUS_READ, ierr)
+      call scorpio_read_dataset(integer_buffer_i4, SCORPIO_INTEGER, rank_mpi, globaldims, dims, & 
+            file_id, dataset_name, option%ioread_group_id, SCORPIO_NONUNIFORM_CONTIGUOUS_READ, ierr)
       !call h5dread_f(data_set_id,HDF_NATIVE_INTEGER,integer_buffer_i4,dims, &
                      !hdf5_err,memory_space_id,file_space_id,prop_id)
       call PetscLogEventEnd(logging%event_h5dread_f,ierr)                              
   call PetscLogEventEnd(logging%event_read_array_hdf5,ierr)
 
 #else
-! PARALLELIO_LIB is not defined
+! SCORPIO is not defined
 
 ! Default HDF5 Mechanism 
  
 ! End of Default HDF5 Mechanism
 
 #endif
-! PARALLELIO_LIB
+! SCORPIO
 
 end subroutine HDF5ReadArray
 
   call GridCreateNaturalToGhostedHash(grid,option)
 #endif
 
-#if defined(PARALLELIO_LIB)
+#if defined(SCORPIO)
   if (mod(option%myrank,option%hdf5_read_group_size) == 0) then
       option%io_buffer = 'Opening hdf5 file: ' // trim(filename)
       call printMsg(option)
   endif
 
   filename = trim(filename) // CHAR(0)
-  call parallelIO_open_file(filename, option%ioread_group_id, FILE_READONLY, &
+  call scorpio_open_file(filename, option%ioread_group_id, SCORPIO_FILE_READONLY, &
           file_id, ierr)
   string = '/Regions/' // trim(region%name) // '/Cell Ids' //CHAR(0)
   option%io_buffer = 'Reading dataset: ' // trim(string)
 
  allocate(indices(grid%nlmax))
   ! Read Cell Ids  
-  string = 'Regions' // '/' // trim(region%name) // "Cell Ids" // CHAR(0)
+  string = 'Regions' // '/' // trim(region%name) // "/Cell Ids" // CHAR(0)
   ! num_indices <= 0 indicates that the array size is uncertain and
   ! the size will be returned in num_indices
   num_indices = -1
        option%io_buffer = 'Closing hdf5 file: ' // trim(filename)
        call printMsg(option)   
    endif
-   call parallelio_close_file(file_id, option%ioread_group_id, ierr)
+   call scorpio_close_file(file_id, option%ioread_group_id, ierr)
 
   call GridDestroyHashTable(grid)
 
-! PARALLELIO_LIB
+! SCORPIO
 #else   
 
   ! initialize fortran hdf5 interface 
 
   call GridDestroyHashTable(grid)
 #endif  
-! if PARALLELIO_LIB is not defined
+! if SCORPIO is not defined
 
 #endif
 !PETSC_HAVE_HDF5
 
   call PetscLogEventBegin(logging%event_cell_indx_int_read_hdf5,ierr)
   
-#if defined(PARALLELIO_LIB)
+#if defined(SCORPIO)
   if (mod(option%myrank,option%hdf5_read_group_size) == 0) then  
      option%io_buffer = 'Opening hdf5 file: ' // trim(filename)
      call printMsg(option) 
   end if   
   filename = trim(filename) //CHAR(0)
-  call parallelIO_open_file(filename, option%ioread_group_id, FILE_READONLY, &
+  call scorpio_open_file(filename, option%ioread_group_id, SCORPIO_FILE_READONLY, &
           file_id, ierr)
 
   ! Read Cell Ids
   if (mod(option%myrank,option%hdf5_read_group_size) == 0) then  
     option%io_buffer = 'Closing hdf5 file: ' // trim(filename)
     call printMsg(option)   
-    call parallelio_close_file(file_id, option%ioread_group_id, ierr)
+    call scorpio_close_file(file_id, option%ioread_group_id, ierr)
   endif
 
 #else
-! if PARALLELIO_LIB is not defined
+! if SCORPIO is not defined
 
  ! initialize fortran hdf5 interface
   call h5open_f(hdf5_err)
   call h5fclose_f(file_id,hdf5_err)
   call h5close_f(hdf5_err)
 #endif  
-! if PARALLELIO_LIB is not defined
+! if SCORPIO is not defined
 
 #endif
 ! PETSC_HAVE_HDF5
 
   call PetscLogEventBegin(logging%event_cell_indx_real_read_hdf5,ierr)
 
-#if defined(PARALLELIO_LIB)