Commits

Hong Zhang committed d4036a1

Move manual page of MatMerge_SeqsToMPI() to the correct place

Hg-commit: 40f2c4b1daf0e49842c8346312664f66c0a27e89


Files changed (1)

src/mat/impls/aij/mpi/mpiaij.c

 #undef __FUNCT__  
 #define __FUNCT__ "MatMerge"
 /*@
-      MatMerge - Creates a single large PETSc matrix by concatinating sequential
+      MatMerge - Creates a single large PETSc matrix by concatenating sequential
                  matrices from each processor
 
     Collective on MPI_Comm
 
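For readers coming to this diff cold, a minimal calling sketch for MatMerge() follows. The routine's argument list is not visible in this hunk, so the signature used below (communicator, local sequential matrix, local column count, MatReuse flag, output matrix) is an assumption about this version of mpiaij.c, not something documented here:

   Mat            A_seq,A_mpi;
   PetscErrorCode ierr;

   /* each process assembles its own sequential matrix A_seq              */
   /* assumed signature: MatMerge(comm,inmat,n,scall,outmat)              */
   ierr = MatMerge(PETSC_COMM_WORLD,A_seq,PETSC_DECIDE,MAT_INITIAL_MATRIX,&A_mpi);CHKERRQ(ierr);
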
 #undef __FUNCT__
 #define __FUNCT__ "MatMerge_SeqsToMPINumeric"
-/*@C
-      MatMerge_SeqsToMPI - Creates a MPIAIJ matrix by adding sequential
-                 matrices from each processor
-
-    Collective on MPI_Comm
-
-   Input Parameters:
-+    comm - the communicators the parallel matrix will live on
-.    seqmat - the input sequential matrices
-.    m - number of local rows (or PETSC_DECIDE)
-.    n - number of local columns (or PETSC_DECIDE)
--    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
-
-   Output Parameter:
-.    mpimat - the parallel matrix generated
-
-    Level: advanced
-
-   Notes:
-     The dimensions of the sequential matrix in each processor MUST be the same.
-     The input seqmat is included into the container "Mat_Merge_SeqsToMPI", and will be
-     destroyed when mpimat is destroyed. Call PetscObjectQuery() to access seqmat.
-@*/
 PetscErrorCode  MatMerge_SeqsToMPINumeric(Mat seqmat,Mat mpimat)
 {
   PetscErrorCode       ierr;
 
 #undef __FUNCT__
 #define __FUNCT__ "MatMerge_SeqsToMPI"
+/*@C
+      MatMerge_SeqsToMPI - Creates an MPIAIJ matrix by adding sequential
+                 matrices from each processor
+
+    Collective on MPI_Comm
+
+   Input Parameters:
++    comm - the communicator the parallel matrix will live on
+.    seqmat - the input sequential matrices
+.    m - number of local rows (or PETSC_DECIDE)
+.    n - number of local columns (or PETSC_DECIDE)
+-    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
+
+   Output Parameter:
+.    mpimat - the parallel matrix generated
+
+    Level: advanced
+
+   Notes:
+     The dimensions of the sequential matrix in each processor MUST be the same.
+     The input seqmat is included in the container "Mat_Merge_SeqsToMPI", and will be
+     destroyed when mpimat is destroyed. Call PetscObjectQuery() to access seqmat.
+@*/
 PetscErrorCode  MatMerge_SeqsToMPI(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,MatReuse scall,Mat *mpimat)
 {
   PetscErrorCode   ierr;
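
With the manual page now sitting directly above the routine it documents, a minimal usage sketch based on the signature and Notes shown here may help. The assembly of the per-process sequential matrix A_seq is omitted, and the reuse call assumes updated values are placed in the same seqmat, which is not spelled out in the manual page:

   Mat            A_seq,A_mpi;
   PetscErrorCode ierr;

   /* every process builds a sequential matrix A_seq of identical dimensions,
      as required by the Notes above */

   /* first call: create the parallel MPIAIJ matrix */
   ierr = MatMerge_SeqsToMPI(PETSC_COMM_WORLD,A_seq,PETSC_DECIDE,PETSC_DECIDE,
                             MAT_INITIAL_MATRIX,&A_mpi);CHKERRQ(ierr);

   /* later calls with new numerical values in A_seq reuse the structure of A_mpi */
   ierr = MatMerge_SeqsToMPI(PETSC_COMM_WORLD,A_seq,PETSC_DECIDE,PETSC_DECIDE,
                             MAT_REUSE_MATRIX,&A_mpi);CHKERRQ(ierr);

   /* A_seq is kept in the "Mat_Merge_SeqsToMPI" container attached to A_mpi and is
      destroyed with it; use PetscObjectQuery() to access it, do not destroy it yourself */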