Commits

BarryFSmith committed dd2fa69

fixed PetscViewerASCIIPrintf() for a singleton viewer so that the singleton's ASCII output is actually preserved
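
For orientation: before this commit, PetscViewerASCIIPrintf() called on a singleton obtained with PetscViewerGetSingleton() could lose the non-root ranks' text; with this change the text is queued with the mother viewer and emitted in rank order at the next flush. A minimal sketch of the pattern the touched examples rely on (the program is illustrative, not taken from the commit):

  #include <petscviewer.h>

  int main(int argc,char **argv)
  {
    PetscErrorCode ierr;
    PetscMPIInt    rank;
    PetscViewer    sviewer;

    ierr = PetscInitialize(&argc,&argv,NULL,NULL);CHKERRQ(ierr);
    ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
    /* synchronized output must be allowed on the shared viewer first,
       as the example hunks below all do */
    ierr = PetscViewerASCIISynchronizedAllow(PETSC_VIEWER_STDOUT_WORLD,PETSC_TRUE);CHKERRQ(ierr);
    ierr = PetscViewerGetSingleton(PETSC_VIEWER_STDOUT_WORLD,&sviewer);CHKERRQ(ierr);
    /* with the fix, this is queued on non-root ranks instead of being lost */
    ierr = PetscViewerASCIIPrintf(sviewer,"[%d] printed through the singleton\n",(int)rank);CHKERRQ(ierr);
    ierr = PetscViewerRestoreSingleton(PETSC_VIEWER_STDOUT_WORLD,&sviewer);CHKERRQ(ierr);
    /* flushing the mother viewer emits the queued text in rank order */
    ierr = PetscViewerFlush(PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = PetscFinalize();
    return 0;
  }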

Files changed (18)

src/dm/examples/tests/ex2.c

     PetscViewer            sviewer;
     ISLocalToGlobalMapping is;
 
+    ierr = PetscViewerASCIISynchronizedAllow(PETSC_VIEWER_STDOUT_WORLD,PETSC_TRUE);CHKERRQ(ierr);
     ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"\nLocal Vector: processor %d\n",rank);CHKERRQ(ierr);
     ierr = PetscViewerGetSingleton(PETSC_VIEWER_STDOUT_WORLD,&sviewer);CHKERRQ(ierr);
     ierr = VecView(local,sviewer);CHKERRQ(ierr);

src/dm/examples/tests/ex4.c

   ierr = PetscOptionsGetBool(NULL,"-local_print",&flg,NULL);CHKERRQ(ierr);
   if (flg) {
     PetscViewer sviewer;
+  
+    ierr = PetscViewerASCIISynchronizedAllow(PETSC_VIEWER_STDOUT_WORLD,PETSC_TRUE);CHKERRQ(ierr);
     ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"\nLocal Vector: processor %d\n",rank);CHKERRQ(ierr);
     ierr = PetscViewerGetSingleton(PETSC_VIEWER_STDOUT_WORLD,&sviewer);CHKERRQ(ierr);
     ierr = VecView(local,sviewer);CHKERRQ(ierr);

src/dm/examples/tests/ex6.c

   ierr = PetscOptionsGetBool(NULL,"-local_print",&flg,NULL);CHKERRQ(ierr);
   if (flg) {
     PetscViewer sviewer;
+    ierr = PetscViewerASCIISynchronizedAllow(PETSC_VIEWER_STDOUT_WORLD,PETSC_TRUE);CHKERRQ(ierr);
     ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"\nLocal Vector: processor %d\n",rank);CHKERRQ(ierr);
     ierr = PetscViewerGetSingleton(PETSC_VIEWER_STDOUT_WORLD,&sviewer);CHKERRQ(ierr);
     ierr = VecView(local,sviewer);CHKERRQ(ierr);

src/dm/examples/tests/output/ex16_1.out

 18
 19
 20
-[0] My part of redundant1 array
+[0] My part of redundant1 vector
 Vec Object: 1 MPI processes
   type: seq
 0
 2
 3
 4
-[1] My part of redundant1 array
+[1] My part of redundant1 vector
 Vec Object: 1 MPI processes
   type: seq
 0
 2
 3
 4
-[2] My part of redundant1 array
+[2] My part of redundant1 vector
 Vec Object: 1 MPI processes
   type: seq
 0
 12
 17
 18
-[0] My part of redundant2 array
+[0] My part of redundant2 vector
 Vec Object: 1 MPI processes
   type: seq
 13
 14
-[1] My part of redundant2 array
+[1] My part of redundant2 vector
 Vec Object: 1 MPI processes
   type: seq
 13
 14
-[2] My part of redundant2 array
+[2] My part of redundant2 vector
 Vec Object: 1 MPI processes
   type: seq
 13
 18
 19
 20
-Local to global mapping of redundant1 array
+Local to global mapping of redundant1 vector
 [0] 0 0
 [0] 1 1
 [0] 2 2
 [2] 0 12
 [2] 1 17
 [2] 2 18
-Local to global mapping of redundant2 array
+Local to global mapping of redundant2 vector
 [0] 0 13
 [0] 1 14
 [1] 0 13

src/dm/examples/tests/output/ex16_2.out

 18
 19
 20
-[0] My part of redundant1 array
+[0] My part of redundant1 vector
 Vec Object: 1 MPI processes
   type: seq
 0
 2
 3
 4
-[1] My part of redundant1 array
+[1] My part of redundant1 vector
 Vec Object: 1 MPI processes
   type: seq
 0
 2
 3
 4
-[2] My part of redundant1 array
+[2] My part of redundant1 vector
 Vec Object: 1 MPI processes
   type: seq
 0
 12
 17
 18
-[0] My part of redundant2 array
+[0] My part of redundant2 vector
 Vec Object: 1 MPI processes
   type: seq
 13
 14
-[1] My part of redundant2 array
+[1] My part of redundant2 vector
 Vec Object: 1 MPI processes
   type: seq
 13
 14
-[2] My part of redundant2 array
+[2] My part of redundant2 vector
 Vec Object: 1 MPI processes
   type: seq
 13
 36
 57
 40
-Local to global mapping of redundant1 array
+Local to global mapping of redundant1 vector
 [0] 0 0
 [0] 1 1
 [0] 2 2
 [2] 0 12
 [2] 1 17
 [2] 2 18
-Local to global mapping of redundant2 array
+Local to global mapping of redundant2 vector
 [0] 0 13
 [0] 1 14
 [1] 0 13

src/ksp/ksp/examples/tutorials/makefile

 	   if (${DIFF} output/ex7_1.out ex7_1.tmp) then true; \
 	   else echo ${PWD} ; echo "Possible problem with ex7_1, diffs above \n========================================="; fi; \
 	   ${RM} -f ex7_1.tmp
+runex7_2:
+	-@${MPIEXEC} -n 2 ./ex7 -ksp_view > ex7_2.tmp 2>&1; \
+	   if (${DIFF} output/ex7_2.out ex7_2.tmp) then true; \
+	   else echo ${PWD} ; echo "Possible problem with ex7_2, diffs above \n========================================="; fi; \
+	   ${RM} -f ex7_2.tmp
 runex9:
 	-@${MPIEXEC} -n 1 ./ex9 -t 2 -pc_type jacobi -ksp_monitor_short -ksp_type gmres -ksp_gmres_cgs_refinement_type refine_always \
 	   -s2_ksp_type bcgs -s2_pc_type jacobi -s2_ksp_monitor_short \
 TESTEXAMPLES_C		       = ex1.PETSc runex1 runex1_2 runex1_3 ex1.rm ex2.PETSc runex2 runex2_2 runex2_3 \
                                  runex2_4 runex2_bjacobi runex2_bjacobi_2 runex2_bjacobi_3 runex2_specest_1 runex2_specest_2 \
                                  runex2_chebyest_1 runex2_chebyest_2 runex2_chebyest_3 runex2_chebyest_4 runex2_fbcgs runex2_fbcgs_2 ex2.rm \
-                                 ex7.PETSc runex7 ex7.rm ex5.PETSc runex5 runex5_2 \
+                                 ex7.PETSc runex7 runex7_2 ex7.rm ex5.PETSc runex5 runex5_2 \
                                  runex5_redundant_0 runex5_redundant_1 runex5_redundant_2 runex5_redundant_3 runex5_redundant_4 ex5.rm \
                                  ex8g.PETSc runex8g_1 runex8g_2 runex8g_3 ex8g.rm \
                                  ex9.PETSc runex9 ex9.rm ex12.PETSc runex12 ex12.rm ex13.PETSc runex13 ex13.rm \

src/ksp/ksp/examples/tutorials/output/ex7_2.out

+KSP Object: 2 MPI processes
+  type: gmres
+    GMRES: restart=30, using Classical (unmodified) Gram-Schmidt Orthogonalization with no iterative refinement
+    GMRES: happy breakdown tolerance 1e-30
+  maximum iterations=10000, initial guess is zero
+  tolerances:  relative=1e-05, absolute=1e-50, divergence=10000
+  left preconditioning
+  using PRECONDITIONED norm type for convergence test
+PC Object: 2 MPI processes
+  type: bjacobi
+    block Jacobi: number of blocks = 8
+    Local solve info for each block is in the following KSP and PC objects:
+  [0] number of local blocks = 4, first local block number = 0
+    [0] local block number 0
+    KSP Object:    (sub_)     1 MPI processes
+      type: bcgs
+      maximum iterations=10000, initial guess is zero
+      tolerances:  relative=1e-06, absolute=1e-50, divergence=10000
+      left preconditioning
+      using PRECONDITIONED norm type for convergence test
+    PC Object:    (sub_)     1 MPI processes
+      type: none
+      linear system matrix = precond matrix:
+      Mat Object:       1 MPI processes
+        type: seqaij
+        rows=10, cols=10
+        total: nonzeros=28, allocated nonzeros=28
+        total number of mallocs used during MatSetValues calls =0
+          not using I-node routines
+    - - - - - - - - - - - - - - - - - -
+    [0] local block number 1
+    KSP Object:    (sub_)     1 MPI processes
+      type: preonly
+      maximum iterations=10000, initial guess is zero
+      tolerances:  relative=1e-05, absolute=1e-50, divergence=10000
+      left preconditioning
+      using NONE norm type for convergence test
+    PC Object:    (sub_)     1 MPI processes
+      type: ilu
+        ILU: out-of-place factorization
+        0 levels of fill
+        tolerance for zero pivot 2.22045e-14
+        using diagonal shift on blocks to prevent zero pivot [INBLOCKS]
+        matrix ordering: natural
+        factor fill ratio given 1, needed 1
+          Factored matrix follows:
+            Mat Object:             1 MPI processes
+              type: seqaij
+              rows=10, cols=10
+              package used to perform factorization: petsc
+              total: nonzeros=28, allocated nonzeros=28
+              total number of mallocs used during MatSetValues calls =0
+                not using I-node routines
+      linear system matrix = precond matrix:
+      Mat Object:       1 MPI processes
+        type: seqaij
+        rows=10, cols=10
+        total: nonzeros=28, allocated nonzeros=28
+        total number of mallocs used during MatSetValues calls =0
+          not using I-node routines
+    - - - - - - - - - - - - - - - - - -
+    [0] local block number 2
+    KSP Object:    (sub_)     1 MPI processes
+      type: bcgs
+      maximum iterations=10000, initial guess is zero
+      tolerances:  relative=1e-06, absolute=1e-50, divergence=10000
+      left preconditioning
+      using PRECONDITIONED norm type for convergence test
+    PC Object:    (sub_)     1 MPI processes
+      type: none
+      linear system matrix = precond matrix:
+      Mat Object:       1 MPI processes
+        type: seqaij
+        rows=10, cols=10
+        total: nonzeros=28, allocated nonzeros=28
+        total number of mallocs used during MatSetValues calls =0
+          not using I-node routines
+    - - - - - - - - - - - - - - - - - -
+    [0] local block number 3
+    KSP Object:    (sub_)     1 MPI processes
+      type: preonly
+      maximum iterations=10000, initial guess is zero
+      tolerances:  relative=1e-05, absolute=1e-50, divergence=10000
+      left preconditioning
+      using NONE norm type for convergence test
+    PC Object:    (sub_)     1 MPI processes
+      type: ilu
+        ILU: out-of-place factorization
+        0 levels of fill
+        tolerance for zero pivot 2.22045e-14
+        using diagonal shift on blocks to prevent zero pivot [INBLOCKS]
+        matrix ordering: natural
+        factor fill ratio given 1, needed 1
+          Factored matrix follows:
+            Mat Object:             1 MPI processes
+              type: seqaij
+              rows=10, cols=10
+              package used to perform factorization: petsc
+              total: nonzeros=28, allocated nonzeros=28
+              total number of mallocs used during MatSetValues calls =0
+                not using I-node routines
+      linear system matrix = precond matrix:
+      Mat Object:       1 MPI processes
+        type: seqaij
+        rows=10, cols=10
+        total: nonzeros=28, allocated nonzeros=28
+        total number of mallocs used during MatSetValues calls =0
+          not using I-node routines
+    - - - - - - - - - - - - - - - - - -
+  [1] number of local blocks = 4, first local block number = 1
+    [1] local block number 0
+    KSP Object:    (sub_)     1 MPI processes
+      type: gmres
+        GMRES: restart=30, using Classical (unmodified) Gram-Schmidt Orthogonalization with no iterative refinement
+        GMRES: happy breakdown tolerance 1e-30
+      maximum iterations=10000, initial guess is zero
+      tolerances:  relative=1e-07, absolute=1e-50, divergence=10000
+      left preconditioning
+      using PRECONDITIONED norm type for convergence test
+    PC Object:    (sub_)     1 MPI processes
+      type: jacobi
+      linear system matrix = precond matrix:
+      Mat Object:       1 MPI processes
+        type: seqaij
+        rows=10, cols=10
+        total: nonzeros=28, allocated nonzeros=28
+        total number of mallocs used during MatSetValues calls =0
+          not using I-node routines
+    - - - - - - - - - - - - - - - - - -
+    [1] local block number 1
+    KSP Object:    (sub_)     1 MPI processes
+      type: gmres
+        GMRES: restart=30, using Classical (unmodified) Gram-Schmidt Orthogonalization with no iterative refinement
+        GMRES: happy breakdown tolerance 1e-30
+      maximum iterations=10000, initial guess is zero
+      tolerances:  relative=1e-07, absolute=1e-50, divergence=10000
+      left preconditioning
+      using PRECONDITIONED norm type for convergence test
+    PC Object:    (sub_)     1 MPI processes
+      type: jacobi
+      linear system matrix = precond matrix:
+      Mat Object:       1 MPI processes
+        type: seqaij
+        rows=10, cols=10
+        total: nonzeros=28, allocated nonzeros=28
+        total number of mallocs used during MatSetValues calls =0
+          not using I-node routines
+    - - - - - - - - - - - - - - - - - -
+    [1] local block number 2
+    KSP Object:    (sub_)     1 MPI processes
+      type: gmres
+        GMRES: restart=30, using Classical (unmodified) Gram-Schmidt Orthogonalization with no iterative refinement
+        GMRES: happy breakdown tolerance 1e-30
+      maximum iterations=10000, initial guess is zero
+      tolerances:  relative=1e-07, absolute=1e-50, divergence=10000
+      left preconditioning
+      using PRECONDITIONED norm type for convergence test
+    PC Object:    (sub_)     1 MPI processes
+      type: jacobi
+      linear system matrix = precond matrix:
+      Mat Object:       1 MPI processes
+        type: seqaij
+        rows=10, cols=10
+        total: nonzeros=28, allocated nonzeros=28
+        total number of mallocs used during MatSetValues calls =0
+          not using I-node routines
+    - - - - - - - - - - - - - - - - - -
+    [1] local block number 3
+    KSP Object:    (sub_)     1 MPI processes
+      type: gmres
+        GMRES: restart=30, using Classical (unmodified) Gram-Schmidt Orthogonalization with no iterative refinement
+        GMRES: happy breakdown tolerance 1e-30
+      maximum iterations=10000, initial guess is zero
+      tolerances:  relative=1e-07, absolute=1e-50, divergence=10000
+      left preconditioning
+      using PRECONDITIONED norm type for convergence test
+    PC Object:    (sub_)     1 MPI processes
+      type: jacobi
+      linear system matrix = precond matrix:
+      Mat Object:       1 MPI processes
+        type: seqaij
+        rows=10, cols=10
+        total: nonzeros=28, allocated nonzeros=28
+        total number of mallocs used during MatSetValues calls =0
+          not using I-node routines
+    - - - - - - - - - - - - - - - - - -
+  linear system matrix = precond matrix:
+  Mat Object:   2 MPI processes
+    type: mpiaij
+    rows=80, cols=80
+    total: nonzeros=364, allocated nonzeros=800
+    total number of mallocs used during MatSetValues calls =0
+      not using I-node (on process 0) routines
+KSP Object: 2 MPI processes
+  type: gmres
+    GMRES: restart=30, using Classical (unmodified) Gram-Schmidt Orthogonalization with no iterative refinement
+    GMRES: happy breakdown tolerance 1e-30
+  maximum iterations=10000, initial guess is zero
+  tolerances:  relative=1e-05, absolute=1e-50, divergence=10000
+  left preconditioning
+  using PRECONDITIONED norm type for convergence test
+PC Object: 2 MPI processes
+  type: bjacobi
+    block Jacobi: number of blocks = 8
+    Local solve info for each block is in the following KSP and PC objects:
+  [0] number of local blocks = 4, first local block number = 0
+    [0] local block number 0
+    KSP Object:    (sub_)     1 MPI processes
+      type: bcgs
+      maximum iterations=10000, initial guess is zero
+      tolerances:  relative=1e-06, absolute=1e-50, divergence=10000
+      left preconditioning
+      using PRECONDITIONED norm type for convergence test
+    PC Object:    (sub_)     1 MPI processes
+      type: none
+      linear system matrix = precond matrix:
+      Mat Object:       1 MPI processes
+        type: seqaij
+        rows=10, cols=10
+        total: nonzeros=28, allocated nonzeros=28
+        total number of mallocs used during MatSetValues calls =0
+          not using I-node routines
+    - - - - - - - - - - - - - - - - - -
+    [0] local block number 1
+    KSP Object:    (sub_)     1 MPI processes
+      type: preonly
+      maximum iterations=10000, initial guess is zero
+      tolerances:  relative=1e-05, absolute=1e-50, divergence=10000
+      left preconditioning
+      using NONE norm type for convergence test
+    PC Object:    (sub_)     1 MPI processes
+      type: ilu
+        ILU: out-of-place factorization
+        0 levels of fill
+        tolerance for zero pivot 2.22045e-14
+        using diagonal shift on blocks to prevent zero pivot [INBLOCKS]
+        matrix ordering: natural
+        factor fill ratio given 1, needed 1
+          Factored matrix follows:
+            Mat Object:             1 MPI processes
+              type: seqaij
+              rows=10, cols=10
+              package used to perform factorization: petsc
+              total: nonzeros=28, allocated nonzeros=28
+              total number of mallocs used during MatSetValues calls =0
+                not using I-node routines
+      linear system matrix = precond matrix:
+      Mat Object:       1 MPI processes
+        type: seqaij
+        rows=10, cols=10
+        total: nonzeros=28, allocated nonzeros=28
+        total number of mallocs used during MatSetValues calls =0
+          not using I-node routines
+    - - - - - - - - - - - - - - - - - -
+    [0] local block number 2
+    KSP Object:    (sub_)     1 MPI processes
+      type: bcgs
+      maximum iterations=10000, initial guess is zero
+      tolerances:  relative=1e-06, absolute=1e-50, divergence=10000
+      left preconditioning
+      using PRECONDITIONED norm type for convergence test
+    PC Object:    (sub_)     1 MPI processes
+      type: none
+      linear system matrix = precond matrix:
+      Mat Object:       1 MPI processes
+        type: seqaij
+        rows=10, cols=10
+        total: nonzeros=28, allocated nonzeros=28
+        total number of mallocs used during MatSetValues calls =0
+          not using I-node routines
+    - - - - - - - - - - - - - - - - - -
+    [0] local block number 3
+    KSP Object:    (sub_)     1 MPI processes
+      type: preonly
+      maximum iterations=10000, initial guess is zero
+      tolerances:  relative=1e-05, absolute=1e-50, divergence=10000
+      left preconditioning
+      using NONE norm type for convergence test
+    PC Object:    (sub_)     1 MPI processes
+      type: ilu
+        ILU: out-of-place factorization
+        0 levels of fill
+        tolerance for zero pivot 2.22045e-14
+        using diagonal shift on blocks to prevent zero pivot [INBLOCKS]
+        matrix ordering: natural
+        factor fill ratio given 1, needed 1
+          Factored matrix follows:
+            Mat Object:             1 MPI processes
+              type: seqaij
+              rows=10, cols=10
+              package used to perform factorization: petsc
+              total: nonzeros=28, allocated nonzeros=28
+              total number of mallocs used during MatSetValues calls =0
+                not using I-node routines
+      linear system matrix = precond matrix:
+      Mat Object:       1 MPI processes
+        type: seqaij
+        rows=10, cols=10
+        total: nonzeros=28, allocated nonzeros=28
+        total number of mallocs used during MatSetValues calls =0
+          not using I-node routines
+    - - - - - - - - - - - - - - - - - -
+  [1] number of local blocks = 4, first local block number = 1
+    [1] local block number 0
+    KSP Object:    (sub_)     1 MPI processes
+      type: gmres
+        GMRES: restart=30, using Classical (unmodified) Gram-Schmidt Orthogonalization with no iterative refinement
+        GMRES: happy breakdown tolerance 1e-30
+      maximum iterations=10000, initial guess is zero
+      tolerances:  relative=1e-07, absolute=1e-50, divergence=10000
+      left preconditioning
+      using PRECONDITIONED norm type for convergence test
+    PC Object:    (sub_)     1 MPI processes
+      type: jacobi
+      linear system matrix = precond matrix:
+      Mat Object:       1 MPI processes
+        type: seqaij
+        rows=10, cols=10
+        total: nonzeros=28, allocated nonzeros=28
+        total number of mallocs used during MatSetValues calls =0
+          not using I-node routines
+    - - - - - - - - - - - - - - - - - -
+    [1] local block number 1
+    KSP Object:    (sub_)     1 MPI processes
+      type: gmres
+        GMRES: restart=30, using Classical (unmodified) Gram-Schmidt Orthogonalization with no iterative refinement
+        GMRES: happy breakdown tolerance 1e-30
+      maximum iterations=10000, initial guess is zero
+      tolerances:  relative=1e-07, absolute=1e-50, divergence=10000
+      left preconditioning
+      using PRECONDITIONED norm type for convergence test
+    PC Object:    (sub_)     1 MPI processes
+      type: jacobi
+      linear system matrix = precond matrix:
+      Mat Object:       1 MPI processes
+        type: seqaij
+        rows=10, cols=10
+        total: nonzeros=28, allocated nonzeros=28
+        total number of mallocs used during MatSetValues calls =0
+          not using I-node routines
+    - - - - - - - - - - - - - - - - - -
+    [1] local block number 2
+    KSP Object:    (sub_)     1 MPI processes
+      type: gmres
+        GMRES: restart=30, using Classical (unmodified) Gram-Schmidt Orthogonalization with no iterative refinement
+        GMRES: happy breakdown tolerance 1e-30
+      maximum iterations=10000, initial guess is zero
+      tolerances:  relative=1e-07, absolute=1e-50, divergence=10000
+      left preconditioning
+      using PRECONDITIONED norm type for convergence test
+    PC Object:    (sub_)     1 MPI processes
+      type: jacobi
+      linear system matrix = precond matrix:
+      Mat Object:       1 MPI processes
+        type: seqaij
+        rows=10, cols=10
+        total: nonzeros=28, allocated nonzeros=28
+        total number of mallocs used during MatSetValues calls =0
+          not using I-node routines
+    - - - - - - - - - - - - - - - - - -
+    [1] local block number 3
+    KSP Object:    (sub_)     1 MPI processes
+      type: gmres
+        GMRES: restart=30, using Classical (unmodified) Gram-Schmidt Orthogonalization with no iterative refinement
+        GMRES: happy breakdown tolerance 1e-30
+      maximum iterations=10000, initial guess is zero
+      tolerances:  relative=1e-07, absolute=1e-50, divergence=10000
+      left preconditioning
+      using PRECONDITIONED norm type for convergence test
+    PC Object:    (sub_)     1 MPI processes
+      type: jacobi
+      linear system matrix = precond matrix:
+      Mat Object:       1 MPI processes
+        type: seqaij
+        rows=10, cols=10
+        total: nonzeros=28, allocated nonzeros=28
+        total number of mallocs used during MatSetValues calls =0
+          not using I-node routines
+    - - - - - - - - - - - - - - - - - -
+  linear system matrix = precond matrix:
+  Mat Object:   2 MPI processes
+    type: mpiaij
+    rows=80, cols=80
+    total: nonzeros=364, allocated nonzeros=800
+    total number of mallocs used during MatSetValues calls =0
+      not using I-node (on process 0) routines
+Norm of error 1.09983e-05 iterations 13

src/ksp/pc/impls/asm/asm.c

       ierr = PetscViewerASCIIPrintf(viewer,"  Local solve info for each block is in the following KSP and PC objects:\n");CHKERRQ(ierr);
       ierr = PetscViewerASCIIPushTab(viewer);CHKERRQ(ierr);
       ierr = PetscViewerASCIIPrintf(viewer,"- - - - - - - - - - - - - - - - - -\n");CHKERRQ(ierr);
-      for (i=0; i<osm->n_local; i++) {
-        ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
-        if (i < osm->n_local_true) {
-          ierr = ISGetLocalSize(osm->is[i],&bsz);CHKERRQ(ierr);
-          ierr = PetscViewerASCIISynchronizedPrintf(sviewer,"[%d] local block number %D, size = %D\n",(int)rank,i,bsz);CHKERRQ(ierr);
-          ierr = KSPView(osm->ksp[i],sviewer);CHKERRQ(ierr);
-          ierr = PetscViewerASCIISynchronizedPrintf(sviewer,"- - - - - - - - - - - - - - - - - -\n");CHKERRQ(ierr);
-        }
-        ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
+      ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
+      for (i=0; i<osm->n_local_true; i++) {
+        ierr = ISGetLocalSize(osm->is[i],&bsz);CHKERRQ(ierr);
+        ierr = PetscViewerASCIISynchronizedPrintf(sviewer,"[%d] local block number %D, size = %D\n",(int)rank,i,bsz);CHKERRQ(ierr);
+        ierr = KSPView(osm->ksp[i],sviewer);CHKERRQ(ierr);
+        ierr = PetscViewerASCIISynchronizedPrintf(sviewer,"- - - - - - - - - - - - - - - - - -\n");CHKERRQ(ierr);
       }
+      ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
       ierr = PetscViewerASCIIPopTab(viewer);CHKERRQ(ierr);
       ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
       ierr = PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);CHKERRQ(ierr);

src/ksp/pc/impls/bjacobi/bjacobi.c

       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] number of local blocks = %D, first local block number = %D\n",
                                                 rank,jac->n_local,jac->first_local);CHKERRQ(ierr);
       ierr = PetscViewerASCIIPushTab(viewer);CHKERRQ(ierr);
-      for (i=0; i<n_global; i++) {
-        ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
-        if (i < jac->n_local) {
-          ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] local block number %D\n",rank,i);CHKERRQ(ierr);
-          ierr = KSPView(jac->ksp[i],sviewer);CHKERRQ(ierr);
-          ierr = PetscViewerASCIISynchronizedPrintf(viewer,"- - - - - - - - - - - - - - - - - -\n");CHKERRQ(ierr);
-        }
-        ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
+      ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
+      for (i=0; i<jac->n_local; i++) {
+        ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] local block number %D\n",rank,i);CHKERRQ(ierr);
+        ierr = KSPView(jac->ksp[i],sviewer);CHKERRQ(ierr);
+        ierr = PetscViewerASCIISynchronizedPrintf(viewer,"- - - - - - - - - - - - - - - - - -\n");CHKERRQ(ierr);
       }
+      ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
       ierr = PetscViewerASCIIPopTab(viewer);CHKERRQ(ierr);
       ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
       ierr = PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);CHKERRQ(ierr);
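
The asm.c hunk above and this bjacobi.c hunk make the same structural change: instead of looping to a global block count and acquiring/releasing the singleton inside the loop, each rank now acquires the singleton once, views only the blocks it actually owns, and restores it afterwards. The queueing fix is what makes this safe, since ranks may own different numbers of blocks. Distilled to its shape (nlocal and subksp are illustrative names, not from the source):

  ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
  for (i=0; i<nlocal; i++) {   /* only this rank's blocks */
    ierr = PetscViewerASCIISynchronizedPrintf(sviewer,"[%d] local block number %D\n",rank,i);CHKERRQ(ierr);
    ierr = KSPView(subksp[i],sviewer);CHKERRQ(ierr);
  }
  ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
  ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);   /* emits each rank's queued text in rank order */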

src/mat/examples/tests/ex166.c

   ierr = PetscViewerASCIIPrintf(viewer,"Row permutation\n");CHKERRQ(ierr);
   ierr = ISView(isrow,viewer);CHKERRQ(ierr);
   ierr = PetscViewerASCIIPrintf(viewer,"Column permutation\n");CHKERRQ(ierr);
+  ierr = PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);CHKERRQ(ierr);
   ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
   ierr = ISView(iscol,sviewer);CHKERRQ(ierr);
   ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);

src/mat/examples/tests/ex4.c

      sviewer will cause the submatrices (one per processor) to be printed in the correct order
   */
   ierr = PetscViewerASCIIPrintf(PETSC_VIEWER_STDOUT_WORLD,"Submatrices\n");CHKERRQ(ierr);
+  ierr = PetscViewerASCIISynchronizedAllow(PETSC_VIEWER_STDOUT_WORLD,PETSC_TRUE);CHKERRQ(ierr);
   ierr = PetscViewerGetSingleton(PETSC_VIEWER_STDOUT_WORLD,&sviewer);CHKERRQ(ierr);
   ierr = MatView(submat,sviewer);CHKERRQ(ierr);
   ierr = PetscViewerRestoreSingleton(PETSC_VIEWER_STDOUT_WORLD,&sviewer);CHKERRQ(ierr);

src/mat/impls/dense/mpi/mpidense.c

     if (!rank) {
       ierr = PetscObjectSetName((PetscObject)((Mat_MPIDense*)(A->data))->A,((PetscObject)mat)->name);CHKERRQ(ierr);
       /* Set the type name to MATMPIDense so that the correct type can be printed out by PetscObjectPrintClassNamePrefixType() in MatView_SeqDense_ASCII()*/
-      PetscStrcpy(((PetscObject)((Mat_MPIDense*)(A->data))->A)->type_name,MATMPIDENSE);
+      ierr = PetscStrcpy(((PetscObject)((Mat_MPIDense*)(A->data))->A)->type_name,MATMPIDENSE);CHKERRQ(ierr);
       ierr = MatView(((Mat_MPIDense*)(A->data))->A,sviewer);CHKERRQ(ierr);
     }
     ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);

src/mat/impls/is/matis.c

   PetscViewer    sviewer;
 
   PetscFunctionBegin;
+  ierr = PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);CHKERRQ(ierr);
   ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
   ierr = MatView(a->A,sviewer);CHKERRQ(ierr);
   ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);

src/mat/impls/sbaij/mpi/mpisbaij.c

     if (!rank) {
       ierr = PetscObjectSetName((PetscObject)((Mat_MPISBAIJ*)(A->data))->A,((PetscObject)mat)->name);CHKERRQ(ierr);
       /* Set the type name to MATMPISBAIJ so that the correct type can be printed out by PetscObjectPrintClassNamePrefixType() in MatView_SeqSBAIJ_ASCII()*/
-      PetscStrcpy(((PetscObject)((Mat_MPISBAIJ*)(A->data))->A)->type_name,MATMPISBAIJ);
+      ierr = PetscStrcpy(((PetscObject)((Mat_MPISBAIJ*)(A->data))->A)->type_name,MATMPISBAIJ);CHKERRQ(ierr);
       ierr = MatView(((Mat_MPISBAIJ*)(A->data))->A,sviewer);CHKERRQ(ierr);
     }
     ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);

src/snes/impls/nasm/nasm.c

   SNES_NASM      *nasm = (SNES_NASM*)snes->data;
   PetscErrorCode ierr;
   PetscMPIInt    rank,size;
-  PetscInt       i,j,N,bsz;
+  PetscInt       i,N,bsz;
   PetscBool      iascii,isstring;
   PetscViewer    sviewer;
   MPI_Comm       comm;
       ierr = PetscViewerASCIIPrintf(viewer,"  Local solve info for each block is in the following SNES objects:\n");CHKERRQ(ierr);
       ierr = PetscViewerASCIIPushTab(viewer);CHKERRQ(ierr);
       ierr = PetscViewerASCIIPrintf(viewer,"- - - - - - - - - - - - - - - - - -\n");CHKERRQ(ierr);
-      for (j=0; j<size; j++) {
-        ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
-        if (rank == j) {
-          for (i=0; i<nasm->n; i++) {
-            ierr = VecGetLocalSize(nasm->x[i],&bsz);CHKERRQ(ierr);
-            ierr = PetscViewerASCIIPrintf(sviewer,"[%d] local block number %D, size = %D\n",(int)rank,i,bsz);CHKERRQ(ierr);
-            ierr = SNESView(nasm->subsnes[i],sviewer);CHKERRQ(ierr);
-            ierr = PetscViewerASCIIPrintf(sviewer,"- - - - - - - - - - - - - - - - - -\n");CHKERRQ(ierr);
-          }
-        }
-        ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
-        ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
+      ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
+      for (i=0; i<nasm->n; i++) {
+        ierr = VecGetLocalSize(nasm->x[i],&bsz);CHKERRQ(ierr);
+        ierr = PetscViewerASCIIPrintf(sviewer,"[%d] local block number %D, size = %D\n",(int)rank,i,bsz);CHKERRQ(ierr);
+        ierr = SNESView(nasm->subsnes[i],sviewer);CHKERRQ(ierr);
+        ierr = PetscViewerASCIIPrintf(sviewer,"- - - - - - - - - - - - - - - - - -\n");CHKERRQ(ierr);
       }
+      ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
+      ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
       ierr = PetscViewerASCIIPopTab(viewer);CHKERRQ(ierr);
     }
   } else if (isstring) {

src/sys/classes/viewer/impls/ascii/asciiimpl.h

   PetscInt      tab_store;      /* store tabs value while tabs are turned off */
   PetscViewer   bviewer;        /* if PetscViewer is a singleton, this points to mother */
   PetscViewer   sviewer;        /* if PetscViewer has a singleton, this points to singleton */
+  PetscViewer   subviewer;      /* used with PetscViewerGetSubcomm() */
   char          *filename;
   PetscBool     storecompressed;
   PetscBool     closefile;

src/sys/classes/viewer/impls/ascii/filev.c

 
   PetscFunctionBegin;
   ierr = PetscViewerRestoreSingleton(vascii->bviewer,&viewer);CHKERRQ(ierr);
+  vascii->bviewer = NULL;
   PetscFunctionReturn(0);
 }
 
   PetscErrorCode    ierr;
 
   PetscFunctionBegin;
-  ierr = PetscViewerRestoreSubcomm(vascii->bviewer,PetscObjectComm((PetscObject)viewer),&viewer);CHKERRQ(ierr);
+  ierr = PetscViewerRestoreSubcomm(vascii->subviewer,PetscObjectComm((PetscObject)viewer),&viewer);CHKERRQ(ierr);
+  vascii->subviewer = NULL;
   PetscFunctionReturn(0);
 }
 
 {
   PetscViewer_ASCII *ascii = (PetscViewer_ASCII*)viewer->data;
   PetscMPIInt       rank;
-  PetscInt          tab;
+  PetscInt          tab,intab = ascii->tab;
   PetscErrorCode    ierr;
   FILE              *fd = ascii->fd;
-  PetscBool         iascii;
+  PetscBool         iascii,issingleton = PETSC_FALSE;
   int               err;
 
   PetscFunctionBegin;
   PetscValidCharPointer(format,2);
   ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
   if (!iascii) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Not ASCII PetscViewer");
+  if (ascii->bviewer) {
+    viewer      = ascii->bviewer;
+    ascii       = (PetscViewer_ASCII*)viewer->data;
+    fd          = ascii->fd;
+    issingleton = PETSC_TRUE;
+  }
 
   ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)viewer),&rank);CHKERRQ(ierr);
   if (!rank) {
     va_list Argp;
-    tab = ascii->tab;
+    tab = intab;
     while (tab--) {
       ierr = PetscFPrintf(PETSC_COMM_SELF,fd,"  ");CHKERRQ(ierr);
     }
     if (err) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fflush() failed on file");
     if (petsc_history) {
       va_start(Argp,format);
-      tab = ascii->tab;
+      tab = intab;
       while (tab--) {
         ierr = PetscFPrintf(PETSC_COMM_SELF,petsc_history,"  ");CHKERRQ(ierr);
       }
       if (err) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fflush() failed on file");
     }
     va_end(Argp);
+  } else if (issingleton) {
+    char        *string;
+    va_list     Argp;
+    size_t      fullLength;
+    PrintfQueue next;
+
+    ierr = PetscNew(struct _PrintfQueue,&next);CHKERRQ(ierr);
+    if (petsc_printfqueue) {
+      petsc_printfqueue->next = next;
+      petsc_printfqueue       = next;
+    } else {
+      petsc_printfqueuebase = petsc_printfqueue = next;
+    }
+    petsc_printfqueuelength++;
+    next->size = QUEUESTRINGSIZE;
+    ierr       = PetscMalloc(next->size*sizeof(char), &next->string);CHKERRQ(ierr);
+    ierr       = PetscMemzero(next->string,next->size);CHKERRQ(ierr);
+    string     = next->string;
+    tab        = intab;
+    tab       *= 2;
+    while (tab--) {
+      *string++ = ' ';
+    }
+    va_start(Argp,format);
+    ierr = PetscVSNPrintf(string,next->size-2*intab,format,&fullLength,Argp);CHKERRQ(ierr);
+    va_end(Argp);
   }
   PetscFunctionReturn(0);
 }
   ierr = PetscObjectGetName((PetscObject)viewer,&name);CHKERRQ(ierr);
   ierr = PetscObjectSetName((PetscObject)(*outviewer),name);CHKERRQ(ierr);
 
-  ((PetscViewer_ASCII*)((*outviewer)->data))->bviewer = viewer;
+  ((PetscViewer_ASCII*)((*outviewer)->data))->subviewer = viewer;
 
   (*outviewer)->ops->destroy = PetscViewerDestroy_ASCII_Subcomm;
   PetscFunctionReturn(0);
   vascii->fd        = PETSC_STDOUT;
   vascii->mode      = FILE_MODE_WRITE;
   vascii->bviewer   = 0;
+  vascii->subviewer = 0;
   vascii->sviewer   = 0;
   vascii->tab       = 0;
   vascii->tab_store = 0;
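
Note that the new issingleton branch above only formats the string and stores it on the petsc_printfqueue list; nothing reaches the file at that point. The queued text is written out later, in rank order, when PetscViewerFlush() is called on the mother viewer, which is why the asm.c, bjacobi.c, and nasm.c hunks all keep a PetscViewerFlush(viewer) after restoring the singleton.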

src/vec/vec/examples/tests/ex24.c

   ierr = VecScatterBegin(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
   ierr = VecScatterEnd(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
 
+  ierr = PetscViewerASCIISynchronizedAllow(PETSC_VIEWER_STDOUT_WORLD,PETSC_TRUE);CHKERRQ(ierr);
   ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"----\n");CHKERRQ(ierr);
   ierr = PetscViewerGetSingleton(PETSC_VIEWER_STDOUT_WORLD,&sviewer);CHKERRQ(ierr);
   ierr = VecView(y,sviewer);CHKERRQ(ierr); fflush(stdout);