Commits

Matt Knepley committed 72f2488 Merge

Merge branch 'knepley/fix-dm-clone' into knepley/pylith

* knepley/fix-dm-clone:
DMPlex: clones are already setup
rm white spaces
provide informative error message when MatGetMultiProcBlock_xxx() is not supported
added const to char argument
sys comm: remove redundant self-call to MPI_Attr_get in DelComm_Outer
bib: Kyrke-Smith, Katz, and Fowler on ice streams
Minor fixes to the website faq, mostly uncontroversial typo fixes.
sys comm: separate destructors for inner and outer comms
sys: use union for Bcast of an MPI_Comm in PetscObjectName
sys comm: use union instead of void* and memcpy for MPI_Comm as an attribute
sys: cannot do input validation when !PetscInitialized
build/nightlyscripts: checkout required branch as 'detached' so the builds are tolerant to deleted/recreated branches
changed final argument to PetscPClose from PetscInt to int since this is an int returned from a system call

Comments (0)

Files changed (19)

bin/maint/builddist

 
 # Clean and Update the git repository and check for branch
 cd $PETSC_DIR
-git checkout -f $branch
+git clean -q -f -d -x
+git fetch -q origin
+git checkout -f origin/$branch
 if [ "$?" != "0" ]; then
   echo 'Error: branch: $branch does not exist in $PETSC_DIR'
   exit
 fi
-git clean -q -f -d -x
-git pull -q
 
 pdir=`basename $PETSC_DIR`
 

bin/maint/buildtest

 rm -f build.log
 echo "Build on $MACH $ARCH $nPETSC_DIR `date` " > build.log
 
+# Note: we are using detached heads - so 'git fetch/pull' is not done here.
 # Note use 'git clean' instead of manually deleting selected old files.
-# This also requires 'clean' extension enabled on all nightly build machines.
-
-echo "Cleaning throughly and updating git clone at $nPETSC_DIR" >>& build.log
-(git reset --hard; git clean -q -f -d -x -e build.log;git pull -q) >>& build.log
-echo "Currently building git branch:" `git describe --contains --all HEAD` >>& build.log
+echo "Cleaning thoroughly at $nPETSC_DIR" >>& build.log
+(git reset --hard; git clean -q -f -d -x -e build.log) >>& build.log
 # if externalpackage tarball also packs a git repo - one has to explicitly remove it
 rm -rf ./externalpackages
+echo "Currently building git branch:" `git describe --contains --all HEAD` >>& build.log
+git log -1 >>& build.log
 
 setenv PETSC_DIR $nPETSC_DIR
 set PETSC_ARCH=${ARCH}

bin/maint/startnightly

       endif
     endif
     echo "Cleaning and updating clone at $USR@$MACH $LOC"
-    $SSH $USR@$MACH $DASHN "cd $LOC ; git checkout -f ${BRANCH}; git pull -q "
+    $SSH $USR@$MACH $DASHN "cd $LOC ; git fetch -q origin; git checkout -f origin/${BRANCH} "
   else
     rsync -e ssh -az --delete  $pdir/ ${USR}@${MACH}:${LOC}
   endif

include/petscsys.h

 
 #if defined(PETSC_HAVE_POPEN)
 PETSC_EXTERN PetscErrorCode PetscPOpen(MPI_Comm,const char[],const char[],const char[],FILE **);
-PETSC_EXTERN PetscErrorCode PetscPClose(MPI_Comm,FILE*,PetscInt*);
+PETSC_EXTERN PetscErrorCode PetscPClose(MPI_Comm,FILE*,int*);
 #endif
 
 PETSC_EXTERN PetscErrorCode PetscSynchronizedPrintf(MPI_Comm,const char[],...);
 PETSC_EXTERN PetscErrorCode PetscGetTmp(MPI_Comm,char[],size_t);
 PETSC_EXTERN PetscErrorCode PetscFileRetrieve(MPI_Comm,const char[],char[],size_t,PetscBool *);
 PETSC_EXTERN PetscErrorCode PetscLs(MPI_Comm,const char[],char[],size_t,PetscBool *);
-PETSC_EXTERN PetscErrorCode PetscOpenSocket(char*,int,int*);
+PETSC_EXTERN PetscErrorCode PetscOpenSocket(const char[],int,int*);
 PETSC_EXTERN PetscErrorCode PetscWebServe(MPI_Comm,int);
 
 /*

src/dm/impls/plex/plexcreate.c

   (*newdm)->sf = dm->sf;
   mesh         = (DM_Plex*) dm->data;
   mesh->refct++;
-  (*newdm)->data = mesh;
+  (*newdm)->setupcalled = PETSC_TRUE;
+  (*newdm)->data        = mesh;
   ierr           = PetscObjectChangeTypeName((PetscObject) *newdm, DMPLEX);CHKERRQ(ierr);
   ierr           = DMInitialize_Plex(*newdm);CHKERRQ(ierr);
   ierr           = DMGetApplicationContext(dm, &ctx);CHKERRQ(ierr);

src/docs/tex/petscapp.bib

  doi =          {10.1098/rspa.2009.0434},
  url = {http://www.earth.ox.ac.uk/~richardk/publications/Katz_Worster_PRSA10.pdf}
 }
+@article{kyrkesmith2013stress,
+  title={Stress balances of ice streams in a vertically integrated, higher-order formulation},
+  author={Kyrke-Smith, T.M. and Katz, R.F. and Fowler, A.C.},
+  journal={Journal of Glaciology},
+  volume={59},
+  number={215},
+  pages={449},
+  year={2013}
+}
 @article{bueler2009shallow,
   title={{Shallow shelf approximation as a ``sliding law'' in a thermomechanically coupled ice sheet model}},
   author={Bueler, E. and Brown, J.},

src/docs/website/documentation/faq.html

       <h3><a name="gfortran">What Fortran compiler do you recommend for the Apple Mac OS X?</a></h3>
 
       <p>
-        (as of 11/6/2010) We recommend installing gfortran from <a
-          href="http://hpc.sourceforge.net/">http://hpc.sourceforge.net</a>.  They
-        have gfortran-4.6.0 (experimental) for Snow Leopard (10.6) and gfortran
-        4.4.1 (prerelease) for Leopard (10.5).
-      </p>
+        (as of 04/29/2013) We recommend installing gfortran from <a
+          href="http://hpc.sourceforge.net/">http://hpc.sourceforge.net</a>. They have gfortran-4.7.0 for Lion (10.7) and gfortran 4.8 for Mountain Lion (10.8). </p>
 
       <p>
         Please contact Apple at <a
 
       <h3><a name="nosaij">You have AIJ and BAIJ matrix formats, and SBAIJ for symmetric storage, how come no SAIJ?</a></h3>
 
-      Just for historical reasons, the SBAIJ format with blocksize one is just as
+        Just for historical reasons; the SBAIJ format with blocksize one is just as
       efficient as an SAIJ would be.
 
       <h3><a name="domaindecomposition">How do I use PETSc for Domain Decomposition?</a></h3>
 PETSC_COMM_WORLD = NewComm;
     </pre>
 
-      <h3><a name="redistribute">The When solving a system with Dirichlet boundary conditions I can use MatZeroRows() to eliminate the Dirichlet rows but this results in a non-symmetric system. How can I apply Dirichlet boundary conditions but keep the matrix symmetric?</a></h3>
+      <h3><a name="redistribute">When solving a system with Dirichlet boundary conditions I can use MatZeroRows() to eliminate the Dirichlet rows but this results in a non-symmetric system. How can I apply Dirichlet boundary conditions but keep the matrix symmetric?</a></h3>
 
       <p>
         For nonsymmetric systems put the appropriate boundary solutions in the
-        x vector and use MatZeroRows() followed by KSPSetOperators(). For symmetric
-        problems use MatZeroRowsColumns() instead.  If you have many Dirichlet
+        x vector and use <a href="http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/Mat/MatZeroRows.html">MatZeroRows()</a> followed by <a href="http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/KSP/KSPSetOperators.html">KSPSetOperators()</a>. For symmetric
+        problems use <a href="http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/Mat/MatZeroRowsColumns.html">MatZeroRowsColumns()</a> instead.  If you have many Dirichlet
         locations you can use MatZeroRows() (not MatZeroRowsColumns()) and
-        -ksp_type preonly -pc_type redistribute, see the manual page for
-        PCREDISTRIBUTE) and PETSc will repartition the parallel matrix for load
+        -ksp_type preonly -pc_type redistribute; see <a href="http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/PC/PCREDISTRIBUTE.html">
+        PCREDISTRIBUTE</a>) and PETSc will repartition the parallel matrix for load
         balancing; in this case the new matrix solved remains symmetric even though
         MatZeroRows() is used.
       </p>
 
       <p>
-        An alternative approach is when assemblying the matrix, (generating values
-        and passing them to the matrix), never include locations for the Dirichlet
-        grid points in the vector and matrix, instead take them into account as you
+        An alternative approach is, when assembling the matrix (generating values
+        and passing them to the matrix), never to include locations for the Dirichlet
+        grid points in the vector and matrix, instead taking them into account as you
         put the other values into the load.
       </p>
 
 
       <ol>
         <li>
-          Using the MATLAB Engine, this allows PETSc to automatically call MATLAB
-          to perform some specific computations. It does not allow MATLAB to be
+          Using the MATLAB Engine, allowing PETSc to automatically call MATLAB
+          to perform some specific computations. This does not allow MATLAB to be
           used interactively by the user. See the <a
             href="http://www.mcs.anl.gov/petsc/petsc-dev/docs/manualpages/Sys/PetscMatlabEngine.html">PetscMatlabEngine</a>.
         </li>
 
         <li>
-          To save PETSc Mat and Vecs to files that can be read from MATLAB use <a
+          To save PETSc Mats and Vecs to files that can be read from MATLAB use <a
             href="http://www.mcs.anl.gov/petsc/petsc-dev/docs/manualpages/Viewer/PetscViewerBinaryOpen.html">PetscViewerBinaryOpen()</a>
           viewer and VecView() or MatView() to save objects for MATLAB and
           VecLoad() and MatLoad() to get the objects that MATLAB has saved. See
           viewer that saves .mat files can then be loaded into MATLAB.
         </li>
         <li>
-          We are just being to develop in <a
+          We are just beginning to develop in <a
             href="../developers/index.html">petsc-dev</a> an API to call most of
           the PETSc function directly from MATLAB; we could use help in
           developing this. See bin/matlab/classes/PetscInitialize.m
         matrix-vector product via differencing, the residual norms computed by
         GMRES start to "drift" from the correct values. At the restart, we compute
         the residual norm directly, hence the "strange stuff," the difference
-        printed. The drifting, if it remains small, is harmless (doesn't effect the
+        printed. The drifting, if it remains small, is harmless (doesn't affect the
         accuracy of the solution that GMRES computes).
       </p>
 

src/ksp/pc/impls/bjacobi/bjacobi.c

   PetscFunctionReturn(0);
 }
 
-PETSC_EXTERN PetscErrorCode MatGetMultiProcBlock_MPIAIJ(Mat,MPI_Comm,MatReuse,Mat*);
+#include <petsc-private/matimpl.h>
 #undef __FUNCT__
 #define __FUNCT__ "PCSetUp_BJacobi_Multiproc"
 static PetscErrorCode PCSetUp_BJacobi_Multiproc(PC pc)
     subcomm         = mpjac->psubcomm->comm;
 
     /* Get matrix blocks of pmat */
-    ierr = MatGetMultiProcBlock_MPIAIJ(pc->pmat,subcomm,MAT_INITIAL_MATRIX,&mpjac->submats);CHKERRQ(ierr);
+    if (!pc->pmat->ops->getmultiprocblock) SETERRQ(PetscObjectComm((PetscObject)pc->pmat),PETSC_ERR_SUP,"No support for the requested operation");
+    ierr = (*pc->pmat->ops->getmultiprocblock)(pc->pmat,subcomm,MAT_INITIAL_MATRIX,&mpjac->submats);CHKERRQ(ierr);
 
     /* create a new PC that processors in each subcomm have copy of */
     ierr = PetscMalloc(sizeof(KSP),&jac->ksp);CHKERRQ(ierr);
     if (pc->flag == DIFFERENT_NONZERO_PATTERN) {
       /* destroy old matrix blocks, then get new matrix blocks */
       if (mpjac->submats) {ierr = MatDestroy(&mpjac->submats);CHKERRQ(ierr);}
-      ierr = MatGetMultiProcBlock_MPIAIJ(pc->pmat,subcomm,MAT_INITIAL_MATRIX,&mpjac->submats);CHKERRQ(ierr);
+      ierr = (*pc->pmat->ops->getmultiprocblock)(pc->pmat,subcomm,MAT_INITIAL_MATRIX,&mpjac->submats);CHKERRQ(ierr);
     } else {
-      ierr = MatGetMultiProcBlock_MPIAIJ(pc->pmat,subcomm,MAT_REUSE_MATRIX,&mpjac->submats);CHKERRQ(ierr);
+      ierr = (*pc->pmat->ops->getmultiprocblock)(pc->pmat,subcomm,MAT_REUSE_MATRIX,&mpjac->submats);CHKERRQ(ierr);
     }
     ierr = KSPSetOperators(jac->ksp[0],mpjac->submats,mpjac->submats,pc->flag);CHKERRQ(ierr);
   }

src/mat/impls/aij/mpi/mpb_aij.c

 
 #undef __FUNCT__
 #define __FUNCT__ "MatGetMultiProcBlock_MPIAIJ"
-/*
-    Developers Note: This is used directly by some preconditioners, hence is PETSC_EXTERN
-*/
-PETSC_EXTERN PetscErrorCode  MatGetMultiProcBlock_MPIAIJ(Mat mat, MPI_Comm subComm, MatReuse scall,Mat *subMat)
+PetscErrorCode  MatGetMultiProcBlock_MPIAIJ(Mat mat, MPI_Comm subComm, MatReuse scall,Mat *subMat)
 {
   PetscErrorCode ierr;
   Mat_MPIAIJ     *aij  = (Mat_MPIAIJ*)mat->data;

src/mat/impls/aij/mpi/mpiaij.h

 
 PETSC_INTERN PetscErrorCode MatGetSubMatrix_MPIAIJ(Mat,IS,IS,MatReuse,Mat*);
 PETSC_INTERN PetscErrorCode MatGetSubMatrix_MPIAIJ_Private (Mat,IS,IS,PetscInt,MatReuse,Mat*);
-PETSC_EXTERN PetscErrorCode MatGetMultiProcBlock_MPIAIJ(Mat,MPI_Comm,MatReuse,Mat*);
+PETSC_INTERN PetscErrorCode MatGetMultiProcBlock_MPIAIJ(Mat,MPI_Comm,MatReuse,Mat*);
 
 PETSC_INTERN PetscErrorCode MatLoad_MPIAIJ(Mat,PetscViewer);
 PETSC_INTERN PetscErrorCode MatMatMult_MPIDense_MPIAIJ(Mat,Mat,MatReuse,PetscReal,Mat*);

src/mat/interface/matrix.c

 .  subMat - 'parallel submatrices each spans a given subcomm
 
   Notes:
-  The submatrix partition across processors is dicated by 'subComm' a
+  The submatrix partition across processors is dictated by 'subComm' a
   communicator obtained by com_split(comm). The comm_split
-  is not restriced to be grouped with consequitive original ranks.
+  is not restricted to be grouped with consecutive original ranks.
 
   Due the comm_split() usage, the parallel layout of the submatrices
   map directly to the layout of the original matrix [wrt the local

src/sys/classes/viewer/impls/socket/send.c

 
 .seealso:   PetscSocketListen(), PetscSocketEstablish()
 */
-PetscErrorCode  PetscOpenSocket(char *hostname,int portnum,int *t)
+PetscErrorCode  PetscOpenSocket(const char hostname[],int portnum,int *t)
 {
   struct sockaddr_in sa;
   struct hostent     *hp;

src/sys/error/err.c

   char           command[PETSC_MAX_PATH_LEN];
   const char     *pdir;
   FILE           *fp;
-  PetscInt       rval;
+  int            rval;
 
   PetscFunctionBegin;
   ierr = PetscGetPetscDir(&pdir);if (ierr) PetscFunctionReturn(ierr);

src/sys/fileio/fretrieve.c

   size_t         len = 0;
   PetscBool      flg1,flg2,flg3,sharedtmp,exists;
 #if defined(PETSC_HAVE_POPEN)
-  PetscInt       rval;
+  int            rval;
 #endif
 
   PetscFunctionBegin;

src/sys/fileio/mpiuopen.c

 .seealso: PetscFOpen(), PetscFClose(), PetscPOpen()
 
 @*/
-PetscErrorCode PetscPClose(MPI_Comm comm,FILE *fd,PetscInt *rval)
+PetscErrorCode PetscPClose(MPI_Comm comm,FILE *fd,int *rval)
 {
   PetscErrorCode ierr;
   PetscMPIInt    rank;

src/sys/objects/ftn-custom/zstart.c

 
 PETSC_EXTERN void MPIAPI PetscMaxSum_Local(void*,void*,PetscMPIInt*,MPI_Datatype*);
 PETSC_EXTERN PetscMPIInt MPIAPI Petsc_DelCounter(MPI_Comm,PetscMPIInt,void*,void*);
-PETSC_EXTERN PetscMPIInt MPIAPI Petsc_DelComm(MPI_Comm,PetscMPIInt,void*,void*);
+PETSC_EXTERN PetscMPIInt MPIAPI Petsc_DelComm_Inner(MPI_Comm,PetscMPIInt,void*,void*);
+PETSC_EXTERN PetscMPIInt MPIAPI Petsc_DelComm_Outer(MPI_Comm,PetscMPIInt,void*,void*);
 
 extern PetscErrorCode  PetscOptionsCheckInitial_Private(void);
 extern PetscErrorCode  PetscOptionsCheckInitial_Components(void);
   if (*ierr) {(*PetscErrorPrintf)("PetscInitialize:Creating MPI ops\n");return;}
   *ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelCounter,&Petsc_Counter_keyval,(void*)0);
   if (*ierr) {(*PetscErrorPrintf)("PetscInitialize:Creating MPI keyvals\n");return;}
-  *ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelComm,&Petsc_InnerComm_keyval,(void*)0);
+  *ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelComm_Outer,&Petsc_InnerComm_keyval,(void*)0);
   if (*ierr) {(*PetscErrorPrintf)("PetscInitialize:Creating MPI keyvals\n");return;}
-  *ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelComm,&Petsc_OuterComm_keyval,(void*)0);
+  *ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelComm_Inner,&Petsc_OuterComm_keyval,(void*)0);
   if (*ierr) {(*PetscErrorPrintf)("PetscInitialize:Creating MPI keyvals\n");return;}
 
   /*

src/sys/objects/pinit.c

 /*@
       PetscInitialized - Determine whether PETSc is initialized.
 
-7   Level: beginner
+   Level: beginner
 
 .seealso: PetscInitialize(), PetscInitializeNoArguments(), PetscInitializeFortran()
 @*/
-PetscErrorCode  PetscInitialized(PetscBool  *isInitialized)
+PetscErrorCode PetscInitialized(PetscBool  *isInitialized)
 {
-  PetscFunctionBegin;
-  PetscValidPointer(isInitialized, 1);
   *isInitialized = PetscInitializeCalled;
-  PetscFunctionReturn(0);
+  return 0;
 }
 
 #undef __FUNCT__
 @*/
 PetscErrorCode  PetscFinalized(PetscBool  *isFinalized)
 {
-  PetscFunctionBegin;
-  PetscValidPointer(isFinalized, 1);
   *isFinalized = PetscFinalizeCalled;
-  PetscFunctionReturn(0);
+  return 0;
 }
 
 extern PetscErrorCode PetscOptionsCheckInitial_Private(void);
 }
 
 #undef __FUNCT__
-#define __FUNCT__ "Petsc_DelComm"
+#define __FUNCT__ "Petsc_DelComm_Outer"
 /*
-  This does not actually free anything, it simply marks when a reference count to an internal or external MPI_Comm reaches zero and the
-  the external MPI_Comm drops its reference to the internal or external MPI_Comm
+  This is invoked on the outer comm as a result of either PetscCommDestroy() (via MPI_Attr_delete) or when the user
+  calls MPI_Comm_free().
+
+  This is the only entry point for breaking the links between inner and outer comms.
 
   This is called by MPI, not by users. This is called when MPI_Comm_free() is called on the communicator.
 
   Note: this is declared extern "C" because it is passed to MPI_Keyval_create()
 
 */
-PETSC_EXTERN PetscMPIInt MPIAPI Petsc_DelComm(MPI_Comm comm,PetscMPIInt keyval,void *attr_val,void *extra_state)
+PETSC_EXTERN PetscMPIInt MPIAPI Petsc_DelComm_Outer(MPI_Comm comm,PetscMPIInt keyval,void *attr_val,void *extra_state)
 {
   PetscErrorCode ierr;
   PetscMPIInt    flg;
-  MPI_Comm       icomm;
-  void           *ptr;
+  union {MPI_Comm comm; void *ptr;} icomm,ocomm;
 
   PetscFunctionBegin;
-  ierr = MPI_Attr_get(comm,Petsc_InnerComm_keyval,&ptr,&flg);CHKERRQ(ierr);
-  if (flg) {
-    /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
-    ierr = PetscMemcpy(&icomm,&ptr,sizeof(MPI_Comm));CHKERRQ(ierr);
-    ierr = MPI_Attr_get(icomm,Petsc_OuterComm_keyval,&ptr,&flg);CHKERRQ(ierr);
-    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected reference to outer comm");
-    ierr = MPI_Attr_delete(icomm,Petsc_OuterComm_keyval);CHKERRQ(ierr);
-    ierr = PetscInfo1(0,"User MPI_Comm m %ld is being freed, removing reference from inner PETSc comm to this outer comm\n",(long)comm);if (ierr) PetscFunctionReturn((PetscMPIInt)ierr);
-  } else {
-    ierr = PetscInfo1(0,"Removing reference to PETSc communicator imbedded in a user MPI_Comm m %ld\n",(long)comm);if (ierr) PetscFunctionReturn((PetscMPIInt)ierr);
-  }
+  if (keyval != Petsc_InnerComm_keyval) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Unexpected keyval");
+  icomm.ptr = attr_val;
+
+  ierr = MPI_Attr_get(icomm.comm,Petsc_OuterComm_keyval,&ocomm,&flg);CHKERRQ(ierr);
+  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected reference to outer comm");
+  if (ocomm.comm != comm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm has reference to non-matching outer comm");
+  ierr = MPI_Attr_delete(icomm.comm,Petsc_OuterComm_keyval);CHKERRQ(ierr); /* Calls Petsc_DelComm_Inner */
+  ierr = PetscInfo1(0,"User MPI_Comm %ld is being freed after removing reference from inner PETSc comm to this outer comm\n",(long)comm);if (ierr) PetscFunctionReturn((PetscMPIInt)ierr);
+  PetscFunctionReturn(MPI_SUCCESS);
+}
+
+#undef __FUNCT__
+#define __FUNCT__ "Petsc_DelComm_Inner"
+/*
+ * This is invoked on the inner comm when Petsc_DelComm_Outer calls MPI_Attr_delete.  It should not be reached any other way.
+ */
+PETSC_EXTERN PetscMPIInt MPIAPI Petsc_DelComm_Inner(MPI_Comm comm,PetscMPIInt keyval,void *attr_val,void *extra_state)
+{
+  PetscErrorCode ierr;
+
+  PetscFunctionBegin;
+  ierr = PetscInfo1(0,"Removing reference to PETSc communicator embedded in a user MPI_Comm %ld\n",(long)comm);if (ierr) PetscFunctionReturn((PetscMPIInt)ierr);
   PetscFunctionReturn(MPI_SUCCESS);
 }
 
      Attributes to be set on PETSc communicators
   */
   ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelCounter,&Petsc_Counter_keyval,(void*)0);CHKERRQ(ierr);
-  ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelComm,&Petsc_InnerComm_keyval,(void*)0);CHKERRQ(ierr);
-  ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelComm,&Petsc_OuterComm_keyval,(void*)0);CHKERRQ(ierr);
+  ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelComm_Outer,&Petsc_InnerComm_keyval,(void*)0);CHKERRQ(ierr);
+  ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelComm_Inner,&Petsc_OuterComm_keyval,(void*)0);CHKERRQ(ierr);
 
   /*
      Build the options database
     PetscCommCounter *counter;
     PetscMPIInt      flg;
     MPI_Comm         icomm;
-    void             *ptr;
-    ierr = MPI_Attr_get(PETSC_COMM_SELF,Petsc_InnerComm_keyval,&ptr,&flg);CHKERRQ(ierr);
+    union {MPI_Comm comm; void *ptr;} ucomm;
+    ierr = MPI_Attr_get(PETSC_COMM_SELF,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
     if (flg) {
-      /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
-      ierr = PetscMemcpy(&icomm,&ptr,sizeof(MPI_Comm));CHKERRQ(ierr);
+      icomm = ucomm.comm;
       ierr = MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
       if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");
 
       ierr = MPI_Attr_delete(icomm,Petsc_Counter_keyval);CHKERRQ(ierr);
       ierr = MPI_Comm_free(&icomm);CHKERRQ(ierr);
     }
-    ierr = MPI_Attr_get(PETSC_COMM_WORLD,Petsc_InnerComm_keyval,&ptr,&flg);CHKERRQ(ierr);
+    ierr = MPI_Attr_get(PETSC_COMM_WORLD,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
     if (flg) {
-      /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
-      ierr = PetscMemcpy(&icomm,&ptr,sizeof(MPI_Comm));CHKERRQ(ierr);
+      icomm = ucomm.comm;
       ierr = MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
       if (!flg) SETERRQ(PETSC_COMM_WORLD,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");
 

src/sys/objects/pname.c

   PetscFunctionBegin;
   PetscValidHeader(obj,1);
   if (!obj->name) {
-    void *commp = 0;
+    union {MPI_Comm comm; void *ptr; char raw[sizeof(MPI_Comm)]; } ucomm;
     ierr = MPI_Attr_get(obj->comm,Petsc_Counter_keyval,(void*)&counter,&flg);CHKERRQ(ierr);
     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Bad MPI communicator supplied; must be a PETSc communicator");
-    ierr = PetscMemcpy(&commp,&obj->comm,PetscMin(sizeof(commp),sizeof(obj->comm)));CHKERRQ(ierr);
-    ierr = MPI_Bcast((PETSC_UINTPTR_T*)&commp,1,MPIU_SIZE_T,0,obj->comm);CHKERRQ(ierr);
-    ierr = PetscSNPrintf(name,64,"%s_%p_%D",obj->class_name,commp,counter->namecount++);CHKERRQ(ierr);
+    ucomm.ptr = NULL;
+    ucomm.comm = obj->comm;
+    ierr = MPI_Bcast(ucomm.raw,sizeof(MPI_Comm),MPI_BYTE,0,obj->comm);CHKERRQ(ierr);
+    /* If the union has extra bytes, their value is implementation-dependent, but they will normally be what we set last
+     * in 'ucomm.ptr = NULL'.  This output is always implementation-defined (and varies from run to run) so the union
     * abuse is acceptable. */
+    ierr = PetscSNPrintf(name,64,"%s_%p_%D",obj->class_name,ucomm.ptr,counter->namecount++);CHKERRQ(ierr);
     ierr = PetscStrallocpy(name,&obj->name);CHKERRQ(ierr);
   }
   PetscFunctionReturn(0);

src/sys/objects/tagm.c

   ierr = MPI_Attr_get(comm_in,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
 
   if (!flg) {  /* this is NOT a PETSc comm */
-    void *ptr;
+    union {MPI_Comm comm; void *ptr;} ucomm;
     /* check if this communicator has a PETSc communicator imbedded in it */
-    ierr = MPI_Attr_get(comm_in,Petsc_InnerComm_keyval,&ptr,&flg);CHKERRQ(ierr);
+    ierr = MPI_Attr_get(comm_in,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
     if (!flg) {
       /* This communicator is not yet known to this system, so we duplicate it and make an internal communicator */
       ierr = MPI_Comm_dup(comm_in,comm_out);CHKERRQ(ierr);
       ierr = PetscInfo3(0,"Duplicating a communicator %ld %ld max tags = %d\n",(long)comm_in,(long)*comm_out,*maxval);CHKERRQ(ierr);
 
       /* save PETSc communicator inside user communicator, so we can get it next time */
-      /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
-      ierr = PetscMemcpy(&ptr,comm_out,sizeof(MPI_Comm));CHKERRQ(ierr);
-      ierr = MPI_Attr_put(comm_in,Petsc_InnerComm_keyval,ptr);CHKERRQ(ierr);
-      /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
-      ierr = PetscMemcpy(&ptr,&comm_in,sizeof(MPI_Comm));CHKERRQ(ierr);
-      ierr = MPI_Attr_put(*comm_out,Petsc_OuterComm_keyval,ptr);CHKERRQ(ierr);
+      ucomm.comm = *comm_out;   /* ONLY the comm part of the union is significant. */
+      ierr = MPI_Attr_put(comm_in,Petsc_InnerComm_keyval,ucomm.ptr);CHKERRQ(ierr);
+      ucomm.comm = comm_in;
+      ierr = MPI_Attr_put(*comm_out,Petsc_OuterComm_keyval,ucomm.ptr);CHKERRQ(ierr);
     } else {
+      *comm_out = ucomm.comm;
       /* pull out the inner MPI_Comm and hand it back to the caller */
-      /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
-      ierr = PetscMemcpy(comm_out,&ptr,sizeof(MPI_Comm));CHKERRQ(ierr);
       ierr = MPI_Attr_get(*comm_out,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
       if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Inner PETSc communicator does not have its tag/name counter attribute set");
       ierr = PetscInfo2(0,"Using internal PETSc communicator %ld %ld\n",(long)comm_in,(long)*comm_out);CHKERRQ(ierr);
   PetscCommCounter *counter;
   PetscMPIInt      flg;
   MPI_Comm         icomm = *comm,ocomm;
-  void             *ptr;
   PetscThreadComm  tcomm;
+  union {MPI_Comm comm; void *ptr;} ucomm;
 
   PetscFunctionBegin;
   if (*comm == MPI_COMM_NULL) PetscFunctionReturn(0);
   ierr = MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
   if (!flg) { /* not a PETSc comm, check if it has an inner comm */
-    ierr = MPI_Attr_get(icomm,Petsc_InnerComm_keyval,&ptr,&flg);CHKERRQ(ierr);
+    ierr = MPI_Attr_get(icomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"MPI_Comm does not have tag/name counter nor does it have inner MPI_Comm");
-    /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
-    ierr = PetscMemcpy(&icomm,&ptr,sizeof(MPI_Comm));CHKERRQ(ierr);
+    icomm = ucomm.comm;
     ierr = MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");
   }
 
   if (!counter->refcount) {
     /* if MPI_Comm has outer comm then remove reference to inner MPI_Comm from outer MPI_Comm */
-    ierr = MPI_Attr_get(icomm,Petsc_OuterComm_keyval,&ptr,&flg);CHKERRQ(ierr);
+    ierr = MPI_Attr_get(icomm,Petsc_OuterComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
     if (flg) {
-      /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
-      ierr = PetscMemcpy(&ocomm,&ptr,sizeof(MPI_Comm));CHKERRQ(ierr);
-      ierr = MPI_Attr_get(ocomm,Petsc_InnerComm_keyval,&ptr,&flg);CHKERRQ(ierr);
+      ocomm = ucomm.comm;
+      ierr = MPI_Attr_get(ocomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
       if (flg) {
         ierr = MPI_Attr_delete(ocomm,Petsc_InnerComm_keyval);CHKERRQ(ierr);
       } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Outer MPI_Comm %ld does not have expected reference to inner comm %d, problem with corrupted memory",(long int)ocomm,(long int)icomm);