Commits

BarryFSmith committed 76ec155

Fixed many examples that worked incorrectly with 64-bit indices

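The recurring pattern in these fixes: when PETSc is configured --with-64-bit-indices, PetscInt becomes a 64-bit integer while MPI ranks, communicator sizes, and message counts remain plain int (PetscMPIInt). Variables holding communicator sizes therefore need PetscMPIInt rather than PetscInt, reductions on PetscInt values need MPIU_INT instead of MPI_INT, and a PetscInt read from the options database must go through PetscMPIIntCast() before being used where an int-sized value is expected. Below is a minimal standalone sketch of that pattern; it is illustrative only and not taken from the commit (the option name -nsub and the variable names are made up).

#include <petscsys.h>

int main(int argc,char **argv)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size;     /* MPI ranks and communicator sizes are always plain int */
  PetscMPIInt    nsubmpi;
  PetscInt       n = 10,N,nsub; /* PetscInt is 64 bits with --with-64-bit-indices */

  PetscInitialize(&argc,&argv,(char*)0,NULL);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);

  /* reductions on PetscInt must use MPIU_INT, which matches PetscInt's width; MPI_INT does not */
  ierr = MPI_Allreduce(&n,&N,1,MPIU_INT,MPI_SUM,PETSC_COMM_WORLD);CHKERRQ(ierr);

  /* options are read into a PetscInt; convert (with overflow check) before using it as an MPI-sized value */
  nsub = size;
  ierr = PetscOptionsGetInt(NULL,"-nsub",&nsub,NULL);CHKERRQ(ierr);
  ierr = PetscMPIIntCast(nsub,&nsubmpi);CHKERRQ(ierr);

  ierr = PetscPrintf(PETSC_COMM_WORLD,"N %D nsub %d\n",N,(int)nsubmpi);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}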

Files changed (30)

src/ksp/ksp/examples/tests/ex37.c

     PetscMPIInt color,subrank,duprank,subsize;
     duprank = size-1 - rank;
     subsize = size/nsubcomm;
-    if (subsize*nsubcomm != size) SETERRQ2(comm,PETSC_ERR_SUP,"This example requires nsubcomm %D divides nproc %D",nsubcomm,size);
+    if (subsize*nsubcomm != size) SETERRQ2(comm,PETSC_ERR_SUP,"This example requires nsubcomm %D divides size %D",nsubcomm,size);
     color   = duprank/subsize;
     subrank = duprank - color*subsize;
     ierr    = PetscSubcommSetTypeGeneral(psubcomm,color,subrank);CHKERRQ(ierr);

src/ksp/ksp/examples/tutorials/ex42.c

 #define __FUNCT__ "DAViewVTK_write_PieceExtend"
 PetscErrorCode DAViewVTK_write_PieceExtend(FILE *vtk_fp,PetscInt indent_level,DM da,const char local_file_prefix[])
 {
-  PetscMPIInt    nproc,rank;
+  PetscMPIInt    size,rank;
   MPI_Comm       comm;
   const PetscInt *lx,*ly,*lz;
   PetscInt       M,N,P,pM,pN,pP,sum,*olx,*oly,*olz;
   PetscFunctionBeginUser;
   /* create file name */
   PetscObjectGetComm((PetscObject)da,&comm);
-  MPI_Comm_size(comm,&nproc);
+  MPI_Comm_size(comm,&size);
   MPI_Comm_rank(comm,&rank);
 
   ierr = DMDAGetInfo(da,0,&M,&N,&P,&pM,&pN,&pP,0,&stencil,0,0,0,0);CHKERRQ(ierr);
 PetscErrorCode DAView_3DVTK_PStructuredGrid(DM da,const char file_prefix[],const char local_file_prefix[])
 {
   MPI_Comm       comm;
-  PetscMPIInt    nproc,rank;
+  PetscMPIInt    size,rank;
   char           vtk_filename[PETSC_MAX_PATH_LEN];
   FILE           *vtk_fp = NULL;
   PetscInt       M,N,P,si,sj,sk,nx,ny,nz;
   PetscFunctionBeginUser;
   /* only master generates this file */
   PetscObjectGetComm((PetscObject)da,&comm);
-  MPI_Comm_size(comm,&nproc);
+  MPI_Comm_size(comm,&size);
   MPI_Comm_rank(comm,&rank);
 
   if (rank != 0) PetscFunctionReturn(0);

src/ksp/ksp/examples/tutorials/ex56.c

   ierr = PetscOptionsBegin(comm,NULL,"3D bilinear Q1 elasticity options","");CHKERRQ(ierr);
   {
     char nestring[256];
-    ierr = PetscSNPrintf(nestring,sizeof nestring,"number of elements in each direction, ne+1 must be a multiple of %D (nprocs^{1/3})",(PetscInt)(PetscPowReal((PetscReal)npe,1./3.) + .5));CHKERRQ(ierr);
+    ierr = PetscSNPrintf(nestring,sizeof nestring,"number of elements in each direction, ne+1 must be a multiple of %D (sizes^{1/3})",(PetscInt)(PetscPowReal((PetscReal)npe,1./3.) + .5));CHKERRQ(ierr);
     ierr = PetscOptionsInt("-ne",nestring,"",ne,&ne,NULL);CHKERRQ(ierr);
     ierr = PetscOptionsBool("-log_stages","Log stages of solve separately","",log_stages,&log_stages,NULL);CHKERRQ(ierr);
     ierr = PetscOptionsReal("-alpha","material coefficient inside circle","",soft_alpha,&soft_alpha,NULL);CHKERRQ(ierr);

src/ksp/ksp/examples/tutorials/ex59.c

 static PetscErrorCode InitializeDomainData(DomainData *dd)
 {
   PetscErrorCode ierr;
-  PetscMPIInt    nprocs,rank;
+  PetscMPIInt    sizes,rank;
   PetscInt       factor;
 
   PetscFunctionBeginUser;
   dd->gcomm = PETSC_COMM_WORLD;
-  ierr      = MPI_Comm_size(dd->gcomm,&nprocs);
+  ierr      = MPI_Comm_size(dd->gcomm,&sizes);
   ierr      = MPI_Comm_rank(dd->gcomm,&rank);
   /* test data passed in */
-  if (nprocs<2) SETERRQ(dd->gcomm,PETSC_ERR_USER,"This is not a uniprocessor test");
+  if (sizes<2) SETERRQ(dd->gcomm,PETSC_ERR_USER,"This is not a uniprocessor test");
   /* Get informations from command line */
   /* Processors/subdomains per dimension */
   /* Default is 1d problem */
-  dd->npx = nprocs;
+  dd->npx = sizes;
   dd->npy = 0;
   dd->npz = 0;
   dd->dim = 1;
   dd->scalingfactor = PetscPowScalar(10.0,(PetscScalar)factor*PetscPowScalar(-1.0,(PetscScalar)rank));
   /* test data passed in */
   if (dd->dim==1) {
-    if (nprocs!=dd->npx) SETERRQ(dd->gcomm,PETSC_ERR_USER,"Number of mpi procs in 1D must be equal to npx");
+    if (sizes!=dd->npx) SETERRQ(dd->gcomm,PETSC_ERR_USER,"Number of mpi procs in 1D must be equal to npx");
     if (dd->nex<dd->npx) SETERRQ(dd->gcomm,PETSC_ERR_USER,"Number of elements per dim must be greater/equal than number of procs per dim");
   } else if (dd->dim==2) {
-    if (nprocs!=dd->npx*dd->npy) SETERRQ(dd->gcomm,PETSC_ERR_USER,"Number of mpi procs in 2D must be equal to npx*npy");
+    if (sizes!=dd->npx*dd->npy) SETERRQ(dd->gcomm,PETSC_ERR_USER,"Number of mpi procs in 2D must be equal to npx*npy");
     if (dd->nex<dd->npx || dd->ney<dd->npy) SETERRQ(dd->gcomm,PETSC_ERR_USER,"Number of elements per dim must be greater/equal than number of procs per dim");
   } else {
-    if (nprocs!=dd->npx*dd->npy*dd->npz) SETERRQ(dd->gcomm,PETSC_ERR_USER,"Number of mpi procs in 3D must be equal to npx*npy*npz");
+    if (sizes!=dd->npx*dd->npy*dd->npz) SETERRQ(dd->gcomm,PETSC_ERR_USER,"Number of mpi procs in 3D must be equal to npx*npy*npz");
     if (dd->nex<dd->npx || dd->ney<dd->npy || dd->nez<dd->npz) SETERRQ(dd->gcomm,PETSC_ERR_USER,"Number of elements per dim must be greater/equal than number of procs per dim");
   }
   PetscFunctionReturn(0);

src/ksp/pc/impls/bddc/bddcfetidp.c

   PetscBool      skip_node,fully_redundant;
   PetscInt       i,j,k,s,n_boundary_dofs,n_global_lambda,n_vertices,partial_sum;
   PetscInt       n_local_lambda,n_lambda_for_dof,dual_size,n_neg_values,n_pos_values;
-  PetscMPIInt    rank,nprocs,buf_size,neigh;
+  PetscMPIInt    rank,size,buf_size,neigh;
   PetscScalar    scalar_value;
   PetscInt       *vertex_indices;
   PetscInt       *dual_dofs_boundary_indices,*aux_local_numbering_1,*aux_global_numbering;
   PetscFunctionBegin;
   ierr = PetscObjectGetComm((PetscObject)(fetidpmat_ctx->pc),&comm);CHKERRQ(ierr);
   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
-  ierr = MPI_Comm_size(comm,&nprocs);CHKERRQ(ierr);
+  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
 
   /* Default type of lagrange multipliers is non-redundant */
   fully_redundant = PETSC_FALSE;

src/ksp/pc/impls/redistribute/redistribute.c

   PetscLayout       map,nmap;
   PetscMPIInt       size,imdex,tag,n;
   PetscInt          *source = NULL;
-  PetscMPIInt       *nprocs = NULL,nrecvs;
+  PetscMPIInt       *sizes = NULL,nrecvs;
   PetscInt          j,nsends;
   PetscInt          *owner = NULL,*starts = NULL,count,slen;
   PetscInt          *rvalues,*svalues,recvtotal;
         load balance the non-diagonal rows
     */
     /*  count number of contributors to each processor */
-    ierr   = PetscMalloc2(size,PetscMPIInt,&nprocs,cnt,PetscInt,&owner);CHKERRQ(ierr);
-    ierr   = PetscMemzero(nprocs,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
+    ierr   = PetscMalloc2(size,PetscMPIInt,&sizes,cnt,PetscInt,&owner);CHKERRQ(ierr);
+    ierr   = PetscMemzero(sizes,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
     j      = 0;
     nsends = 0;
     for (i=rstart; i<rend; i++) {
       if (i < nmap->range[j]) j = 0;
       for (; j<size; j++) {
         if (i < nmap->range[j+1]) {
-          if (!nprocs[j]++) nsends++;
+          if (!sizes[j]++) nsends++;
           owner[i-rstart] = j;
           break;
         }
       }
     }
     /* inform other processors of number of messages and max length*/
-    ierr      = PetscGatherNumberOfMessages(comm,NULL,nprocs,&nrecvs);CHKERRQ(ierr);
-    ierr      = PetscGatherMessageLengths(comm,nsends,nrecvs,nprocs,&onodes1,&olengths1);CHKERRQ(ierr);
+    ierr      = PetscGatherNumberOfMessages(comm,NULL,sizes,&nrecvs);CHKERRQ(ierr);
+    ierr      = PetscGatherMessageLengths(comm,nsends,nrecvs,sizes,&onodes1,&olengths1);CHKERRQ(ierr);
     ierr      = PetscSortMPIIntWithArray(nrecvs,onodes1,olengths1);CHKERRQ(ierr);
     recvtotal = 0; for (i=0; i<nrecvs; i++) recvtotal += olengths1[i];
 
     */
     ierr      = PetscMalloc3(cnt,PetscInt,&svalues,nsends,MPI_Request,&send_waits,size,PetscInt,&starts);CHKERRQ(ierr);
     starts[0] = 0;
-    for (i=1; i<size; i++) starts[i] = starts[i-1] + nprocs[i-1];
+    for (i=1; i<size; i++) starts[i] = starts[i-1] + sizes[i-1];
     for (i=0; i<cnt; i++)  svalues[starts[owner[i]]++] = rows[i];
     for (i=0; i<cnt; i++)  rows[i] = rows[i] - rstart;
     red->drows = drows;
     ierr       = PetscFree(rows);CHKERRQ(ierr);
 
     starts[0] = 0;
-    for (i=1; i<size; i++) starts[i] = starts[i-1] + nprocs[i-1];
+    for (i=1; i<size; i++) starts[i] = starts[i-1] + sizes[i-1];
     count = 0;
     for (i=0; i<size; i++) {
-      if (nprocs[i]) {
-        ierr = MPI_Isend(svalues+starts[i],nprocs[i],MPIU_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
+      if (sizes[i]) {
+        ierr = MPI_Isend(svalues+starts[i],sizes[i],MPIU_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
       }
     }
 
     ierr = PetscFree(olengths1);CHKERRQ(ierr);
     ierr = PetscFree(onodes1);CHKERRQ(ierr);
     ierr = PetscFree3(rvalues,source,recv_waits);CHKERRQ(ierr);
-    ierr = PetscFree2(nprocs,owner);CHKERRQ(ierr);
+    ierr = PetscFree2(sizes,owner);CHKERRQ(ierr);
     if (nsends) {   /* wait on sends */
       ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
       ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);

src/mat/examples/tests/ex125.c

   Mat            A,RHS,C,F,X;
   Vec            u,x,b;
   PetscErrorCode ierr;
-  PetscMPIInt    rank,nproc;
+  PetscMPIInt    rank,size;
   PetscInt       i,m,n,nfact,nsolve,nrhs,ipack=0;
   PetscScalar    *array,rval;
   PetscReal      norm,tol=1.e-12;
 
   PetscInitialize(&argc,&args,(char*)0,help);
   ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank);CHKERRQ(ierr);
-  ierr = MPI_Comm_size(PETSC_COMM_WORLD, &nproc);CHKERRQ(ierr);
+  ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size);CHKERRQ(ierr);
 
   /* Determine file from which we read the matrix A */
   ierr = PetscOptionsGetString(NULL,"-f",file,PETSC_MAX_PATH_LEN,&flg);CHKERRQ(ierr);

src/mat/examples/tests/ex130.c

   Mat            A,F;
   Vec            u,x,b;
   PetscErrorCode ierr;
-  PetscMPIInt    rank,nproc;
+  PetscMPIInt    rank,size;
   PetscInt       m,n,nfact,ipack=0;
   PetscReal      norm,tol=1.e-12,Anorm;
   IS             perm,iperm;
 
   PetscInitialize(&argc,&args,(char*)0,help);
   ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank);CHKERRQ(ierr);
-  ierr = MPI_Comm_size(PETSC_COMM_WORLD, &nproc);CHKERRQ(ierr);
+  ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size);CHKERRQ(ierr);
 
   /* Determine file from which we read the matrix A */
   ierr = PetscOptionsGetString(NULL,"-f",file,PETSC_MAX_PATH_LEN,&flg);CHKERRQ(ierr);

src/mat/examples/tests/ex152.c

 {
   PetscErrorCode ierr;
   PetscBool      flg;
-  int            rank, size;
-  int            i, ni, status;
-  idx_t          *vtxdist, *xadj, *adjncy, *vwgt, *part;
+  PetscMPIInt    rank, size;
+  int            i, status;
+  idx_t          ni,isize,*vtxdist, *xadj, *adjncy, *vwgt, *part;
   idx_t          wgtflag=0, numflag=0, ncon=1, ndims=3, edgecut=0;
   idx_t          options[5];
   real_t         *xyz, *tpwgts, ubvec[1];
   vwgt = NULL;
 
   for (i = 0; i < size; i++) tpwgts[i] = 1. / size;
+  isize = size;
 
   ubvec[0]   = 1.05;
   options[0] = 1;
 
   ierr   = MPI_Comm_dup(MPI_COMM_WORLD, &comm);CHKERRQ(ierr);
   status = ParMETIS_V3_PartGeomKway(vtxdist, xadj, adjncy, vwgt,
-                                    NULL, &wgtflag, &numflag, &ndims, xyz, &ncon, &size, tpwgts, ubvec,
+                                    NULL, &wgtflag, &numflag, &ndims, xyz, &ncon, &isize, tpwgts, ubvec,
                                     options, &edgecut, part, &comm);CHKERRQPARMETIS(status);
   ierr = MPI_Comm_free(&comm);CHKERRQ(ierr);
 

src/mat/examples/tests/ex168.c

   Mat            A,F;
   Vec            u,x,b;
   PetscErrorCode ierr;
-  PetscMPIInt    rank,nproc;
+  PetscMPIInt    rank,size;
   PetscInt       m,n,nfact;
   PetscReal      norm,tol=1.e-12,Anorm;
   IS             perm,iperm;
 
   PetscInitialize(&argc,&args,(char*)0,help);
   ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank);CHKERRQ(ierr);
-  ierr = MPI_Comm_size(PETSC_COMM_WORLD, &nproc);CHKERRQ(ierr);
+  ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size);CHKERRQ(ierr);
 
   /* Determine file from which we read the matrix A */
   ierr = PetscOptionsGetString(NULL,"-f",file,PETSC_MAX_PATH_LEN,&flg);CHKERRQ(ierr);

src/mat/examples/tests/ex32.c

 
+#include <petscmat.h>
+
+#if !defined(PETSC_USE_64BIT_INDICES)
 static char help[] = "Reads in a matrix and vector in ASCII slap format. Writes\n\
 them using the PETSc sparse format. Input parameters are:\n\
   -fin <filename> : input file\n\
   -fout <filename> : output file\n\n";
-
-#include <petscmat.h>
+#endif
 
 #undef __FUNCT__
 #define __FUNCT__ "main"
 int main(int argc,char **args)
 {
+#if !defined(PETSC_USE_64BIT_INDICES)
   Mat            A;
   Vec            b;
   char           filein[PETSC_MAX_PATH_LEN],fileout[PETSC_MAX_PATH_LEN];
   ierr = MatDestroy(&A);CHKERRQ(ierr);
 
   ierr = PetscFinalize();
+#endif
   return 0;
 }
 

src/mat/examples/tests/ex50.c

 
+#include <petscmat.h>
+
+#if !defined(PETSC_USE_64BIT_INDICES)
 static char help[] = "Reads in a matrix and vector in ASCII format. Writes\n\
 them using the PETSc sparse format. Input parameters are:\n\
   -fin <filename> : input file\n\
   -fout <filename> : output file\n\n";
+#endif
 
-#include <petscmat.h>
 
 #undef __FUNCT__
 #define __FUNCT__ "main"
 int main(int argc,char **args)
 {
+#if !defined(PETSC_USE_64BIT_INDICES)
   Mat            A;
   Vec            b;
   char           filein[PETSC_MAX_PATH_LEN],finname[PETSC_MAX_PATH_LEN],fileout[PETSC_MAX_PATH_LEN];
   ierr = MatDestroy(&A);CHKERRQ(ierr);
 
   ierr = PetscFinalize();
+#endif
   return 0;
 }
 

src/mat/examples/tests/ex67f.F

       IS       isrow
 
       call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
-
+#if defined(PETSC_USE_64BIT_INDICES)
+      call PetscViewerBinaryOpen(PETSC_COMM_WORLD,                          &
+     & '../../../../share/petsc/datafiles/matrices/' //                       &
+     & 'ns-real-int64-float64',                                               &
+     &                          FILE_MODE_READ,v,ierr)
+#else
       call PetscViewerBinaryOpen(PETSC_COMM_WORLD,                          &
      & '../../../../share/petsc/datafiles/matrices/' //                       &
      & 'ns-real-int32-float64',                                               &
      &                          FILE_MODE_READ,v,ierr)
+#endif
 
       call MatCreate(PETSC_COMM_WORLD,A,ierr)
       call MatSetType(A, MATSEQAIJ,ierr)

src/mat/examples/tests/ex9.c

   if (size > 1) {
     MPI_Comm       subcomm;
     Mat            Credundant;
-    PetscMPIInt    nsubcomms=size,subsize;
+    PetscInt       Nsubcomms = size;
+    PetscMPIInt    nsubcomms,subsize;
 
-    ierr = PetscOptionsGetInt(NULL,"-nsubcomms",&nsubcomms,NULL);CHKERRQ(ierr);
+    ierr = PetscOptionsGetInt(NULL,"-nsubcomms",&Nsubcomms,NULL);CHKERRQ(ierr);
+    ierr = PetscMPIIntCast(Nsubcomms,&nsubcomms);CHKERRQ(ierr);
     if (nsubcomms > size) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"nsubcomms %d cannot > ncomms %d",nsubcomms,size);
 
     ierr = MatGetRedundantMatrix(C,nsubcomms,MPI_COMM_NULL,MAT_INITIAL_MATRIX,&Credundant);CHKERRQ(ierr);

src/mat/examples/tests/makefile

                                  ex15.PETSc runex15 ex15.rm ex20.PETSc runex20 ex20.rm ex21.PETSc runex21 ex21.rm ex35.PETSc \
                                  runex35 ex35.rm  ex48.PETSc runex48 ex48.rm ex71.PETSc ex71.rm \
                                  ex95.PETSc  ex95.rm ex101.PETSc runex101 ex101.rm
-TESTEXAMPLES_C_NOCOMPLEX       = ex32.PETSc ex32.rm ex41.PETSc runex41 ex41.rm  ex50.PETSc ex50.rm
-TESTEXAMPLES_DATAFILESPATH     = ex40.PETSc runex40 ex40.rm ex42.PETSc runex42 \
+TESTEXAMPLES_C_NOCOMPLEX       = ex32.PETSc ex32.rm  ex50.PETSc ex50.rm
+TESTEXAMPLES_DATAFILESPATH     = ex40.PETSc runex40 ex40.rm ex41.PETSc runex41 ex41.rm ex42.PETSc runex42 \
                                  ex42.rm  ex41.PETSc ex41.rm ex47.PETSc ex47.rm ex53.PETSc runex53 ex53.rm \
                                  ex94.PETSc runex94_matmatmult runex94_matmatmult_2 runex94_scalable0 runex94_scalable1 \
                                  runex94_2_mattransposematmult_nonscalable runex94_2_mattransposematmult_matmatmult ex94.rm \

src/mat/examples/tutorials/ex5.c

 
   /* error checking on files */
   if (header[0] != MAT_FILE_CLASSID) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
-  ierr = MPI_Allreduce(header+2,&N,1,MPI_INT,MPI_SUM,comm);CHKERRQ(ierr);
+  ierr = MPI_Allreduce(header+2,&N,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
   if (N != size*header[2]) SETERRQ(PETSC_COMM_SELF,1,"All files must have matrices with the same number of total columns");
 
   /* number of rows in matrix is sum of rows in all files */
   m    = header[1]; N = header[2];
-  ierr = MPI_Allreduce(&m,&M,1,MPI_INT,MPI_SUM,comm);CHKERRQ(ierr);
+  ierr = MPI_Allreduce(&m,&M,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
 
   /* determine rows of matrices owned by each process */
   ierr       = PetscMalloc((size+1)*sizeof(PetscInt),&rowners);CHKERRQ(ierr);

src/mat/impls/aij/mpi/mpiaij.c

   PetscErrorCode    ierr;
   PetscMPIInt       size = l->size,imdex,n,rank = l->rank,tag = ((PetscObject)A)->tag,lastidx = -1;
   PetscInt          i,*owners = A->rmap->range;
-  PetscInt          *nprocs,j,idx,nsends;
+  PetscInt          *sizes,j,idx,nsends;
   PetscInt          nmax,*svalues,*starts,*owner,nrecvs;
   PetscInt          *rvalues,count,base,slen,*source;
   PetscInt          *lens,*lrows,*values,m;
   PetscFunctionBegin;
   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
   /*  first count number of contributors to each processor */
-  ierr = PetscMalloc(2*size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
-  ierr = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr);
+  ierr = PetscMalloc(2*size*sizeof(PetscInt),&sizes);CHKERRQ(ierr);
+  ierr = PetscMemzero(sizes,2*size*sizeof(PetscInt));CHKERRQ(ierr);
   ierr = PetscMalloc((N+1)*sizeof(PetscInt),&owner);CHKERRQ(ierr); /* see note*/
   j    = 0;
   for (i=0; i<N; i++) {
     lastidx = idx;
     for (; j<size; j++) {
       if (idx >= owners[j] && idx < owners[j+1]) {
-        nprocs[2*j]++;
-        nprocs[2*j+1] = 1;
+        sizes[2*j]++;
+        sizes[2*j+1] = 1;
         owner[i]      = j;
 #if defined(PETSC_DEBUG)
         found = PETSC_TRUE;
     found = PETSC_FALSE;
 #endif
   }
-  nsends = 0;  for (i=0; i<size; i++) nsends += nprocs[2*i+1];
+  nsends = 0;  for (i=0; i<size; i++) nsends += sizes[2*i+1];
 
   /* inform other processors of number of messages and max length*/
-  ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr);
+  ierr = PetscMaxSum(comm,sizes,&nmax,&nrecvs);CHKERRQ(ierr);
 
   /* post receives:   */
   ierr = PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(PetscInt),&rvalues);CHKERRQ(ierr);
   ierr = PetscMalloc((size+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
 
   starts[0] = 0;
-  for (i=1; i<size; i++) starts[i] = starts[i-1] + nprocs[2*i-2];
+  for (i=1; i<size; i++) starts[i] = starts[i-1] + sizes[2*i-2];
   for (i=0; i<N; i++) svalues[starts[owner[i]]++] = rows[i];
 
   starts[0] = 0;
-  for (i=1; i<size+1; i++) starts[i] = starts[i-1] + nprocs[2*i-2];
+  for (i=1; i<size+1; i++) starts[i] = starts[i-1] + sizes[2*i-2];
   count = 0;
   for (i=0; i<size; i++) {
-    if (nprocs[2*i+1]) {
-      ierr = MPI_Isend(svalues+starts[i],nprocs[2*i],MPIU_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
+    if (sizes[2*i+1]) {
+      ierr = MPI_Isend(svalues+starts[i],sizes[2*i],MPIU_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
     }
   }
   ierr = PetscFree(starts);CHKERRQ(ierr);
   ierr = PetscFree(rvalues);CHKERRQ(ierr);
   ierr = PetscFree2(lens,source);CHKERRQ(ierr);
   ierr = PetscFree(owner);CHKERRQ(ierr);
-  ierr = PetscFree(nprocs);CHKERRQ(ierr);
+  ierr = PetscFree(sizes);CHKERRQ(ierr);
   /* lrows are the local rows to be zeroed, slen is the number of local rows */
 
   /* zero diagonal part of matrix */

src/mat/impls/dense/mpi/mpidense.c

   Mat_MPIDense      *l = (Mat_MPIDense*)A->data;
   PetscErrorCode    ierr;
   PetscInt          i,*owners = A->rmap->range;
-  PetscInt          *nprocs,j,idx,nsends;
+  PetscInt          *sizes,j,idx,nsends;
   PetscInt          nmax,*svalues,*starts,*owner,nrecvs;
   PetscInt          *rvalues,tag = ((PetscObject)A)->tag,count,base,slen,*source;
   PetscInt          *lens,*lrows,*values;
   if (A->rmap->N != A->cmap->N) SETERRQ(comm,PETSC_ERR_SUP,"Only handles square matrices");
   if (A->rmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only handles matrices with identical column and row ownership");
   /*  first count number of contributors to each processor */
-  ierr = PetscMalloc(2*size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
-  ierr = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr);
+  ierr = PetscMalloc(2*size*sizeof(PetscInt),&sizes);CHKERRQ(ierr);
+  ierr = PetscMemzero(sizes,2*size*sizeof(PetscInt));CHKERRQ(ierr);
   ierr = PetscMalloc((N+1)*sizeof(PetscInt),&owner);CHKERRQ(ierr);  /* see note*/
   for (i=0; i<N; i++) {
     idx   = rows[i];
     found = PETSC_FALSE;
     for (j=0; j<size; j++) {
       if (idx >= owners[j] && idx < owners[j+1]) {
-        nprocs[2*j]++; nprocs[2*j+1] = 1; owner[i] = j; found = PETSC_TRUE; break;
+        sizes[2*j]++; sizes[2*j+1] = 1; owner[i] = j; found = PETSC_TRUE; break;
       }
     }
     if (!found) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index out of range");
   }
   nsends = 0;
-  for (i=0; i<size; i++) nsends += nprocs[2*i+1];
+  for (i=0; i<size; i++) nsends += sizes[2*i+1];
 
   /* inform other processors of number of messages and max length*/
-  ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr);
+  ierr = PetscMaxSum(comm,sizes,&nmax,&nrecvs);CHKERRQ(ierr);
 
   /* post receives:   */
   ierr = PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(PetscInt),&rvalues);CHKERRQ(ierr);
   ierr = PetscMalloc((size+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
 
   starts[0] = 0;
-  for (i=1; i<size; i++) starts[i] = starts[i-1] + nprocs[2*i-2];
+  for (i=1; i<size; i++) starts[i] = starts[i-1] + sizes[2*i-2];
   for (i=0; i<N; i++) svalues[starts[owner[i]]++] = rows[i];
 
   starts[0] = 0;
-  for (i=1; i<size+1; i++) starts[i] = starts[i-1] + nprocs[2*i-2];
+  for (i=1; i<size+1; i++) starts[i] = starts[i-1] + sizes[2*i-2];
   count = 0;
   for (i=0; i<size; i++) {
-    if (nprocs[2*i+1]) {
-      ierr = MPI_Isend(svalues+starts[i],nprocs[2*i],MPIU_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
+    if (sizes[2*i+1]) {
+      ierr = MPI_Isend(svalues+starts[i],sizes[2*i],MPIU_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
     }
   }
   ierr = PetscFree(starts);CHKERRQ(ierr);
   ierr = PetscFree(rvalues);CHKERRQ(ierr);
   ierr = PetscFree2(lens,source);CHKERRQ(ierr);
   ierr = PetscFree(owner);CHKERRQ(ierr);
-  ierr = PetscFree(nprocs);CHKERRQ(ierr);
+  ierr = PetscFree(sizes);CHKERRQ(ierr);
 
   /* fix right hand side if needed */
   if (x && b) {

src/mat/utils/matstash.c

   PetscScalar        **rvalues,*svalues;
   MPI_Comm           comm = stash->comm;
   MPI_Request        *send_waits,*recv_waits,*recv_waits1,*recv_waits2;
-  PetscMPIInt        *nprocs,*nlengths,nreceives;
+  PetscMPIInt        *sizes,*nlengths,nreceives;
   PetscInt           *sp_idx,*sp_idy;
   PetscScalar        *sp_val;
   PetscMatStashSpace space,space_next;
   bs2 = stash->bs*stash->bs;
 
   /*  first count number of contributors to each processor */
-  ierr = PetscMalloc(size*sizeof(PetscMPIInt),&nprocs);CHKERRQ(ierr);
-  ierr = PetscMemzero(nprocs,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
+  ierr = PetscMalloc(size*sizeof(PetscMPIInt),&sizes);CHKERRQ(ierr);
+  ierr = PetscMemzero(sizes,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
   ierr = PetscMalloc(size*sizeof(PetscMPIInt),&nlengths);CHKERRQ(ierr);
   ierr = PetscMemzero(nlengths,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
   ierr = PetscMalloc((stash->n+1)*sizeof(PetscInt),&owner);CHKERRQ(ierr);
   /* Now check what procs get messages - and compute nsends. */
   for (i=0, nsends=0; i<size; i++) {
     if (nlengths[i]) {
-      nprocs[i] = 1; nsends++;
+      sizes[i] = 1; nsends++;
     }
   }
 
   {PetscMPIInt *onodes,*olengths;
    /* Determine the number of messages to expect, their lengths, from from-ids */
-   ierr = PetscGatherNumberOfMessages(comm,nprocs,nlengths,&nreceives);CHKERRQ(ierr);
+   ierr = PetscGatherNumberOfMessages(comm,sizes,nlengths,&nreceives);CHKERRQ(ierr);
    ierr = PetscGatherMessageLengths(comm,nsends,nreceives,nlengths,&onodes,&olengths);CHKERRQ(ierr);
    /* since clubbing row,col - lengths are multiplied by 2 */
    for (i=0; i<nreceives; i++) olengths[i] *=2;
   for (i=1; i<size; i++) startv[i] = startv[i-1] + nlengths[i-1];
 
   for (i=0,count=0; i<size; i++) {
-    if (nprocs[i]) {
+    if (sizes[i]) {
       ierr = MPI_Isend(sindices+2*startv[i],2*nlengths[i],MPIU_INT,i,tag1,comm,send_waits+count++);CHKERRQ(ierr);
       ierr = MPI_Isend(svalues+bs2*startv[i],bs2*nlengths[i],MPIU_SCALAR,i,tag2,comm,send_waits+count++);CHKERRQ(ierr);
     }
 #if defined(PETSC_USE_INFO)
   ierr = PetscInfo1(NULL,"No of messages: %d \n",nsends);CHKERRQ(ierr);
   for (i=0; i<size; i++) {
-    if (nprocs[i]) {
+    if (sizes[i]) {
       ierr = PetscInfo2(NULL,"Mesg_to: %d: size: %d bytes\n",i,nlengths[i]*(bs2*sizeof(PetscScalar)+2*sizeof(PetscInt)));CHKERRQ(ierr);
     }
   }
   ierr = PetscFree(nlengths);CHKERRQ(ierr);
   ierr = PetscFree(owner);CHKERRQ(ierr);
   ierr = PetscFree2(startv,starti);CHKERRQ(ierr);
-  ierr = PetscFree(nprocs);CHKERRQ(ierr);
+  ierr = PetscFree(sizes);CHKERRQ(ierr);
 
   /* recv_waits need to be contiguous for MatStashScatterGetMesg_Private() */
   ierr = PetscMalloc(2*nreceives*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);

src/snes/examples/tutorials/ex28.c

   PetscErrorCode ierr;
   DM             dau,dak,pack;
   const PetscInt *lxu;
-  PetscInt       *lxk,m,nprocs;
+  PetscInt       *lxk,m,sizes;
   User           user;
   SNES           snes;
   Vec            X,F,Xu,Xk,Fu,Fk;
   ierr = DMSetOptionsPrefix(dau,"u_");CHKERRQ(ierr);
   ierr = DMSetFromOptions(dau);CHKERRQ(ierr);
   ierr = DMDAGetOwnershipRanges(dau,&lxu,0,0);CHKERRQ(ierr);
-  ierr = DMDAGetInfo(dau,0, &m,0,0, &nprocs,0,0, 0,0,0,0,0,0);CHKERRQ(ierr);
-  ierr = PetscMalloc(nprocs*sizeof(*lxk),&lxk);CHKERRQ(ierr);
-  ierr = PetscMemcpy(lxk,lxu,nprocs*sizeof(*lxk));CHKERRQ(ierr);
+  ierr = DMDAGetInfo(dau,0, &m,0,0, &sizes,0,0, 0,0,0,0,0,0);CHKERRQ(ierr);
+  ierr = PetscMalloc(sizes*sizeof(*lxk),&lxk);CHKERRQ(ierr);
+  ierr = PetscMemcpy(lxk,lxu,sizes*sizeof(*lxk));CHKERRQ(ierr);
   lxk[0]--;
   ierr = DMDACreate1d(PETSC_COMM_WORLD,DMDA_BOUNDARY_NONE,m-1,1,1,lxk,&dak);CHKERRQ(ierr);
   ierr = DMSetOptionsPrefix(dak,"k_");CHKERRQ(ierr);

src/sys/classes/random/examples/tutorials/ex2.c

 #define DATAFILENAME "ex2_stock.txt"
 
 struct himaInfoTag {
-  int    n;
-  double r;
-  double dt;
-  int    totalNumSim;
-  double *St0;
-  double *vol;
+  PetscInt    n;
+  double      r;
+  double      dt;
+  PetscInt    totalNumSim;
+  double      *St0;
+  double      *vol;
 };
 typedef struct himaInfoTag himaInfo;
 
 PetscErrorCode readData(MPI_Comm comm,himaInfo *hinfo);
 double mcVal(double St, double r, double vol, double dt, double eps);
 void exchange(double *a, double *b);
-double basketPayoff(double vol[], double St0[], int n, double r,double dt, double eps[]);
-void stdNormalArray(double *eps, int size,PetscRandom ran);
-unsigned long divWork(int id, unsigned long num, int np);
+double basketPayoff(double vol[], double St0[], PetscInt n, double r,double dt, double eps[]);
+void stdNormalArray(double *eps, PetscInt numdim,PetscRandom ran);
+PetscInt divWork(PetscMPIInt id, PetscInt num, PetscMPIInt size);
 
 /*
    Contributed by Xiaoyan Zeng <zengxia@iit.edu> and Liu, Kwong Ip" <kiliu@math.hkbu.edu.hk>
 {
   /* double         payoff; */
   double         r,dt;
-  int            n;
+  PetscInt            n;
   unsigned long  i,myNumSim,totalNumSim,numdim;
   double         *vol, *St0, x, totalx;
-  int            np,myid;
+  PetscMPIInt   size,rank;
   double         *eps;
   himaInfo       hinfo;
   PetscRandom    ran;
 #endif
   ierr = PetscRandomSetFromOptions(ran);CHKERRQ(ierr);
 
-  ierr = MPI_Comm_size(PETSC_COMM_WORLD, &np);CHKERRQ(ierr);       /* number of nodes */
-  ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &myid);CHKERRQ(ierr);     /* my ranking */
+  ierr = MPI_Comm_size(PETSC_COMM_WORLD, &size);CHKERRQ(ierr);       /* number of nodes */
+  ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank);CHKERRQ(ierr);     /* my ranking */
 
   ierr = PetscOptionsHasName(NULL, "-check_generators", &flg);CHKERRQ(ierr);
   if (flg) {
     ierr = PetscRandomGetValue(ran,(PetscScalar*)&r);
-    ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] rval: %g\n",myid,r);
+    ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] rval: %g\n",rank,r);
     ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);
   }
 
   if (numdim%2 == 1) numdim++;
   eps = (double*)malloc(sizeof(double)*numdim);
 
-  myNumSim = divWork(myid,totalNumSim,np);
+  myNumSim = divWork(rank,totalNumSim,size);
 
   x = 0;
   for (i=0; i<myNumSim; i++) {
   ierr = MPI_Reduce(&x, &totalx, 1, MPI_DOUBLE, MPI_SUM,0,PETSC_COMM_WORLD);CHKERRQ(ierr);
   /* payoff = exp(-r*dt*n)*(totalx/totalNumSim);
   ierr = PetscPrintf(PETSC_COMM_WORLD,"Option price = $%.3f using %ds of %s computation with %d %s for %d stocks, %d trading period per year, %.2f%% interest rate\n",
-   payoff,(int)(stop - start),"parallel",np,"processors",n,(int)(1/dt),r);CHKERRQ(ierr); */
+   payoff,(int)(stop - start),"parallel",size,"processors",n,(int)(1/dt),r);CHKERRQ(ierr); */
 
   free(vol);
   free(eps);
   return 0;
 }
 
-void stdNormalArray(double *eps, int size, PetscRandom ran)
+void stdNormalArray(double *eps, PetscInt numdim, PetscRandom ran)
 {
   int            i;
   double         u1,u2,t;
   PetscErrorCode ierr;
 
-  for (i=0; i<size; i+=2) {
+  for (i=0; i<numdim; i+=2) {
     ierr = PetscRandomGetValue(ran,(PetscScalar*)&u1);CHKERRABORT(PETSC_COMM_WORLD,ierr);
     ierr = PetscRandomGetValue(ran,(PetscScalar*)&u2);CHKERRABORT(PETSC_COMM_WORLD,ierr);
 
 }
 
 
-double basketPayoff(double vol[], double St0[], int n, double r,double dt, double eps[])
+double basketPayoff(double vol[], double St0[], PetscInt n, double r,double dt, double eps[])
 {
   double Stk[PETSC_MAXBSIZE], temp;
   double payoff;
-  int    maxk,i,j;
-  int    pointcount=0;
+  PetscInt    maxk,i,j;
+  PetscInt    pointcount=0;
 
   for (i=0;i<n;i++) Stk[i] = St0[i];
 
   return (St * exp((r-0.5*vol*vol)*dt + vol*sqrt(dt)*eps));
 }
 
-unsigned long divWork(int id, unsigned long num, int np)
+PetscInt divWork(PetscMPIInt id, PetscInt num, PetscMPIInt size)
 {
-  unsigned long numit;
+  PetscInt numit;
 
-  numit = (unsigned long)(((double)num)/np);
+  numit = (PetscInt)(((double)num)/size);
   numit++;
   return numit;
 }

src/sys/examples/tutorials/ex5f90.F90

       type(bag_data_type)          :: dummydata
       character(len=1),pointer     :: dummychar(:)
       PetscViewer viewer
-      PetscSizeT sizeofbag,sizeofint
-      PetscSizeT sizeofscalar,sizeoftruth
-      PetscSizeT sizeofchar,sizeofreal
+      PetscSizeT sizeofbag
       Character(len=99) list(6)
+      PetscInt three
 
       Call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
       list(1) = 'a123'
       list(4) = 'list'
       list(5) = 'prefix_'
       list(6) = ''
+!     cannot just pass a 3 to PetscBagRegisterXXXArray() because it is expecting a PetscInt
+      three   = 3
 
 !   compute size of the data
-!      call PetscDataTypeGetSize(PETSC_INT,sizeofint,ierr)
-!      call PetscDataTypeGetSize(PETSC_SCALAR,sizeofscalar,ierr)
-!      call PetscDataTypeGetSize(PETSC_BOOL,sizeoftruth,ierr)
-       call PetscDataTypeGetSize(PETSC_CHAR,sizeofchar,ierr)
-!      call PetscDataTypeGetSize(PETSC_REAL,sizeofreal,ierr)
-
-!     really need a sizeof(data) operator here. There could be padding inside the
-!     structure due to alignment issues - so, this computed value cold be wrong.
-!      sizeofbag = sizeofint + sizeofscalar + sizeoftruth + sizeofchar*80 &
-!     &       + 3*sizeofreal+3*sizeofreal
-!     That is correct... unless the sequence keyword is used in the derived
-!     types, this length will be wrong because of padding
-!     this is a situation where the transfer function is very helpful...
-      sizeofbag = size(transfer(dummydata,dummychar))*sizeofchar
+!
+      sizeofbag = size(transfer(dummydata,dummychar))
 
 
 ! create the bag
 ! register the data within the bag, grabbing values from the options database
       call PetscBagRegisterInt(bag,data%nxc ,56,'nxc',                   &
      &      'nxc_variable help message',ierr)
-      call PetscBagRegisterRealArray(bag,data%rarray,3,'rarray',         &
+      call PetscBagRegisterRealArray(bag,data%rarray,three,'rarray',         &
      &      'rarray help message',ierr)
       call PetscBagRegisterScalar(bag,data%x ,103.2d0,'x',               &
      &      'x variable help message',ierr)
       call PetscBagRegisterBool(bag,data%t ,PETSC_TRUE,'t',              &
      &      't boolean help message',ierr)
-      call PetscBagRegisterBoolArray(bag,data%tarray,3,'tarray',         &
+      call PetscBagRegisterBoolArray(bag,data%tarray,three,'tarray',         &
      &      'tarray help message',ierr)
       call PetscBagRegisterString(bag,data%c,'hello','c',                &
      &      'string help message',ierr)

src/sys/objects/pinit.c

 */
 #undef __FUNCT__
 #define __FUNCT__ "PetscMaxSum"
-PetscErrorCode  PetscMaxSum(MPI_Comm comm,const PetscInt nprocs[],PetscInt *max,PetscInt *sum)
+PetscErrorCode  PetscMaxSum(MPI_Comm comm,const PetscInt sizes[],PetscInt *max,PetscInt *sum)
 {
   PetscMPIInt    size,rank;
   struct {PetscInt max,sum;} *work;
   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
   ierr = PetscMalloc(size*sizeof(*work),&work);CHKERRQ(ierr);
-  ierr = MPI_Allreduce((void*)nprocs,work,size,MPIU_2INT,PetscMaxSum_Op,comm);CHKERRQ(ierr);
+  ierr = MPI_Allreduce((void*)sizes,work,size,MPIU_2INT,PetscMaxSum_Op,comm);CHKERRQ(ierr);
   *max = work[rank].max;
   *sum = work[rank].sum;
   ierr = PetscFree(work);CHKERRQ(ierr);

src/ts/examples/tests/ex3.c

   stepsz[0] = 1.0/(2.0*(nz-1)*(nz-1)); /* (mesh_size)^2/2.0 */
   ftime     = 0.0;
   for (k=0; k<nphase; k++) {
-    if (nphase > 1) printf("Phase %d: initial time %g, stepsz %g, duration: %g\n",k,ftime,stepsz[k],(k+1)*T);
+    if (nphase > 1) printf("Phase %d: initial time %g, stepsz %g, duration: %g\n",(int)k,ftime,stepsz[k],(k+1)*T);
     ierr = TSSetInitialTimeStep(ts,ftime,stepsz[k]);CHKERRQ(ierr);
     ierr = TSSetDuration(ts,max_steps,(k+1)*T);CHKERRQ(ierr);
 

src/ts/examples/tests/ex5.c

 
   if (step%user->interval == 0) {
     ierr = VecGetArray(T,&array);CHKERRQ(ierr);
-    if (!rank) printf("step %4d, time %8.1f,  %6.4f, %6.4f, %6.4f, %6.4f, %6.4f, %6.4f\n",step,time,(((array[0]-273)*9)/5 + 32),(((array[1]-273)*9)/5 + 32),array[2],array[3],array[4],array[5]);
+    if (!rank) printf("step %4d, time %8.1f,  %6.4f, %6.4f, %6.4f, %6.4f, %6.4f, %6.4f\n",(int)step,time,(((array[0]-273)*9)/5 + 32),(((array[1]-273)*9)/5 + 32),array[2],array[3],array[4],array[5]);
     ierr = VecRestoreArray(T,&array);CHKERRQ(ierr);
   }
 

src/ts/examples/tutorials/ex31.c

   }
   ierr = PetscOptionsGetScalarArray(PETSC_NULL,"-yinit",y,&N,&flg);CHKERRQ(ierr);
   if ((N != GetSize(s)) && flg) {
-    printf("Error: number of initial values %d does not match problem size %d.\n",N,GetSize(s));
+    printf("Error: number of initial values %d does not match problem size %d.\n",(int)N,(int)GetSize(s));
   }
   ierr = VecRestoreArray(Y,&y);CHKERRQ(ierr);
   PetscFunctionReturn(0);
   PetscErrorCode  ierr;                       /* Error code                                           */
   char            ptype[256] = "hull1972a1";  /* Problem specification                                */
   PetscInt        n_refine   = 1;             /* Number of refinement levels for convergence analysis */
-  PetscReal     refine_fac = 2.0;           /* Refinement factor for dt                             */
+  PetscReal       refine_fac = 2.0;           /* Refinement factor for dt                             */
   PetscReal       dt_initial = 0.01;          /* Initial default value of dt                          */
   PetscReal       dt;
   PetscReal       tfinal     = 20.0;          /* Final time for the time-integration                  */
   PetscInt        maxiter    = 100000;        /* Maximum number of time-integration iterations        */
   PetscReal       *error;                     /* Array to store the errors for convergence analysis   */
-  PetscInt        nproc;                      /* No of processors                                     */
+  PetscMPIInt     size;                      /* No of processors                                     */
   PetscBool       flag;                       /* Flag denoting availability of exact solution         */
   PetscInt        r;
 
   PetscInitialize(&argc,&argv,(char*)0,help);
 
   /* Check if running with only 1 proc */
-  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&nproc);CHKERRQ(ierr);
-  if (nproc>1) SETERRQ(PETSC_COMM_WORLD,PETSC_ERR_SUP,"Only for sequential runs");
+  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
+  if (size>1) SETERRQ(PETSC_COMM_WORLD,PETSC_ERR_SUP,"Only for sequential runs");
 
   ierr = PetscOptionsString("-problem","Problem specification","<hull1972a1>",
                             ptype,ptype,sizeof(ptype),PETSC_NULL);CHKERRQ(ierr);

src/vec/is/ao/impls/memscalable/aomemscalable.c

   AO_MemoryScalable *aomems = (AO_MemoryScalable*)ao->data;
   MPI_Comm          comm;
   PetscMPIInt       rank,size,tag1,tag2;
-  PetscInt          *owner,*start,*nprocs,nsends,nreceives;
+  PetscInt          *owner,*start,*sizes,nsends,nreceives;
   PetscInt          nmax,count,*sindices,*rindices,i,j,idx,lastidx,*sindices2,*rindices2;
   PetscInt          *owners = aomems->map->range;
   MPI_Request       *send_waits,*recv_waits,*send_waits2,*recv_waits2;
   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
 
   /*  first count number of contributors to each processor */
-  ierr = PetscMalloc2(2*size,PetscInt,&nprocs,size,PetscInt,&start);CHKERRQ(ierr);
-  ierr = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr);
+  ierr = PetscMalloc2(2*size,PetscInt,&sizes,size,PetscInt,&start);CHKERRQ(ierr);
+  ierr = PetscMemzero(sizes,2*size*sizeof(PetscInt));CHKERRQ(ierr);
   ierr = PetscMalloc(n*sizeof(PetscInt),&owner);CHKERRQ(ierr);
   ierr = PetscMemzero(owner,n*sizeof(PetscInt));CHKERRQ(ierr);
 
     lastidx = idx;
     for (; j<size; j++) {
       if (idx >= owners[j] && idx < owners[j+1]) {
-        nprocs[2*j]++;     /* num of indices to be sent */
-        nprocs[2*j+1] = 1; /* send to proc[j] */
+        sizes[2*j]++;     /* num of indices to be sent */
+        sizes[2*j+1] = 1; /* send to proc[j] */
         owner[i]      = j;
         break;
       }
     }
   }
-  nprocs[2*rank]=nprocs[2*rank+1]=0; /* do not receive from self! */
+  sizes[2*rank]=sizes[2*rank+1]=0; /* do not receive from self! */
   nsends        = 0;
-  for (i=0; i<size; i++) nsends += nprocs[2*i+1];
+  for (i=0; i<size; i++) nsends += sizes[2*i+1];
 
   /* inform other processors of number of messages and max length*/
-  ierr = PetscMaxSum(comm,nprocs,&nmax,&nreceives);CHKERRQ(ierr);
+  ierr = PetscMaxSum(comm,sizes,&nmax,&nreceives);CHKERRQ(ierr);
 
   /* allocate arrays */
   ierr = PetscObjectGetNewTag((PetscObject)ao,&tag1);CHKERRQ(ierr);
          the ith processor
   */
   start[0] = 0;
-  for (i=1; i<size; i++) start[i] = start[i-1] + nprocs[2*i-2];
+  for (i=1; i<size; i++) start[i] = start[i-1] + sizes[2*i-2];
   for (i=0; i<n; i++) {
     j = owner[i];
     if (j != rank) {
   }
 
   start[0] = 0;
-  for (i=1; i<size; i++) start[i] = start[i-1] + nprocs[2*i-2];
+  for (i=1; i<size; i++) start[i] = start[i-1] + sizes[2*i-2];
   for (i=0,count=0; i<size; i++) {
-    if (nprocs[2*i+1]) {
+    if (sizes[2*i+1]) {
       /* send my request to others */
-      ierr = MPI_Isend(sindices+start[i],nprocs[2*i],MPIU_INT,i,tag1,comm,send_waits+count);CHKERRQ(ierr);
+      ierr = MPI_Isend(sindices+start[i],sizes[2*i],MPIU_INT,i,tag1,comm,send_waits+count);CHKERRQ(ierr);
       /* post receive for the answer of my request */
-      ierr = MPI_Irecv(sindices2+start[i],nprocs[2*i],MPIU_INT,i,tag2,comm,recv_waits2+count);CHKERRQ(ierr);
+      ierr = MPI_Irecv(sindices2+start[i],sizes[2*i],MPIU_INT,i,tag2,comm,recv_waits2+count);CHKERRQ(ierr);
       count++;
     }
   }
   }
 
   /* free arrays */
-  ierr = PetscFree2(nprocs,start);CHKERRQ(ierr);
+  ierr = PetscFree2(sizes,start);CHKERRQ(ierr);
   ierr = PetscFree(owner);CHKERRQ(ierr);
   ierr = PetscFree2(rindices,recv_waits);CHKERRQ(ierr);
   ierr = PetscFree2(rindices2,recv_waits2);CHKERRQ(ierr);
   PetscLayout       map     = aomems->map;
   PetscInt          n_local = map->n,i,j;
   PetscMPIInt       rank,size,tag;
-  PetscInt          *owner,*start,*nprocs,nsends,nreceives;
+  PetscInt          *owner,*start,*sizes,nsends,nreceives;
   PetscInt          nmax,count,*sindices,*rindices,idx,lastidx;
   PetscInt          *owners = aomems->map->range;
   MPI_Request       *send_waits,*recv_waits;
   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
 
   /*  first count number of contributors (of from_array[]) to each processor */
-  ierr = PetscMalloc(2*size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
-  ierr = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr);
+  ierr = PetscMalloc(2*size*sizeof(PetscInt),&sizes);CHKERRQ(ierr);
+  ierr = PetscMemzero(sizes,2*size*sizeof(PetscInt));CHKERRQ(ierr);
   ierr = PetscMalloc(n*sizeof(PetscInt),&owner);CHKERRQ(ierr);
 
   j       = 0;
     lastidx = idx;
     for (; j<size; j++) {
       if (idx >= owners[j] && idx < owners[j+1]) {
-        nprocs[2*j]  += 2; /* num of indices to be sent - in pairs (ip,ia) */
-        nprocs[2*j+1] = 1; /* send to proc[j] */
+        sizes[2*j]  += 2; /* num of indices to be sent - in pairs (ip,ia) */
+        sizes[2*j+1] = 1; /* send to proc[j] */
         owner[i]      = j;
         break;
       }
     }
   }
-  nprocs[2*rank]=nprocs[2*rank+1]=0; /* do not receive from self! */
+  sizes[2*rank]=sizes[2*rank+1]=0; /* do not receive from self! */
   nsends        = 0;
-  for (i=0; i<size; i++) nsends += nprocs[2*i+1];
+  for (i=0; i<size; i++) nsends += sizes[2*i+1];
 
   /* inform other processors of number of messages and max length*/
-  ierr = PetscMaxSum(comm,nprocs,&nmax,&nreceives);CHKERRQ(ierr);
+  ierr = PetscMaxSum(comm,sizes,&nmax,&nreceives);CHKERRQ(ierr);
 
   /* allocate arrays */
   ierr = PetscObjectGetNewTag((PetscObject)ao,&tag);CHKERRQ(ierr);
          the ith processor
   */
   start[0] = 0;
-  for (i=1; i<size; i++) start[i] = start[i-1] + nprocs[2*i-2];
+  for (i=1; i<size; i++) start[i] = start[i-1] + sizes[2*i-2];
   for (i=0; i<n; i++) {
     j = owner[i];
     if (j != rank) {
   }
 
   start[0] = 0;
-  for (i=1; i<size; i++) start[i] = start[i-1] + nprocs[2*i-2];
+  for (i=1; i<size; i++) start[i] = start[i-1] + sizes[2*i-2];
   for (i=0,count=0; i<size; i++) {
-    if (nprocs[2*i+1]) {
-      ierr = MPI_Isend(sindices+start[i],nprocs[2*i],MPIU_INT,i,tag,comm,send_waits+count);CHKERRQ(ierr);
+    if (sizes[2*i+1]) {
+      ierr = MPI_Isend(sindices+start[i],sizes[2*i],MPIU_INT,i,tag,comm,send_waits+count);CHKERRQ(ierr);
       count++;
     }
   }
   ierr = PetscFree3(sindices,send_waits,send_status);CHKERRQ(ierr);
   ierr = PetscFree2(rindices,recv_waits);CHKERRQ(ierr);
   ierr = PetscFree(owner);CHKERRQ(ierr);
-  ierr = PetscFree(nprocs);CHKERRQ(ierr);
+  ierr = PetscFree(sizes);CHKERRQ(ierr);
   PetscFunctionReturn(0);
 }
 

src/vec/is/is/utils/ftn-custom/zisltogf.c

 
 static PetscInt  *sprocs, *snumprocs, **sindices;
 static PetscBool called;
-PETSC_EXTERN void PETSC_STDCALL islocaltoglobalmpnggetinfosize_(ISLocalToGlobalMapping *mapping,PetscInt *nprocs,PetscInt *maxnumprocs,PetscErrorCode *ierr)
+PETSC_EXTERN void PETSC_STDCALL islocaltoglobalmpnggetinfosize_(ISLocalToGlobalMapping *mapping,PetscInt *size,PetscInt *maxnumprocs,PetscErrorCode *ierr)
 {
   PetscInt i;
   if (called) {*ierr = PETSC_ERR_ARG_WRONGSTATE; return;}
-  *ierr        = ISLocalToGlobalMappingGetInfo(*mapping,nprocs,&sprocs,&snumprocs,&sindices); if (*ierr) return;
+  *ierr        = ISLocalToGlobalMappingGetInfo(*mapping,size,&sprocs,&snumprocs,&sindices); if (*ierr) return;
   *maxnumprocs = 0;
-  for (i=0; i<*nprocs; i++) *maxnumprocs = PetscMax(*maxnumprocs,snumprocs[i]);
+  for (i=0; i<*size; i++) *maxnumprocs = PetscMax(*maxnumprocs,snumprocs[i]);
   called = PETSC_TRUE;
 }
 
-PETSC_EXTERN void PETSC_STDCALL islocaltoglobalmappinggetinfo_(ISLocalToGlobalMapping *mapping,PetscInt *nprocs,PetscInt *procs,PetscInt *numprocs,
+PETSC_EXTERN void PETSC_STDCALL islocaltoglobalmappinggetinfo_(ISLocalToGlobalMapping *mapping,PetscInt *size,PetscInt *procs,PetscInt *numprocs,
                                                   PetscInt *indices,PetscErrorCode *ierr)
 {
   PetscInt i,j;
   if (!called) {*ierr = PETSC_ERR_ARG_WRONGSTATE; return;}
-  *ierr = PetscMemcpy(procs,sprocs,*nprocs*sizeof(PetscInt)); if (*ierr) return;
-  *ierr = PetscMemcpy(numprocs,snumprocs,*nprocs*sizeof(PetscInt)); if (*ierr) return;
-  for (i=0; i<*nprocs; i++) {
-    for (j=0; j<numprocs[i]; j++) indices[i + (*nprocs)*j] = sindices[i][j];
+  *ierr = PetscMemcpy(procs,sprocs,*size*sizeof(PetscInt)); if (*ierr) return;
+  *ierr = PetscMemcpy(numprocs,snumprocs,*size*sizeof(PetscInt)); if (*ierr) return;
+  for (i=0; i<*size; i++) {
+    for (j=0; j<numprocs[i]; j++) indices[i + (*size)*j] = sindices[i][j];
   }
-  *ierr  = ISLocalToGlobalMappingRestoreInfo(*mapping,nprocs,&sprocs,&snumprocs,&sindices); if (*ierr) return;
+  *ierr  = ISLocalToGlobalMappingRestoreInfo(*mapping,size,&sprocs,&snumprocs,&sindices); if (*ierr) return;
   called = PETSC_FALSE;
 }

src/vec/vec/examples/tests/ex37.c

 #define __FUNCT__ "gen_test_vector"
 PetscErrorCode gen_test_vector(MPI_Comm comm, PetscInt length, PetscInt start_value, PetscInt stride, Vec *_v)
 {
-  int            nproc;
+  int            size;
   Vec            v;
   PetscInt       i;
   PetscScalar    vx;
   PetscErrorCode ierr;
 
-  MPI_Comm_size(comm, &nproc);
+  MPI_Comm_size(comm, &size);
 
   ierr = VecCreate(comm, &v);CHKERRQ(ierr);
   ierr = VecSetSizes(v, PETSC_DECIDE, length);CHKERRQ(ierr);
-  if (nproc == 1) { ierr = VecSetType(v, VECSEQ);CHKERRQ(ierr); }
+  if (size == 1) { ierr = VecSetType(v, VECSEQ);CHKERRQ(ierr); }
   else { ierr = VecSetType(v, VECMPI);CHKERRQ(ierr); }
 
   for (i=0; i<length; i++) {

src/vec/vec/examples/tutorials/ex18.c

 int main(int argc,char **argv)
 {
   PetscErrorCode ierr;
-  PetscMPIInt    rank,nproc;
+  PetscMPIInt    rank,size;
   PetscInt       rstart,rend,i,k,N,numPoints=1000000;
   PetscScalar    dummy,result=0,h=1.0/numPoints,*xarray;
   Vec            x,xend;
 
   PetscInitialize(&argc,&argv,(char*)0,help);
   ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
-  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&nproc);CHKERRQ(ierr);
+  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
 
   /*
      Create a parallel vector.
   if (!rank) {
     i    = 0;
     ierr = VecSetValues(xend,1,&i,&result,INSERT_VALUES);CHKERRQ(ierr);
-  } else if (rank == nproc) {
+  } else if (rank == size) {
     i    = N-1;
     ierr = VecSetValues(xend,1,&i,&result,INSERT_VALUES);CHKERRQ(ierr);
   }