Commits

BarryFSmith committed eb9baa1

Replaced all left-over uses of a single PetscMalloc() to allocate multiple arrays with the appropriate PetscMallocN().
The only ones left are where the second array is set to point into the first array, and one ugly usage in the MUMPS interface that cannot easily be fixed.
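
For reference, the pattern being removed allocates one block and offsets the extra arrays into it, while the replacement gives each array its own name via PetscMallocN()/PetscFreeN(). Below is a minimal sketch of the two styles, using the PetscMalloc2() signature that appears in the diffs; the function and the variables n, r, c are illustrative only, not code from this commit (assumes petscsys.h):

  /* hypothetical example: two real arrays of length n */
  static PetscErrorCode ExampleTwoArrays(PetscInt n)
  {
    PetscReal      *r,*c;
    PetscErrorCode ierr;

    PetscFunctionBegin;
    /* old style being removed: one allocation, second pointer offset into the first */
    ierr = PetscMalloc(2*n*sizeof(PetscReal),&r);CHKERRQ(ierr);
    c    = r + n;
    /* ... fill r[] and c[] ... */
    ierr = PetscFree(r);CHKERRQ(ierr);   /* c must NOT be freed separately */

    /* new style: one call, two independently named arrays */
    ierr = PetscMalloc2(n,PetscReal,&r,n,PetscReal,&c);CHKERRQ(ierr);
    /* ... fill r[] and c[] ... */
    ierr = PetscFree2(r,c);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }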

Comments (0)

Files changed (19)

src/ksp/ksp/impls/gmres/agmres/agmresleja.c

   PetscScalar    *n_cmpl,temp;
   PetscErrorCode ierr;
   PetscInt       i, pos, j;
-  ierr = PetscMalloc(m*sizeof(PetscScalar), &n_cmpl);CHKERRQ(ierr);
-  ierr = PetscMalloc(m*sizeof(PetscInt), &spos);CHKERRQ(ierr);
 
   PetscFunctionBegin;
+  ierr = PetscMalloc(m*sizeof(PetscScalar), &n_cmpl);CHKERRQ(ierr);
+  ierr = PetscMalloc(m*sizeof(PetscInt), &spos);CHKERRQ(ierr);
   /* Check the proper order of complex conjugate pairs */
   j = 0;
   while (j  < m) {
   }
 
   for (i = 0; i < m; i++) n_cmpl[i] = PetscSqrtReal(re[i]*re[i]+im[i]*im[i]);
-  KSPAGMRESLejafmaxarray(n_cmpl, 0, m, &pos);
+  ierr = KSPAGMRESLejafmaxarray(n_cmpl, 0, m, &pos);CHKERRQ(ierr);
   j = 0;
   if (im[pos] >= 0.0) {
     rre[0] = re[pos];
       spos[j] = pos + 1;
       j++;
     }
-    KSPAGMRESLejaCfpdMax(re, im, spos, j, m, &pos);
+    ierr = KSPAGMRESLejaCfpdMax(re, im, spos, j, m, &pos);CHKERRQ(ierr);
     if (im[pos] < 0) pos--;
 
     if ((im[pos] >= 0) && (j < m)) {

src/ksp/ksp/interface/ams/kspams.c

     mon->amem = -1;
   }
   ierr      = PetscViewerDestroy(&mon->viewer);CHKERRQ(ierr);
-  ierr      = PetscFree(mon->eigr);CHKERRQ(ierr);
-  mon->eigi = NULL;
+  ierr      = PetscFree2(mon->eigr,mon->eigi);CHKERRQ(ierr);
   ierr      = PetscFree(*ctx);CHKERRQ(ierr);
   PetscFunctionReturn(0);
 }
   mon->amem = -1;
 
   ierr      = PetscFree(mon->eigr);CHKERRQ(ierr);
-  ierr      = PetscMalloc(2*n*sizeof(PetscReal),&mon->eigr);CHKERRQ(ierr);
-  mon->eigi = mon->eigr + n;
+  ierr      = PetscMalloc2(n,PetscReal,&mon->eigr,n,PetscReal,&mon->eigi);CHKERRQ(ierr);
   if (n) {ierr = KSPComputeEigenvalues(ksp,n,mon->eigr,mon->eigi,&mon->neigs);CHKERRQ(ierr);}
 
   ierr = PetscViewerAMSGetAMSComm(viewer,&acomm);CHKERRQ(ierr);

src/ksp/ksp/interface/eige.c

 
     idummy   = n;
     lwork    = 5*n;
-    ierr     = PetscMalloc(2*n*sizeof(PetscReal),&realpart);CHKERRQ(ierr);
-    imagpart = realpart + n;
+    ierr     = PetscMalloc2(n,PetscReal,&realpart,n,PetscReal,&imagpart);CHKERRQ(ierr);
     ierr     = PetscMalloc(5*n*sizeof(PetscReal),&work);CHKERRQ(ierr);
 #if defined(PETSC_MISSING_LAPACK_GEEV)
     SETERRQ(PetscObjectComm((PetscObject)ksp),PETSC_ERR_SUP,"GEEV - Lapack routine is unavailable\nNot able to provide eigen values.");
       c[i] = imagpart[perm[i]];
     }
     ierr = PetscFree(perm);CHKERRQ(ierr);
-    ierr = PetscFree(realpart);CHKERRQ(ierr);
+    ierr = PetscFree2(realpart,imagpart);CHKERRQ(ierr);
   }
 #else
   if (!rank) {

src/ksp/ksp/interface/itfunc.c

     if (!nits) {
       ierr = PetscPrintf(PetscObjectComm((PetscObject)ksp),"Zero iterations in solver, cannot approximate any eigenvalues\n");CHKERRQ(ierr);
     } else {
-      ierr = PetscMalloc(2*n*sizeof(PetscReal),&r);CHKERRQ(ierr);
-      c    = r + n;
+      ierr = PetscMalloc2(n,PetscReal,&r,n,PetscReal,&c);CHKERRQ(ierr);
       ierr = KSPComputeEigenvalues(ksp,n,r,c,&neig);CHKERRQ(ierr);
       if (flag1) {
         ierr = PetscPrintf(PetscObjectComm((PetscObject)ksp),"Iteratively computed eigenvalues\n");CHKERRQ(ierr);
       if (flag3 && !rank) {
         ierr = KSPPlotEigenContours_Private(ksp,neig,r,c);CHKERRQ(ierr);
       }
-      ierr = PetscFree(r);CHKERRQ(ierr);
+      ierr = PetscFree2(r,c);CHKERRQ(ierr);
     }
   }
 

src/ksp/pc/impls/bddc/bddcfetidp.c

     partial_sum += pcis->n_shared[i];
     ptrs_buffer[i] = ptrs_buffer[i-1]+pcis->n_shared[i];
   }
-  ierr = PetscMalloc( partial_sum*sizeof(PetscScalar),&send_buffer);CHKERRQ(ierr);
-  ierr = PetscMalloc( partial_sum*sizeof(PetscScalar),&recv_buffer);CHKERRQ(ierr);
-  ierr = PetscMalloc( partial_sum*sizeof(PetscScalar),&all_factors[0]);CHKERRQ(ierr);
+  ierr = PetscMalloc(partial_sum*sizeof(PetscScalar),&send_buffer);CHKERRQ(ierr);
+  ierr = PetscMalloc(partial_sum*sizeof(PetscScalar),&recv_buffer);CHKERRQ(ierr);
+  ierr = PetscMalloc(partial_sum*sizeof(PetscScalar),&all_factors[0]);CHKERRQ(ierr);
   for (i=0;i<pcis->n-1;i++) {
     j = mat_graph->count[i];
     all_factors[i+1]=all_factors[i]+j;
   }
   /* scatter B scaling to N vec */
   ierr = VecScatterBegin(pcis->N_to_B,pcis->D,pcis->vec1_N,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
-  ierr = VecScatterEnd  (pcis->N_to_B,pcis->D,pcis->vec1_N,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
+  ierr = VecScatterEnd(pcis->N_to_B,pcis->D,pcis->vec1_N,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
   /* communications */
   ierr = VecGetArray(pcis->vec1_N,&array);CHKERRQ(ierr);
   for (i=1;i<pcis->n_neigh;i++) {

src/ksp/pc/impls/bddc/bddcgraph.c

     graph->xadj = new_xadj;
     graph->adjncy = new_adjncy;
   }
-    
+
   /* mark special nodes -> each will become a single node equivalence class */
   if (custom_primal_vertices) {
     ierr = ISGetSize(custom_primal_vertices,&is_size);CHKERRQ(ierr);

src/ksp/pc/impls/gamg/gamg.c

 
   /* get basic dims */
   ierr = MatGetBlockSize(Pmat, &bs);CHKERRQ(ierr);
-  
+
   ierr = MatGetSize(Pmat, &M, &qq);CHKERRQ(ierr);
   if (pc_gamg->verbose) {
     PetscInt NN = M;

src/ksp/pc/impls/is/nn/nn.c

   {
     PetscMPIInt tag;
     ierr         = PetscObjectGetNewTag((PetscObject)pc,&tag);CHKERRQ(ierr);
-    ierr         = PetscMalloc((2*(n_neigh)+1)*sizeof(MPI_Request),&send_request);CHKERRQ(ierr);
-    recv_request = send_request + (n_neigh);
+    ierr         = PetscMalloc2(n_neigh+1,MPI_Request,&send_request,n_neigh+1,MPI_Request,&recv_request);CHKERRQ(ierr);
     for (i=1; i<n_neigh; i++) {
       ierr = MPI_Isend((void*)(DZ_OUT[i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,PetscObjectComm((PetscObject)pc),&(send_request[i]));CHKERRQ(ierr);
       ierr = MPI_Irecv((void*)(DZ_IN [i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,PetscObjectComm((PetscObject)pc),&(recv_request[i]));CHKERRQ(ierr);
   }
 
   /* Free the memory for the MPI requests */
-  ierr = PetscFree(send_request);CHKERRQ(ierr);
+  ierr = PetscFree2(send_request,recv_request);CHKERRQ(ierr);
 
   /* Free the memory for DZ_OUT */
   if (DZ_OUT) {

src/ksp/pc/impls/is/pcis.c

     for (i=0;i<pcis->n_neigh;i++)
       for (j=0;j<pcis->n_shared[i];j++)
           array[pcis->shared[i][j]] += 1;
- 
+
     ierr = PetscMalloc(pcis->n*sizeof(PetscInt),&idx_I_local);CHKERRQ(ierr);
     ierr = PetscMalloc(pcis->n*sizeof(PetscInt),&idx_B_local);CHKERRQ(ierr);
     for (i=0, pcis->n_B=0, n_I=0; i<pcis->n; i++) {

src/mat/impls/aij/mpi/mpiov.c

       ierr = MPI_Isend(sbuf_aa_i,req_size[i],MPIU_SCALAR,req_source[i],tag3,comm,s_waits4+i);CHKERRQ(ierr);
     }
   }
-  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status4);CHKERRQ(ierr);
-  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status4);CHKERRQ(ierr);
   ierr = PetscFree(rbuf1[0]);CHKERRQ(ierr);
   ierr = PetscFree(rbuf1);CHKERRQ(ierr);
+  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status4);CHKERRQ(ierr);
+  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status4);CHKERRQ(ierr);
 
   /* Form the matrix */
   /* create col map: global col of C -> local col of submatrices */

src/mat/impls/aij/mpi/mumps/mumps.c

   output:
     nnz     - dim of r, c, and v (number of local nonzero entries of A)
     r, c, v - row and col index, matrix values (matrix triples)
+
+  The returned values r, c, and sometimes v are obtained in a single PetscMalloc(). Then in MatDestroy_MUMPS() it is
+  freed with PetscFree(mumps->irn). This is not ideal code: the fact that v is ONLY sometimes part of mumps->irn means
+  that the PetscMalloc() cannot easily be replaced with a PetscMalloc3().
+
  */
 
 #undef __FUNCT__
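
To make the awkward shape concrete, here is a minimal, hypothetical sketch of the kind of allocation being described; the function name, the wantval flag, and the variable names are illustrative and are not the actual MUMPS interface code:

  static PetscErrorCode BuildTriples_Sketch(PetscInt nz,PetscBool wantval,PetscInt **irn,PetscInt **jcn,PetscScalar **v)
  {
    PetscErrorCode ierr;
    PetscInt       *row;

    PetscFunctionBegin;
    if (wantval) {
      /* values share the single block only in this branch; note the cast also makes alignment fragile */
      ierr = PetscMalloc(2*nz*sizeof(PetscInt)+nz*sizeof(PetscScalar),&row);CHKERRQ(ierr);
      *v   = (PetscScalar*)(row + 2*nz);
    } else {
      ierr = PetscMalloc(2*nz*sizeof(PetscInt),&row);CHKERRQ(ierr);
      *v   = NULL;
    }
    *irn = row;
    *jcn = row + nz;
    /* everything is later released by a single PetscFree() on irn; since v is only
       sometimes inside that block, a fixed PetscMalloc3() cannot replace this call */
    PetscFunctionReturn(0);
  }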

src/mat/impls/aij/seq/inode.c

   ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr);
   ierr = ISGetIndices(isicol,&ic);CHKERRQ(ierr);
 
-  ierr  = PetscMalloc((4*n+1)*sizeof(PetscScalar),&rtmp1);CHKERRQ(ierr);
-  ierr  = PetscMemzero(rtmp1,(4*n+1)*sizeof(PetscScalar));CHKERRQ(ierr);
-  rtmp2 = rtmp1 + n;
-  rtmp3 = rtmp2 + n;
-  rtmp4 = rtmp3 + n;
+  ierr  = PetscMalloc4(n,PetscScalar,&rtmp1,n,PetscScalar,&rtmp2,n,PetscScalar,&rtmp3,n,PetscScalar,&rtmp4);CHKERRQ(ierr);
+  ierr  = PetscMemzero(rtmp1,n*sizeof(PetscScalar));CHKERRQ(ierr);
+  ierr  = PetscMemzero(rtmp2,n*sizeof(PetscScalar));CHKERRQ(ierr);
+  ierr  = PetscMemzero(rtmp3,n*sizeof(PetscScalar));CHKERRQ(ierr);
+  ierr  = PetscMemzero(rtmp4,n*sizeof(PetscScalar));CHKERRQ(ierr);
   ics   = ic;
 
   node_max = a->inode.node_count;
     }
   } while (sctx.newshift);
 
-  ierr = PetscFree(rtmp1);CHKERRQ(ierr);
+  ierr = PetscFree4(rtmp1,rtmp2,rtmp3,rtmp4);CHKERRQ(ierr);
   ierr = PetscFree(tmp_vec2);CHKERRQ(ierr);
   ierr = ISRestoreIndices(isicol,&ic);CHKERRQ(ierr);
   ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr);
   ierr   = ISGetIndices(isrow,&r);CHKERRQ(ierr);
   ierr   = ISGetIndices(iscol,&c);CHKERRQ(ierr);
   ierr   = ISGetIndices(isicol,&ic);CHKERRQ(ierr);
-  ierr   = PetscMalloc((3*n+1)*sizeof(PetscScalar),&rtmp11);CHKERRQ(ierr);
-  ierr   = PetscMemzero(rtmp11,(3*n+1)*sizeof(PetscScalar));CHKERRQ(ierr);
+  ierr   = PetscMalloc3(n,PetscScalar,&rtmp11,n,PetscScalar,&rtmp22,n,PetscScalar,&rtmp33);CHKERRQ(ierr);
+  ierr   = PetscMemzero(rtmp11,n*sizeof(PetscScalar));CHKERRQ(ierr);
+  ierr   = PetscMemzero(rtmp22,n*sizeof(PetscScalar));CHKERRQ(ierr);
+  ierr   = PetscMemzero(rtmp33,n*sizeof(PetscScalar));CHKERRQ(ierr);
   ics    = ic;
-  rtmp22 = rtmp11 + n;
-  rtmp33 = rtmp22 + n;
 
   node_max = a->inode.node_count;
   ns       = a->inode.size;
     }
 endofwhile:;
   } while (sctx.newshift);
-  ierr = PetscFree(rtmp11);CHKERRQ(ierr);
+  ierr = PetscFree3(rtmp11,rtmp22,rtmp33);CHKERRQ(ierr);
   ierr = PetscFree(tmp_vec2);CHKERRQ(ierr);
   ierr = ISRestoreIndices(isicol,&ic);CHKERRQ(ierr);
   ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr);

src/mat/impls/aij/seq/matmatmult.c

   PetscLogDouble      flops=0.0;
   MatScalar           *aa  =a->a,*aval,*ba=b->a,*bval,*ca,*cval;
   Mat_MatMatTransMult *abt = c->abt;
-  
+
   PetscFunctionBegin;
   /* clear old values in C */
   if (!c->a) {
 
     /* C_dense = A*Bt_dense */
     ierr = MatMatMultNumeric_SeqAIJ_SeqDense(A,Bt_dense,C_dense);CHKERRQ(ierr);
-   
+
     /* Recover C from C_dense */
     ierr = MatTransColoringApplyDenToSp(matcoloring,C_dense,C);CHKERRQ(ierr);
     PetscFunctionReturn(0);
   Nbs       = mat->cmap->N/bs;
   c->M      = mat->rmap->N/bs;  /* set total rows, columns and local rows */
   c->N      = Nbs;
-  c->m      = c->M; 
+  c->m      = c->M;
   c->rstart = 0;
   c->brows  = 100;
 
   if (brows > 0) {
     ierr = PetscMalloc((nis+1)*sizeof(PetscInt),&c->lstart);CHKERRQ(ierr);
   }
-  
+
   colorforrow[0] = 0;
   rows_i         = rows;
   den2sp_i       = den2sp;

src/mat/impls/aij/seq/matptap.c

 
   PetscFunctionBegin;
   /* Allocate temporary array for storage of one row of A*P (cn: non-scalable) */
-  ierr = PetscMalloc(cn*(sizeof(MatScalar)+sizeof(PetscInt))+c->rmax*sizeof(PetscInt),&apa);CHKERRQ(ierr);
-
-  apjdense = (PetscInt*)(apa + cn);
-  apj      = apjdense + cn;
-  ierr     = PetscMemzero(apa,cn*(sizeof(MatScalar)+sizeof(PetscInt)));CHKERRQ(ierr);
+  ierr = PetscMalloc3(cn,MatScalar,&apa,cn,PetscInt,&apjdense,c->rmax,PetscInt,&apj);CHKERRQ(ierr);
+  ierr = PetscMemzero(apa,cn*sizeof(MatScalar));CHKERRQ(ierr);
+  ierr = PetscMemzero(apjdense,cn*sizeof(PetscInt));CHKERRQ(ierr);
 
   /* Clear old values in C */
   ierr = PetscMemzero(ca,ci[cm]*sizeof(MatScalar));CHKERRQ(ierr);
   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
 
-  ierr = PetscFree(apa);CHKERRQ(ierr);
+  ierr = PetscFree3(apa,apjdense,apj);CHKERRQ(ierr);
   PetscFunctionReturn(0);
 }
 

src/mat/impls/baij/mpi/mpb_baij.c

   ierr = MPI_Comm_rank(subComm,&subCommRank);CHKERRQ(ierr);
   ierr = PetscMalloc(subCommSize*sizeof(PetscMPIInt),&commRankMap);CHKERRQ(ierr);
   ierr = MPI_Allgather(&commRank,1,MPI_INT,commRankMap,1,MPI_INT,subComm);CHKERRQ(ierr);
-  
+
   /* Traverse garray and identify blocked column indices [of offdiag mat] that
    should be discarded. For the ones not discarded, store the newCol+1
    value in garrayCMap */

src/mat/impls/baij/mpi/mpibaij.c

     if (rank == size - 1 && rend != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);
 
     /* next, compute all the lengths */
-    ierr  = PetscMalloc((2*m+1)*sizeof(PetscInt),&dlens);CHKERRQ(ierr);
-    olens = dlens + m;
+    ierr  = PetscMalloc2(m+1,PetscInt,&dlens,m+1,PetscInt,&olens);CHKERRQ(ierr);
     for (i=0; i<m; i++) {
       jend = ii[i+1] - ii[i];
       olen = 0;
     ierr = MatSetSizes(M,bs*m,bs*nlocal,PETSC_DECIDE,bs*n);CHKERRQ(ierr);
     ierr = MatSetType(M,((PetscObject)mat)->type_name);CHKERRQ(ierr);
     ierr = MatMPIBAIJSetPreallocation(M,bs,0,dlens,0,olens);CHKERRQ(ierr);
-    ierr = PetscFree(dlens);CHKERRQ(ierr);
+    ierr = PetscFree2(dlens,olens);CHKERRQ(ierr);
   } else {
     PetscInt ml,nl;
 

src/sys/utils/mpits.c

   ierr     = MPI_Type_size(dtype,&unitbytes);CHKERRQ(ierr);
   ierr     = PetscMalloc(nrecvs*count*unitbytes,&fdata);CHKERRQ(ierr);
   tdata    = (char*)todata;
-  ierr     = PetscMalloc2(nto+nrecvs,MPI_Request,&reqs,nto+nrecvs,MPI_Status,&statuses);CHKERRQ(ierr);
-  sendreqs = reqs + nrecvs;
+  ierr     = PetscMalloc3(nrecvs,MPI_Request,&reqs,nto,MPI_Request,&sendreqs,nto+nrecvs,MPI_Status,&statuses);CHKERRQ(ierr);
   for (i=0; i<nrecvs; i++) {
     ierr = MPI_Irecv((void*)(fdata+count*unitbytes*i),count,dtype,MPI_ANY_SOURCE,tag,comm,reqs+i);CHKERRQ(ierr);
   }
   ierr = MPI_Waitall(nto+nrecvs,reqs,statuses);CHKERRQ(ierr);
   ierr = PetscMalloc(nrecvs*sizeof(PetscMPIInt),&franks);CHKERRQ(ierr);
   for (i=0; i<nrecvs; i++) franks[i] = statuses[i].MPI_SOURCE;
-  ierr = PetscFree2(reqs,statuses);CHKERRQ(ierr);
+  ierr = PetscFree3(reqs,sendreqs,statuses);CHKERRQ(ierr);
 
   *nfrom            = nrecvs;
   *fromranks        = franks;

src/vec/is/sf/impls/basic/sfbasic.c

   /* Send leaf identities to roots */
   for (i=0,bas->itotal=0; i<bas->niranks; i++) bas->itotal += ilengths[i];
   ierr = PetscMalloc2(bas->niranks+1,PetscInt,&bas->ioffset,bas->itotal,PetscInt,&bas->irootloc);CHKERRQ(ierr);
-  ierr = PetscMalloc((bas->niranks+sf->nranks)*sizeof(MPI_Request),&rootreqs);CHKERRQ(ierr);
-
-  leafreqs = rootreqs + bas->niranks;
+  ierr = PetscMalloc2(bas->niranks,MPI_Request,&rootreqs,sf->nranks,MPI_Request,&leafreqs);CHKERRQ(ierr);
   bas->ioffset[0] = 0;
   for (i=0; i<bas->niranks; i++) {
     bas->ioffset[i+1] = bas->ioffset[i] + ilengths[i];
     ierr = PetscMPIIntCast(sf->roffset[i+1] - sf->roffset[i],&npoints);CHKERRQ(ierr);
     ierr = MPI_Isend(sf->rremote+sf->roffset[i],npoints,MPIU_INT,sf->ranks[i],bas->tag,comm,&leafreqs[i]);CHKERRQ(ierr);
   }
-  ierr = MPI_Waitall(sf->nranks+bas->niranks,rootreqs,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
+  ierr = MPI_Waitall(bas->niranks,rootreqs,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
+  ierr = MPI_Waitall(sf->nranks,leafreqs,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
  1. Jed Brown

    I think that doing two MPI_Waitall calls here is a pessimization and would like to revert it. Also, why did you merge this to 'master' before merging to 'next'?

    1. BarryFSmith author

      I don't see that. If it were MPI_Waitany() then yes, you would want them all together. But any halfway decent MPI implementation processes everything as it comes in, so calling the waits in either order should not matter.

      If some MPI god told you that yes, this new code will kill performance, then you would also need to change the code so that only a single variable name is used for the entire array, rather than the "trick" of the second pointer pointing halfway into the first pointer's space. That is not good coding practice; it is confusing and bug prone.

   ierr = PetscFree(ilengths);CHKERRQ(ierr);
-  ierr = PetscFree(rootreqs);CHKERRQ(ierr);
+  ierr = PetscFree2(rootreqs,leafreqs);CHKERRQ(ierr);
   PetscFunctionReturn(0);
 }
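
As a footnote to the exchange above, the two variants differ only in whether the requests live in one contiguous array (one MPI_Waitall) or in two named arrays (two calls). A hedged sketch of both, where nrecv, nsend, and the function are placeholders and the actual MPI_Irecv/MPI_Isend posting loops are replaced by MPI_REQUEST_NULL initialization:

  /* hypothetical sketch, not the PetscSF code itself */
  static PetscErrorCode ExampleWaitStyles(PetscMPIInt nrecv,PetscMPIInt nsend)
  {
    PetscErrorCode ierr;
    MPI_Request    *reqs,*rootreqs,*leafreqs;
    PetscMPIInt    i;

    PetscFunctionBegin;
    /* before: one allocation, sends stored after receives, a single wait */
    ierr = PetscMalloc((nrecv+nsend)*sizeof(MPI_Request),&reqs);CHKERRQ(ierr);
    for (i=0; i<nrecv+nsend; i++) reqs[i] = MPI_REQUEST_NULL;  /* stands in for the posting loops */
    ierr = MPI_Waitall(nrecv+nsend,reqs,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
    ierr = PetscFree(reqs);CHKERRQ(ierr);

    /* after: two named arrays from one PetscMalloc2(), two waits */
    ierr = PetscMalloc2(nrecv,MPI_Request,&rootreqs,nsend,MPI_Request,&leafreqs);CHKERRQ(ierr);
    for (i=0; i<nrecv; i++) rootreqs[i] = MPI_REQUEST_NULL;
    for (i=0; i<nsend; i++) leafreqs[i] = MPI_REQUEST_NULL;
    ierr = MPI_Waitall(nrecv,rootreqs,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
    ierr = MPI_Waitall(nsend,leafreqs,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
    ierr = PetscFree2(rootreqs,leafreqs);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }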
 

src/vec/vec/impls/mpi/pdvec.c

   ierr = MPI_Comm_size(PetscObjectComm((PetscObject)xin),&size);CHKERRQ(ierr);
   if (!rank) {
     ierr = PetscDrawLGReset(lg);CHKERRQ(ierr);
-    ierr = PetscMalloc(2*(N+1)*sizeof(PetscReal),&xx);CHKERRQ(ierr);
+    ierr = PetscMalloc2(N,PetscReal,&xx,N,PetscReal,&yy);CHKERRQ(ierr);
     for (i=0; i<N; i++) xx[i] = (PetscReal) i;
-    yy   = xx + N;
     ierr = PetscMalloc(size*sizeof(PetscInt),&lens);CHKERRQ(ierr);
     for (i=0; i<size; i++) lens[i] = xin->map->range[i+1] - xin->map->range[i];
-    
+
 #if !defined(PETSC_USE_COMPLEX)
     ierr = MPI_Gatherv((void*)xarray,xin->map->n,MPIU_REAL,yy,lens,xin->map->range,MPIU_REAL,0,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
 #else
 #endif
     ierr = PetscFree(lens);CHKERRQ(ierr);
     ierr = PetscDrawLGAddPoints(lg,N,&xx,&yy);CHKERRQ(ierr);
-    ierr = PetscFree(xx);CHKERRQ(ierr);
+    ierr = PetscFree2(xx,yy);CHKERRQ(ierr);
   } else {
 #if !defined(PETSC_USE_COMPLEX)
     ierr = MPI_Gatherv((void*)xarray,xin->map->n,MPIU_REAL,0,0,0,MPIU_REAL,0,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);