Commits

Rio Yokota committed 442b9cc

wrapper.cxx now works.

  • Parent commits 3109d24

Files changed (4)

examples/Makefile

 	mpirun -np 32 ./a.out
 
 wrapper: wrapper.cxx $(KERNELS)
-	make -C ../wrapper libcoulomb.a
+	make -C ../wrappers libcoulomb.a
 	$(CXX) $? $(LFLAGS) -lcoulomb
 	mpirun -np 4 ./a.out

examples/wrapper.cxx

   const int irecv = (mpirank - 1 + mpisize) % mpisize;
   MPI_Request sreq, rreq;
 
-  MPI_Isend(var,n,MPI_DOUBLE,irecv,1,MPI_COMM_WORLD,&sreq);
-  MPI_Irecv(buf,n,MPI_DOUBLE,isend,1,MPI_COMM_WORLD,&rreq);
-  MPI_Wait(&sreq,MPI_STATUS_IGNORE);
-  MPI_Wait(&rreq,MPI_STATUS_IGNORE);
+  MPI_Isend(var, n, MPI_DOUBLE, irecv, 1, MPI_COMM_WORLD, &sreq);
+  MPI_Irecv(buf, n, MPI_DOUBLE, isend, 1, MPI_COMM_WORLD, &rreq);
+  MPI_Wait(&sreq, MPI_STATUS_IGNORE);
+  MPI_Wait(&rreq, MPI_STATUS_IGNORE);
   int i;
   for( i=0; i!=n; ++i ) {
     var[i] = buf[i];
 int main(int argc, char **argv) {
   MPI_Init(&argc,&argv);
   int mpisize, mpirank;
-  MPI_Comm_size(MPI_COMM_WORLD,&mpisize);
-  MPI_Comm_rank(MPI_COMM_WORLD,&mpirank);
+  MPI_Comm_size(MPI_COMM_WORLD, &mpisize);
+  MPI_Comm_rank(MPI_COMM_WORLD, &mpirank);
   const int N = 1000000;
   const double size = 2 * M_PI;
   double *xi     = new double [3*N];
     xj[3*i+2] = xi[3*i+2];
   }
   for( int irank=0; irank!=mpisize; ++irank ) {
-    MPI_Shift(xj,3*N,mpisize,mpirank);
-    MPI_Shift(qj,N,mpisize,mpirank);
+    MPI_Shift(xj, 3*N, mpisize, mpirank);
+    MPI_Shift(qj, N, mpisize, mpirank);
     for( int i=0; i!=100; ++i ) {
       double P = 0, Fx = 0, Fy = 0, Fz = 0;
       for( int j=0; j!=N; ++j ) {
 #ifndef mympi_h
 #define mympi_h
 #include <mpi.h>
+#include <cmath>
+#include <iostream>
 #include <typeinfo>
 
 //! Custom MPI utilities
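
The MPI_Shift calls above pass each rank's coordinate and charge buffers one step around a ring of processes, so that after mpisize shifts every rank has accumulated interactions against every other rank's bodies. A minimal sketch of that ring-shift pattern, assuming the counterpart index isend = (mpirank + 1) % mpisize and an illustrative helper name (not the exact wrapper.cxx code):

  #include <mpi.h>
  #include <vector>
  #include <cstdio>

  // Illustrative version of the shift used in examples/wrapper.cxx:
  // send our block to irecv, receive the next block from isend.
  void ringShift(double *var, int n, int mpisize, int mpirank) {
    const int isend = (mpirank + 1) % mpisize;            // rank whose block we receive (assumed)
    const int irecv = (mpirank - 1 + mpisize) % mpisize;  // rank our block is sent to
    std::vector<double> buf(n);
    MPI_Request sreq, rreq;
    MPI_Isend(var, n, MPI_DOUBLE, irecv, 1, MPI_COMM_WORLD, &sreq);
    MPI_Irecv(buf.data(), n, MPI_DOUBLE, isend, 1, MPI_COMM_WORLD, &rreq);
    MPI_Wait(&sreq, MPI_STATUS_IGNORE);
    MPI_Wait(&rreq, MPI_STATUS_IGNORE);
    for (int i = 0; i != n; ++i) var[i] = buf[i];         // overwrite with the received block
  }

  int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int mpisize, mpirank;
    MPI_Comm_size(MPI_COMM_WORLD, &mpisize);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpirank);
    const int n = 4;
    std::vector<double> x(n, double(mpirank));            // each rank starts with its own data
    for (int irank = 0; irank != mpisize; ++irank) {      // one full cycle brings the data home
      ringShift(x.data(), n, mpisize, mpirank);
      // ... accumulate partial sums against the current block here ...
    }
    std::printf("rank %d holds %g after a full cycle\n", mpirank, x[0]);
    MPI_Finalize();
    return 0;
  }

Each of the mpisize iterations works on a different rank's block, which is how the direct-sum loop in wrapper.cxx touches all sources without a global gather.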

wrappers/coulomb.cxx

 #include "parallelfmm.h"
 
 extern "C" void FMMcalccoulomb(int n, double* x, double* q, double *p, double* f, int periodicflag) {
-  IMAGES = ((periodicflag & 0x1) == 0) ? 0 : 3;
-  THETA = 0.6;
   Bodies bodies, jbodies;
   Cells cells, jcells;
   ParallelFMM FMM;
-  FMM.printNow = MPIRANK == 0;
+  FMM.NCRIT = 10;
+  FMM.NSPAWN = 1000;
+  FMM.IMAGES = ((periodicflag & 0x1) == 0) ? 0 : 3;
+  FMM.THETA = 0.6;
+  FMM.printNow = FMM.MPIRANK == 0;
 
   bodies.resize(n);
   for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
   }
 
   FMM.partition(bodies);
-  FMM.buildTree(bodies,cells);
+  FMM.buildTree(bodies, cells);
   FMM.upwardPass(cells);
   FMM.setLET(cells);
   FMM.commBodies();
   FMM.commCells();
-  FMM.evaluate(cells,cells);
+  FMM.evaluate(cells, cells);
   jbodies = bodies;
-  for( int irank=1; irank<MPISIZE; irank++ ) {
-    FMM.getLET(jcells,(MPIRANK+irank)%MPISIZE);
-    FMM.evaluate(cells,jcells);
+  for( int irank=1; irank<FMM.MPISIZE; irank++ ) {
+    FMM.getLET(jcells, (FMM.MPIRANK + irank) % FMM.MPISIZE);
+    FMM.evaluate(cells, jcells);
   }
   FMM.downwardPass(cells);
   FMM.unpartition(bodies);
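
FMMcalccoulomb is the C entry point this wrapper exports. A minimal sketch of a caller, assuming x and f hold 3*n coordinates and force components, q and p hold n charges and potentials, and that the caller initializes MPI (these conventions are inferred, not stated in the diff):

  #include <mpi.h>
  #include <vector>
  #include <cstdio>

  // Prototype of the entry point defined in wrappers/coulomb.cxx.
  extern "C" void FMMcalccoulomb(int n, double* x, double* q, double* p, double* f, int periodicflag);

  int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);                            // assumption: MPI setup is the caller's job
    const int n = 1000;
    std::vector<double> x(3*n), q(n), p(n, 0), f(3*n, 0);
    for (int i = 0; i != n; ++i) {                     // arbitrary test positions and charges
      x[3*i+0] = double(i) / n;
      x[3*i+1] = double(i % 10) / 10;
      x[3*i+2] = double(i % 100) / 100;
      q[i] = 1.0 / n;
    }
    // Bit 0 of periodicflag switches on periodic images (IMAGES = 3 in the wrapper above).
    FMMcalccoulomb(n, x.data(), q.data(), p.data(), f.data(), 0);
    std::printf("p[0] = %g, f[0] = %g\n", p[0], f[0]);
    MPI_Finalize();
    return 0;
  }

Linking follows the Makefile rule above: build libcoulomb.a in wrappers/ and link the caller with -lcoulomb.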