enzo-3.0/src/enzo/grid/utilities/Grid_DepositParticlePositions.C

/***********************************************************************
/
/  GRID CLASS (DEPOSIT PARTICLE POSITIONS ONTO THE SPECIFIED FIELD)
/
/  written by: Greg Bryan
/  date:       May, 1995
/  modified1:
/
/  PURPOSE:
/     This routine deposits the particles living in this grid into either
/       the GravitatingMassField or the GravitatingMassFieldParticles of
/       the TargetGrid, depending on the value of DepositField.
/     It also moves the particles in this grid so they are at the correct
/       time and adjusts their 'mass' to be appropriate for TargetGrid
/       (since particle 'mass' changes from level to level).
/
/  NOTE:
/
************************************************************************/
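
/* Example (a hypothetical call site, for orientation only; SubGrid and
   ParentGrid stand in for any pair of grid pointers):

     SubGrid->DepositParticlePositions(ParentGrid, ParentGrid->ReturnTime(),
                                       GRAVITATING_MASS_FIELD_PARTICLES);

   deposits the subgrid's particles, advanced to the parent's current time,
   into the parent's GravitatingMassFieldParticles. */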

#ifdef USE_MPI
#include "communicators.h"
#endif /* USE_MPI */

#include "preincludes.h"
#include "ErrorExceptions.h"
#include "macros_and_parameters.h"
#include "typedefs.h"
#include "global_data.h"
#include "Fluxes.h"
#include "GridList.h"
#include "ExternalBoundary.h"
#include "Grid.h"
#include "ActiveParticle.h"
#include "communication.h"
 
/* function prototypes */
 
extern "C" void PFORTRAN_NAME(cic_deposit)(FLOAT *posx, FLOAT *posy,
			FLOAT *posz, int *ndim, int *npositions,
                        float *densfield, float *field, FLOAT *leftedge,
			int *dim1, int *dim2, int *dim3, float *cellsize, 
			int *addaspoints);
 
extern "C" void PFORTRAN_NAME(smooth_deposit)(FLOAT *posx, FLOAT *posy,
			FLOAT *posz, int *ndim, int *npositions,
                        float *densfield, float *field, FLOAT *leftedge,
                        int *dim1, int *dim2, int *dim3, float *cellsize,
			float *rsmooth);
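
/* Both kernels are implemented in Fortran: cic_deposit spreads each
   particle's mass linearly over the nearest cells in each dimension
   (standard cloud-in-cell weighting), or deposits it into the single
   nearest cell when addaspoints is set, while smooth_deposit spreads it
   over a sphere of radius rsmooth. */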

#ifdef USE_MPI
int CommunicationBufferedSend(void *buffer, int size, MPI_Datatype Type, 
                              int Target, int Tag, MPI_Comm CommWorld, 
			      int BufferSize);
#endif /* USE_MPI */
double ReturnWallTime(void);

/* This controls the maximum particle mass which will be deposited in
   the MASS_FLAGGING_FIELD.  Only set in Grid_SetFlaggingField. */
 
float DepositParticleMaximumParticleMass = 0;
 
int grid::DepositParticlePositions(grid *TargetGrid, FLOAT DepositTime,
				   int DepositField)
{
 
  /* Return if this doesn't concern us. */
 
  if (TargetGrid->CommunicationMethodShouldExit(this) ||
      (NumberOfParticles == 0 && NumberOfActiveParticles == 0))
    return SUCCESS;
 
//  fprintf(stderr, "----DPP: MyPN = %"ISYM", PN = %"ISYM", TGPN = %"ISYM", DIR (R=1,S=2) = %"ISYM", NP = %"ISYM"\n",
//	  MyProcessorNumber, ProcessorNumber, TargetGrid->ProcessorNumber, CommunicationDirection, NumberOfParticles);
 
  /* Declarations. */
 
  int dim, i, j, k, size, index1, index2;
  int Dimension[] = {1,1,1};
  int OriginalDimension[] = {1,1,1};
  int Offset[] = {0,0,0};
  float MassFactor = 1.0, *ParticleMassTemp = NULL, *ParticleMassPointer, CellSize;
  float *ParticleMassPointerSink;    
  float TimeDifference = 0;
  FLOAT LeftEdge[MAX_DIMENSION], OriginalLeftEdge[MAX_DIMENSION];
  float *DepositFieldPointer, *OriginalDepositFieldPointer;

  /* 1) GravitatingMassField. */
 
  if (DepositField == GRAVITATING_MASS_FIELD) {
    if (TargetGrid->GravitatingMassFieldCellSize <= 0)
      TargetGrid->InitializeGravitatingMassField(RefineBy);
    DepositFieldPointer = TargetGrid->GravitatingMassField;
    CellSize            = float(TargetGrid->GravitatingMassFieldCellSize);
    for (dim = 0; dim < GridRank; dim++) {
      LeftEdge[dim]  = TargetGrid->GravitatingMassFieldLeftEdge[dim];
      Dimension[dim] = TargetGrid->GravitatingMassFieldDimension[dim];
    }
  }
 
  /* 2) GravitatingMassFieldParticles. */
 
  else if (DepositField == GRAVITATING_MASS_FIELD_PARTICLES) {
    if (TargetGrid->GravitatingMassFieldParticlesCellSize <= 0)
      TargetGrid->InitializeGravitatingMassFieldParticles(RefineBy);
    DepositFieldPointer = TargetGrid->GravitatingMassFieldParticles;
    CellSize            = float(TargetGrid->GravitatingMassFieldParticlesCellSize);
    for (dim = 0; dim < GridRank; dim++) {
      LeftEdge[dim]  = TargetGrid->GravitatingMassFieldParticlesLeftEdge[dim];
      Dimension[dim] = TargetGrid->GravitatingMassFieldParticlesDimension[dim];
    }
  }
 
  /* 3) MassFlaggingField */
 
  else if (DepositField == MASS_FLAGGING_FIELD) {
    DepositFieldPointer = TargetGrid->MassFlaggingField;
    CellSize            = float(TargetGrid->CellWidth[0][0]);
    for (dim = 0; dim < GridRank; dim++) {
      LeftEdge[dim]  = TargetGrid->CellLeftEdge[dim][0];
      Dimension[dim] = TargetGrid->GridDimension[dim];
    }
  }

  /* 4) ParticleMassFlaggingField */
 
//  else if (DepositField == PARTICLE_MASS_FLAGGING_FIELD) {
//    DepositFieldPointer = TargetGrid->ParticleMassFlaggingField;
//    CellSize            = float(TargetGrid->CellWidth[0][0]);
//    for (dim = 0; dim < GridRank; dim++) {
//      LeftEdge[dim]  = TargetGrid->CellLeftEdge[dim][0];
//      Dimension[dim] = TargetGrid->GridDimension[dim];
//    }
//  }
 
  /* 5) error */
 
  else {
    ENZO_VFAIL("DepositField = %"ISYM" not recognized.\n", DepositField)
  }  

  /* If on different processors, generate a temporary field to hold
     the density. */

  if (ProcessorNumber != TargetGrid->ProcessorNumber) {

    /* If this is the target grid processor, then record the original
       field characteristics so we can add it in when the data arrives. */

    for (dim = 0; dim < GridRank; dim++) {
      OriginalLeftEdge[dim] = LeftEdge[dim];
      OriginalDimension[dim] = Dimension[dim];
    }
    OriginalDepositFieldPointer = DepositFieldPointer;

    /* Resize the deposit region so it is just big enough to contain the
       grid where the particles reside. */

    size = 1;
    for (dim = 0; dim < GridRank; dim++) {
      LeftEdge[dim] = (int(GridLeftEdge[dim]/CellSize)-2)*CellSize;
      Offset[dim] = nint((LeftEdge[dim] - OriginalLeftEdge[dim])/CellSize);
      if (Offset[dim] < 0) {
	fprintf(stderr, "P(%d)(1): dx=%"GOUTSYM"/%"GOUTSYM" = %"GOUTSYM"\n",
		MyProcessorNumber, CellSize, CellWidth[0][0], 
		CellSize/CellWidth[0][0]);
	fprintf(stderr, "P(%d)(2): %"GOUTSYM" %"GOUTSYM" %"GOUTSYM"\n",
		MyProcessorNumber, OriginalLeftEdge[0], OriginalLeftEdge[1], 
		OriginalLeftEdge[2]);
	fprintf(stderr, "P(%d)(3): %"GOUTSYM" %"GOUTSYM" %"GOUTSYM"\n",
		MyProcessorNumber, GridLeftEdge[0], GridLeftEdge[1], 
		GridLeftEdge[2]);
	fprintf(stderr, "P(%d)(4): %"GOUTSYM" %"GOUTSYM" %"GOUTSYM"\n",
		MyProcessorNumber, GridRightEdge[0], GridRightEdge[1], 
		GridRightEdge[2]);
	fprintf(stderr, "P(%d)(5): %"GOUTSYM" %"GOUTSYM" %"GOUTSYM"\n",
		MyProcessorNumber, LeftEdge[0], LeftEdge[1], LeftEdge[2]);
	fprintf(stderr, "P(%d)(6): %d %d %d - %d %d %d\n",
		MyProcessorNumber, Offset[0], Offset[1], Offset[2],
		Dimension[0], Dimension[1], Dimension[2]);
	fprintf(stderr, "P(%d)(7): %"GOUTSYM" %d\n",
		MyProcessorNumber, (int(GridLeftEdge[dim]/CellSize)-2)*CellSize, 
		int(GridLeftEdge[dim]/CellSize));

	ENZO_VFAIL("Offset[%d] = %d < 0\n", dim, Offset[dim])
      }
      Dimension[dim] = int((GridRightEdge[dim] - LeftEdge[dim])/CellSize) + 3;
      size *= Dimension[dim];
    }
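
    /* The left edge was snapped onto the deposit mesh with a two-cell pad,
       and the +3 above pads the right side, so particles near the grid
       boundary still have room for their one-cell CIC cloud. */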

    /* Allocate a buffer to communicate the deposit region (unless we are in
       receive mode, in which case the buffer was already allocated when the
       receive was posted). */

#ifdef USE_MPI
    if (CommunicationDirection == COMMUNICATION_RECEIVE)
      DepositFieldPointer = 
	CommunicationReceiveBuffer[CommunicationReceiveIndex];
    else {
      DepositFieldPointer = new float[size];
      if (MyProcessorNumber == ProcessorNumber)
	for (i = 0; i < size; i++)
	  DepositFieldPointer[i] = 0;
    }
#endif /* USE_MPI */

  } // ENDIF different processors

  if (MyProcessorNumber == ProcessorNumber) {

    int addaspoints = 0;
    // To add child grids as points rather than with CIC, use the following line
    addaspoints = (CellSize > 1.5*CellWidth[0][0]) ? 1 : 0;
    
    /* If the Target is this grid and the DepositField is MassFlaggingField,
       then multiply the Particle density by the volume to get the mass. */

    if (this == TargetGrid && DepositField == MASS_FLAGGING_FIELD)
      for (dim = 0; dim < GridRank; dim++)
	MassFactor *= CellWidth[dim][0];
 
  /* If the DepositGrid and this grid are not the same, we must adjust the
     particle 'mass'. */
 
    if (this != TargetGrid) {
 
      /* Find the difference in resolution between this grid and TargetGrid. */
 
      float RefinementFactors[MAX_DIMENSION];
      this->ComputeRefinementFactorsFloat(TargetGrid, RefinementFactors);
 
      /* Compute the implied difference in 'mass' between particles in this
	 grid and those in TargetGrid. */
 
      for (dim = 0; dim < GridRank; dim++)
	MassFactor *= RefinementFactors[dim];
 
    } // ENDIF (this != TargetGrid)
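
    /* As noted in the header, a particle's 'mass' is really the density it
       contributes on its home grid's mesh, so depositing onto a mesh with a
       different cell width rescales it by the per-dimension ratio of cell
       sizes computed above. */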

    /* Check if we are smoothing. */

    int SmoothField = (DepositPositionsParticleSmoothRadius <= CellSize) ? FALSE : TRUE;
 
    /* If required, change the masses of the particles in this grid. */
 
    if (MassFactor != 1.0 || 
	((StarParticleCreation == (1 << SINK_PARTICLE)) && 
	 SmoothField == TRUE)) {
      ParticleMassTemp = new float[NumberOfParticles];
      for (i = 0; i < NumberOfParticles; i++)
	ParticleMassTemp[i] = ParticleMass[i]*MassFactor;
      ParticleMassPointer = ParticleMassTemp;
    } else
      ParticleMassPointer = ParticleMass;
 
    /* If the target field is MASS_FLAGGING_FIELD, then cap the masses of
       particles which are too large at DepositParticleMaximumParticleMass
       (to prevent runaway refinement). */
 
    if (DepositField == MASS_FLAGGING_FIELD &&
	DepositParticleMaximumParticleMass > 0 && MassFactor != 1.0)
      for (i = 0; i < NumberOfParticles; i++)
	ParticleMassPointer[i] = min(DepositParticleMaximumParticleMass,
				     ParticleMassPointer[i]);
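
    /* Note that the MassFactor != 1.0 guard means the clipping above is only
       ever applied to the temporary copy, never to the grid's own
       ParticleMass array. */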
 
    /* Compute difference between current time and DepositTime. */
 
    TimeDifference = DepositTime - Time;
 
    /* Move particles to positions at Time + TimeDifference. */
 
    // The second argument forces the update even if
    // MyProcessorNumber == TargetGrid->ProcessorNumber != this->ProcessorNumber.
    this->UpdateParticlePosition(TimeDifference, TRUE);

    /* Right now all active particles are deposited unsmoothed.  We will need
       to come back to this to optionally add smoothing. */

    if (NumberOfActiveParticles > 0) {
      FLOAT** ActiveParticlePosition = new FLOAT*[GridRank];
      for (dim = 0; dim < GridRank; dim++)
	ActiveParticlePosition[dim] = new FLOAT[NumberOfActiveParticles];
      this->GetActiveParticlePosition(ActiveParticlePosition);

      float *ActiveParticleMassPointer = new float[NumberOfActiveParticles];
      for (i = 0; i < NumberOfActiveParticles; i++) {
	ActiveParticleMassPointer[i] = ActiveParticles[i]->ReturnMass()*MassFactor;
      }

      PFORTRAN_NAME(cic_deposit)(
           ActiveParticlePosition[0], ActiveParticlePosition[1], ActiveParticlePosition[2], 
	   &GridRank, &NumberOfActiveParticles, ActiveParticleMassPointer, DepositFieldPointer, 
	   LeftEdge, Dimension, Dimension+1, Dimension+2, &CellSize, &addaspoints);

      for (dim = 0; dim < GridRank; dim++)
	delete [] ActiveParticlePosition[dim];
      delete [] ActiveParticlePosition;
      delete [] ActiveParticleMassPointer;
    }
 
    /* Deposit the regular particles. */

    if (SmoothField == FALSE) {

    /* Deposit to field using CIC. */
 
      //  fprintf(stderr, "------DP Call Fortran cic_deposit with CellSize = %"GSYM"\n", CellSize);
 

      PFORTRAN_NAME(cic_deposit)
	(ParticlePosition[0], ParticlePosition[1], ParticlePosition[2], 
	 &GridRank, &NumberOfParticles, ParticleMassPointer, DepositFieldPointer, 
	 LeftEdge, Dimension, Dimension+1, Dimension+2, &CellSize, &addaspoints);
    }
    else {

      /* Deposit to field using spherically smoothed CIC, with radius
	 DepositPositionsParticleSmoothRadius. */
 
      //  fprintf(stderr, "------DP Call Fortran smooth_deposit with DPPSmoothRadius = %"GSYM"\n", DepositPositionsParticleSmoothRadius);
 
      PFORTRAN_NAME(smooth_deposit)
	(ParticlePosition[0], ParticlePosition[1], ParticlePosition[2], &GridRank,
	 &NumberOfParticles, ParticleMassPointer, DepositFieldPointer, LeftEdge, 
	 Dimension, Dimension+1, Dimension+2, &CellSize, 
	 &DepositPositionsParticleSmoothRadius);
    }
    
  } // ENDIF this processor
 
  /* If on different processors, copy deposited field back to the
     target grid and add to the correct field. */

  if (ProcessorNumber != TargetGrid->ProcessorNumber) {
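
    /* This routine participates in Enzo's split communication pattern: it
       may be entered once to post the non-blocking receive
       (COMMUNICATION_POST_RECEIVE), once on the sending processor to send
       the buffer, and once more (COMMUNICATION_RECEIVE) to unpack the
       buffer after it has arrived. */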

#ifdef USE_MPI

    /* If posting a receive, then record details of call. */

    if (CommunicationDirection == COMMUNICATION_POST_RECEIVE) {
      CommunicationReceiveGridOne[CommunicationReceiveIndex]  = this;
      CommunicationReceiveGridTwo[CommunicationReceiveIndex]  = TargetGrid;
      CommunicationReceiveCallType[CommunicationReceiveIndex] = 3;
      CommunicationReceiveArgument[0][CommunicationReceiveIndex] = DepositTime;
      CommunicationReceiveArgumentInt[0][CommunicationReceiveIndex] =
	                                                         DepositField;
    }

    MPI_Status status;
    MPI_Datatype DataType = (sizeof(float) == 4) ? MPI_FLOAT : MPI_DOUBLE;
    MPI_Arg Count = size;
    MPI_Arg Source = ProcessorNumber;
    MPI_Arg Dest = TargetGrid->ProcessorNumber;

    double time1 = ReturnWallTime();

    if (MyProcessorNumber == ProcessorNumber)
      CommunicationBufferedSend(DepositFieldPointer, Count, DataType, 
			     Dest, MPI_SENDREGION_TAG, 
			     EnzoTopComm, BUFFER_IN_PLACE);

    if (MyProcessorNumber == TargetGrid->ProcessorNumber &&
	CommunicationDirection == COMMUNICATION_SEND_RECEIVE)
      MPI_Recv(DepositFieldPointer, Count, DataType, Source, 
	       MPI_SENDREGION_TAG, EnzoTopComm, &status);

    if (MyProcessorNumber == TargetGrid->ProcessorNumber &&
	CommunicationDirection == COMMUNICATION_POST_RECEIVE) {
      MPI_Irecv(DepositFieldPointer, Count, DataType, Source, 
	        MPI_SENDREGION_TAG, EnzoTopComm, 
	        CommunicationReceiveMPI_Request+CommunicationReceiveIndex);
      CommunicationReceiveBuffer[CommunicationReceiveIndex] = 
	                                                  DepositFieldPointer;
      CommunicationReceiveDependsOn[CommunicationReceiveIndex] =
	  CommunicationReceiveCurrentDependsOn;
      CommunicationReceiveIndex++;
    }      

    CommunicationTime += ReturnWallTime() - time1;

#endif /* USE_MPI */

    if (MyProcessorNumber == TargetGrid->ProcessorNumber &&
	CommunicationDirection != COMMUNICATION_POST_RECEIVE) {
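      /* Unpack: add the communicated sub-region into the full-sized field,
         shifted by Offset, on the target grid's processor. */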

      index1 = 0;
      for (k = 0; k < Dimension[2]; k++)
	for (j = 0; j < Dimension[1]; j++) {
	  index2 = ((k+Offset[2])*OriginalDimension[1] + j + Offset[1])*
	           OriginalDimension[0] + Offset[0];
	  for (i = 0; i < Dimension[0]; i++)
	    OriginalDepositFieldPointer[index2++] += 
	                                      DepositFieldPointer[index1++];
	}

      delete [] DepositFieldPointer;

    } // end: if (MyProcessorNumber == TargetGrid->ProcessorNumber)

  } // end: If (ProcessorNumber != TargetGrid->ProcessorNumber)

  if (MyProcessorNumber == ProcessorNumber) {

    /* Delete the particle mass temporary.  (ParticleMassTemp is NULL unless
       it was allocated above, so the delete is always safe and also covers
       the smoothed sink-particle case.) */

    delete [] ParticleMassTemp;

    /* Return particles to positions at Time. */

    this->UpdateParticlePosition(-TimeDifference);

  }
 
  return SUCCESS;
}