Commits

Matt Knepley committed fc4d687

DMSwarm: Initial non-working commit of Dave May's stuff

  • Parent commits fe34226
  • Branches knepley/swarm

Files changed (7)

include/petsc-private/dmswarmimpl.h

+#if !defined(_SWARMIMPL_H)
+#define _SWARMIMPL_H
+
+#include <petscdmswarm.h> /*I      "petscdmswarm.h"    I*/
+#include "petsc-private/dmimpl.h"
+
+PETSC_EXTERN PetscLogEvent DMSWARM_Advect;
+
+/*********** swarm_fields.h ****************/
+#define DEFAULT -32654789
+
+#define DATAFIELD_POINT_ACCESS_GUARD
+
+typedef enum {DATABUCKET_VIEW_STDOUT=0, DATABUCKET_VIEW_ASCII, DATABUCKET_VIEW_BINARY, DATABUCKET_VIEW_HDF5} DataBucketViewType;
+
+typedef struct _p_DataField* DataField;
+typedef struct _p_DataBucket* DataBucket;
+
+struct _p_DataField {
+  char     *registeration_function;
+  PetscInt  L;
+  PetscBool active;
+  size_t    atomic_size;
+  char     *name; /* what are they called */
+  void     *data; /* the data - an array of structs */
+};
+
+struct _p_DataBucket {
+  PetscInt   L; /* number in use */
+  PetscInt   buffer; /* memory buffer used for re-allocation */
+  PetscInt   allocated;  /* number allocated, this will equal datafield->L */
+  PetscBool  finalised;
+  PetscInt   nfields; /* how many fields of this type */
+  DataField *field; /* the data */
+};
+
+#define __DATATFIELD_point_access(data,index,atomic_size) (void*)((char*)(data) + (index)*(atomic_size))
+#define __DATATFIELD_point_access_offset(data,index,atomic_size,offset) (void*)((char*)(data) + (index)*(atomic_size) + (offset))
+
+void StringInList( const char name[], const PetscInt N, const DataField gfield[], PetscBool *val );
+void StringFindInList( const char name[], const PetscInt N, const DataField gfield[], PetscInt *index );
+
+void DataFieldCreate( const char registeration_function[], const char name[], const size_t size, const PetscInt L, DataField *DF );
+void DataFieldDestroy( DataField *DF );
+void DataBucketCreate( DataBucket *DB );
+void DataBucketDestroy( DataBucket *DB );
+void _DataBucketRegisterField(DataBucket db, const char registeration_function[], const char field_name[], size_t atomic_size, DataField *_gfield);
+
+#define DataBucketRegisterField(db,name,size,k) {\
+  char *location;\
+  asprintf(&location,"Registered by %s() at line %d within file %s", __FUNCTION__, __LINE__, __FILE__);\
+  _DataBucketRegisterField( (db), location, (name), (size), (k) );\
+  free(location);\
+}
+
+void DataFieldGetNumEntries(DataField df, PetscInt *sum);
+void DataFieldSetSize( DataField df, const PetscInt new_L );
+void DataFieldZeroBlock( DataField df, const PetscInt start, const PetscInt end );
+void DataFieldGetAccess( const DataField gfield );
+void DataFieldAccessPoint( const DataField gfield, const PetscInt pid, void **ctx_p );
+void DataFieldAccessPointOffset( const DataField gfield, const size_t offset, const PetscInt pid, void **ctx_p );
+void DataFieldRestoreAccess( DataField gfield );
+void DataFieldVerifyAccess( const DataField gfield, const size_t size);
+void DataFieldInsertPoint( const DataField field, const PetscInt index, const void *ctx ); 
+void DataFieldCopyPoint( const PetscInt pid_x, const DataField field_x,
+												const PetscInt pid_y, const DataField field_y );
+void DataFieldZeroPoint( const DataField field, const PetscInt index ); 
+
+void DataBucketGetDataFieldByName(DataBucket db,const char name[],DataField *gfield);
+void DataBucketQueryDataFieldByName(DataBucket db,const char name[],PetscBool *found);
+void DataBucketFinalize(DataBucket db);
+void DataBucketSetInitialSizes( DataBucket db, const PetscInt L, const PetscInt buffer );
+void DataBucketSetSizes( DataBucket db, const PetscInt L, const PetscInt buffer );
+void DataBucketGetSizes( DataBucket db, PetscInt *L, PetscInt *buffer, PetscInt *allocated );
+void DataBucketGetDataFields( DataBucket db, PetscInt *L, DataField *fields[] );
+
+void DataBucketCopyPoint( const DataBucket xb, const PetscInt pid_x,
+												 const DataBucket yb, const PetscInt pid_y );
+void DataBucketCreateFromSubset( DataBucket DBIn, const PetscInt N, const PetscInt list[], DataBucket *DB );
+void DataBucketZeroPoint( const DataBucket db, const PetscInt index );
+
+void DataBucketLoadFromFile(const char filename[], DataBucketViewType type, DataBucket *db);
+void DataBucketView(DataBucket db,const char filename[],DataBucketViewType type);
+
+void DataBucketAddPoint( DataBucket db );
+void DataBucketRemovePoint( DataBucket db );
+void DataBucketRemovePointAtIndex( const DataBucket db, const PetscInt index );
+/*********** swarm_fields.h ****************/
+
+typedef struct {
+  PetscInt        refct;
+  DataBucket      db;             /* Holds data on particles */
+  DM              vdm;            /* DM describing velocity data */
+  DataEx          ex;             /* Holds information for parallelism (replace with PetscSF) */
+  DMSwarmPlacement pointPlacement; /* Placement method for points */
+} DM_Swarm;
+
+#endif /* _SWARMIMPL_H */
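
The declarations above duplicate src/dm/impls/swarm/swarm_fields.h with int/BTruth swapped for PetscInt/PetscBool. A minimal sketch of how a caller drives this API, assuming a hypothetical Particle payload (the struct is not part of this commit; the "default" field name and the 2*sizeof(PetscScalar) point size mirror DMSetUp_Swarm below):

  #include <petsc-private/dmswarmimpl.h>

  typedef struct { PetscScalar coor[2]; } Particle; /* hypothetical per-point payload */

  void exampleFillBucket(void)
  {
    DataBucket db;
    DataField  df;
    Particle  *p;

    DataBucketCreate(&db);
    DataBucketRegisterField(db, "default", sizeof(Particle), NULL); /* macro records caller file/line/function */
    DataBucketFinalize(db);                  /* no further fields may be registered */
    DataBucketSetInitialSizes(db, 4, 1);     /* 4 points in use, 1 spare buffer slot */

    DataBucketGetDataFieldByName(db, "default", &df);
    DataFieldGetAccess(df);
    DataFieldAccessPoint(df, 0, (void**)&p); /* p aims at slot 0 of the field's array-of-structs */
    p->coor[0] = 0.5; p->coor[1] = 0.25;
    DataFieldRestoreAccess(df);

    DataBucketDestroy(&db);
  }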

include/petscdmswarm.h

+/*
+  DMSwarm, for parallel particle problems.
+*/
+#if !defined(__PETSCDMSWARM_H)
+#define __PETSCDMSWARM_H
+
+#include <petscdm.h>
+
+typedef enum {DM_SWARM_PLACEMENT_LATTICE, DM_SWARM_PLACEMENT_RANDOM} DMSwarmPlacement;
+
+/*S
+  DMSWARM - DM object that encapsulates a set of particles. These are often used as Lagrangian tracers and in the Material Point Method.
+
+  Level: intermediate
+
+  Concepts: particles
+
+.seealso:  DM, DMSwarmCreate()
+S*/
+PETSC_EXTERN PetscErrorCode DMSwarmCreate(MPI_Comm, DM*);
+PETSC_EXTERN PetscErrorCode DMSwarmClone(DM, DM*);
+
+#endif
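
A sketch of the intended public entry points. The commit message flags this as non-working: DMSetUp_Swarm() still needs a velocity DM and the Dave May layout/exchanger routines that are not included here, so only creation, options handling, and destruction are shown (and DMDestroy_Swarm() does not yet guard its NULL members):

  #include <petscdmswarm.h>

  int main(int argc, char **argv)
  {
    DM             swarm;
    PetscErrorCode ierr;

    ierr = PetscInitialize(&argc, &argv, NULL, NULL);CHKERRQ(ierr);
    ierr = DMSwarmCreate(PETSC_COMM_WORLD, &swarm);CHKERRQ(ierr);
    ierr = DMSetFromOptions(swarm);CHKERRQ(ierr); /* picks up -dm_swarm_point_placement lattice|random */
    /* DMSetUp(swarm) would invoke DMSetUp_Swarm() once the velocity DM can be attached */
    ierr = DMDestroy(&swarm);CHKERRQ(ierr);
    ierr = PetscFinalize();
    return 0;
  }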

src/dm/impls/swarm/makefile

+ALL: lib
+
+CPPFLAGS =
+CFLAGS   =
+FFLAGS   =
+SOURCEC  = swarmcreate.c swarm.c
+SOURCEF  =
+SOURCEH  =
+DIRS     = examples
+LIBBASE  = libpetscdm
+MANSEC   = DM
+SUBMANSEC= DMSWARM
+LOCDIR   = src/dm/impls/swarm/
+
+include ${PETSC_DIR}/conf/variables
+include ${PETSC_DIR}/conf/rules
+include ${PETSC_DIR}/conf/test

src/dm/impls/swarm/swarm.c

+#include <petsc-private/dmswarmimpl.h>   /*I      "petscdmswarm.h"   I*/
+
+/* Logging support */
+PetscLogEvent DMSWARM_Advect;
+
+#undef __FUNCT__
+#define __FUNCT__ "DMView_Swarm"
+PetscErrorCode DMView_Swarm(DM dm, PetscViewer viewer)
+{
+  PetscBool      iascii, isbinary;
+  PetscErrorCode ierr;
+
+  PetscFunctionBegin;
+  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
+  PetscValidHeaderSpecific(viewer, PETSC_VIEWER_CLASSID, 2);
+  ierr = PetscObjectTypeCompare((PetscObject) viewer, PETSCVIEWERASCII, &iascii);CHKERRQ(ierr);
+  ierr = PetscObjectTypeCompare((PetscObject) viewer, PETSCVIEWERBINARY, &isbinary);CHKERRQ(ierr);
+#if 0
+  if (iascii) {
+    ierr = DMSwarmView_Ascii(dm, viewer);CHKERRQ(ierr);
+  } else if (isbinary) {
+    ierr = DMSwarmView_Binary(dm, viewer);CHKERRQ(ierr);
+  }
+#endif
+  PetscFunctionReturn(0);
+}
+
+#undef __FUNCT__
+#define __FUNCT__ "DMDestroy_Swarm"
+PetscErrorCode DMDestroy_Swarm(DM dm)
+{
+  DM_Swarm      *swarm = (DM_Swarm*) dm->data;
+  PetscErrorCode ierr;
+
+  PetscFunctionBegin;
+  if (--swarm->refct > 0) PetscFunctionReturn(0);
+  ierr = DMDestroy(&swarm->vdm);CHKERRQ(ierr);
+  ierr = DataExDestroy(swarm->ex);CHKERRQ(ierr);
+  DataBucketDestroy(&swarm->db);
+  /* This was originally freed in DMDestroy(), but that prevents reference counting of backend objects */
+  ierr = PetscFree(swarm);CHKERRQ(ierr);
+  PetscFunctionReturn(0);
+}
+
+#undef __FUNCT__
+#define __FUNCT__ "DMSetUp_Swarm"
+PetscErrorCode DMSetUp_Swarm(DM dm)
+{
+  DM_Swarm      *swarm = (DM_Swarm*) dm->data;
+  PetscInt       pointSize = 2*sizeof(PetscScalar), initSize, maxSize;
+  PetscErrorCode ierr;
+
+  PetscFunctionBegin;
+  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
+  /* Create DataBucket */
+  DataBucketCreate(&swarm->db);
+  DataBucketRegisterField(swarm->db, "default", pointSize, PETSC_NULL);
+  DataBucketFinalize(swarm->db);
+  initSize = 1000; /* Could initialize based upon velocity DM */
+  maxSize  = initSize;
+  DataBucketSetInitialSizes(swarm->db, initSize, maxSize);
+  /* Set point coordinates */
+  if (swarm->pointPlacement == DM_SWARM_PLACEMENT_LATTICE) {
+    PetscInt  Nxp[]   = {2,2}; /* change with -lattice_layout_N{x,y,z} */
+    PetscReal perturb = 0.1;   /* change with -lattice_layout_perturb */
+
+    ierr = SwarmMPntStd_CoordAssignment_LatticeLayout2d(swarm->vdm, Nxp, perturb, swarm->db);CHKERRQ(ierr);
+  } else if (swarm->pointPlacement == DM_SWARM_PLACEMENT_RANDOM) {
+    PetscInt nPerCell = 9; /* change with -random_layout_Np */
+
+    ierr = SwarmMPntStd_CoordAssignment_RandomLayout2d(swarm->vdm, nPerCell, swarm->db);CHKERRQ(ierr);
+  }
+  /* Create the data exchanger needed for parallel particle movement */
+  ierr = SwarmDMDA2dDataExchangerCreate(swarm->vdm, &swarm->ex);CHKERRQ(ierr);
+  PetscFunctionReturn(0);
+}
+
+#undef __FUNCT__
+#define __FUNCT__ "DMCreateCoordinateDM_Swarm"
+PetscErrorCode DMCreateCoordinateDM_Swarm(DM dm, DM *cdm)
+{
+  PetscSection   section;
+  PetscErrorCode ierr;
+
+  PetscFunctionBegin;
+  ierr = DMSwarmClone(dm, cdm);CHKERRQ(ierr);
+  ierr = PetscSectionCreate(((PetscObject) dm)->comm, &section);CHKERRQ(ierr);
+  ierr = DMSetDefaultSection(*cdm, section);CHKERRQ(ierr);
+  PetscFunctionReturn(0);
+}

src/dm/impls/swarm/swarm_fields.c

+/*
+
+gcc -O3 -g -c swarm_fields.c
+ 
+ // GENERIC //
+ // insert into an existing location
+ SwarmFieldInsertPoint( field, index, ctx ) 
+ // remove data at index - replace with last point
+ SwarmFieldRemovePoint( field, index, ctx )
+
+
+ // USER PROVIDED //
+ ParticleDataLoad_XXX
+ ParticleDataView_XXX
+ 
+ Usage:
+ 
+ 
+ DataBucketGetKeyByName(db,"particle_data_standard",&K);
+ DataFieldGetAccess( K, &gfield );
+ DataFieldVerifyAccess( gfield,sizeof(ParticleData_Standard));
+
+ // LOOP
+ DataFieldAccessPoint(gfield,index,(void**)&stdParticle);
+
+ // REPLACE 
+ ParticleData_Standard my_point;
+ ParticleDataLoad_Standard( &my_point, LOAD_CRAP_HERE );
+ SwarmFieldInsertPoint( field, index, &my_point );
+ 
+ DataFieldRestoreAccess( K, &gfield );
+ 
+ // EXPAND - SHRINK set via 
+ DataBucketSetSizes(db,newvalue, buffer); // -ve buffer val retains old value //
+ 
+ 
+ 
+ Proposed memory management for adding / deleting particles
+ 
+ A: active
+ B: buffer / inactive
+ D: deactivated - formerly active point which was deleted
+
+ initial memory layout
+ AAAAAAAAAAAAAAAAA BBBBB
+
+ 1) fill up all buffer spots, then we reallocate
+ AAAAAAAAAAAAAAAAA AAAAA (reallocate larger)
+ AAAAAAAAAAAAAAAAA AAAAA BBBBB [RESULT]
+
+ 
+ 2) deactivate points, then we reallocate smaller
+ AAAAAAAAAAAAAA DDD BBBBB
+ AAAAAAAAAAAA DDDDD BBBBB (reallocate smaller)
+ AAAAAAAAAAAA BBBBB [RESULT]
+ 
+*/
+
+
+#include "stdio.h"
+#include "stdlib.h"
+#include "string.h"
+#include "math.h"
+
+
+#include "swarm_fields.h"
+
+
+/* string helpers */
+void StringInList( const char name[], const int N, const DataField gfield[], BTruth *val )
+{
+	int i;
+	
+	*val = BFALSE;
+	for( i=0; i<N; i++ ) {
+		if( strcmp( name, gfield[i]->name ) == 0 ) {
+			*val = BTRUE;
+			return;
+		}
+	}
+}
+
+void StringFindInList( const char name[], const int N, const DataField gfield[], int *index )
+{
+	int i;
+	
+	*index = -1;
+	for( i=0; i<N; i++ ) {
+		if( strcmp( name, gfield[i]->name ) == 0 ) {
+			*index = i;
+			return;
+		}
+	}
+}
+
+void DataFieldCreate( const char registeration_function[], const char name[], const size_t size, const int L, DataField *DF )
+{
+	DataField df;
+	int i;
+	
+	df = malloc( sizeof(struct _p_DataField) );
+	memset( df, 0, sizeof(struct _p_DataField) ); 
+	
+	
+	asprintf( &df->registeration_function, "%s", registeration_function );
+	asprintf( &df->name, "%s", name );
+	df->atomic_size = size;
+	df->L = L;
+	
+	df->data = malloc( size * L ); /* allocate something so we don't have to reallocate */
+	memset( df->data, 0, size * L );
+	
+	*DF = df;
+}
+
+void DataFieldDestroy( DataField *DF )
+{
+	DataField df = *DF;
+	int i;
+	
+	free( df->registeration_function );
+	free( df->name );
+	free( df->data );
+	free(df);
+	
+	*DF = NULL;
+}
+
+/* data bucket */
+void DataBucketCreate( DataBucket *DB )
+{
+	DataBucket db;
+	
+	
+	db = malloc( sizeof(struct _p_DataBucket) );
+	memset( db, 0, sizeof(struct _p_DataBucket) );
+
+	db->finalised = BFALSE;
+	
+	/* create empty spaces for fields */
+	db->L         = 0;
+	db->buffer    = 1;
+	db->allocated = 1;
+
+	db->nfields   = 0;
+	db->field     = malloc(sizeof(DataField));
+	
+	*DB = db;
+}
+
+void DataBucketDestroy( DataBucket *DB )
+{
+	DataBucket db = *DB;
+	int f;
+	
+	/* release fields */
+	for( f=0; f<db->nfields; f++ ) {
+		DataFieldDestroy(&db->field[f]);	
+	}
+
+	/* this will catch the initially allocated objects in the event that no fields are registered */
+	if(db->field!=NULL) {
+		free(db->field);
+	}
+	
+	free(db);
+	
+	*DB = NULL;
+}
+
+void _DataBucketRegisterField(
+						DataBucket db,
+						const char registeration_function[],
+						const char field_name[],
+						size_t atomic_size, DataField *_gfield )
+{
+	BTruth val;
+	DataField *field,fp;
+	
+	/* check we haven't finalised the registration of fields */
+	if(db->finalised==BTRUE) {
+		printf("ERROR: DataBucketFinalize() has been called. Cannot register more fields\n");
+		ERROR();
+	}
+	
+	/* check for repeated name */
+	StringInList( field_name, db->nfields, (const DataField*)db->field, &val );
+	if(val == BTRUE ) {
+		printf("ERROR: Cannot add same field twice\n");
+		ERROR();
+	}
+
+	/* create new space for data */
+	field = realloc( db->field,     sizeof(DataField)*(db->nfields+1));
+	db->field     = field;
+	
+	/* add field */
+	DataFieldCreate( registeration_function, field_name, atomic_size, db->allocated, &fp );
+	db->field[ db->nfields ] = fp;
+	
+	db->nfields++;
+	
+	if(_gfield!=NULL){
+		*_gfield = fp;
+	}
+}
+
+/*
+#define DataBucketRegisterField(db,name,size,k) {\
+  char *location;\
+  asprintf(&location,"Registered by %s() at line %d within file %s", __FUNCTION__, __LINE__, __FILE__);\
+  _DataBucketRegisterField( (db), location, (name), (size), (k) );\
+  free(location);\
+}
+*/
+
+void DataBucketGetDataFieldByName(DataBucket db,const char name[],DataField *gfield)
+{
+	int idx;
+	BTruth found;
+	
+	StringInList(name,db->nfields,(const DataField*)db->field,&found);
+	if(found==BFALSE) {
+		printf("ERROR: Cannot find DataField with name %s \n", name );
+		ERROR();
+	}
+	StringFindInList(name,db->nfields,(const DataField*)db->field,&idx);
+		
+	*gfield = db->field[idx];
+}
+
+void DataBucketQueryDataFieldByName(DataBucket db,const char name[],BTruth *found)
+{
+	*found = BFALSE;
+	StringInList(name,db->nfields,(const DataField*)db->field,found);
+}
+
+void DataBucketFinalize(DataBucket db)
+{
+	db->finalised = BTRUE;
+}
+
+void DataFieldGetNumEntries(DataField df, int *sum)
+{
+	*sum = df->L;
+}
+
+void DataFieldSetSize( DataField df, const int new_L )
+{
+	int i;
+	void *tmp_data;
+	
+	if( new_L <= 0 ) {
+		printf("ERROR: Cannot set size of DataField to be <= 0 \n");
+		ERROR();
+	}
+	if( new_L == df->L ) return;
+	
+	if( new_L > df->L ) {
+		
+		tmp_data = realloc( df->data, df->atomic_size * (new_L) );
+		df->data = tmp_data;
+		
+		/* init new contents */
+		memset( (df->data+df->L*df->atomic_size), 0, (new_L-df->L)*df->atomic_size );
+		
+	}
+	else {
+		/* reallocate pointer list, add +1 in case new_L = 0 */
+		tmp_data = realloc( df->data, df->atomic_size * (new_L+1) );
+		df->data = tmp_data;
+	}
+	
+	df->L = new_L;
+}
+
+void DataFieldZeroBlock( DataField df, const int start, const int end )
+{
+	int i;
+	void *tmp_data;
+	
+	if( start > end ) {
+		printf("ERROR: Cannot zero a block of entries if start(%d) > end(%d) \n",start,end);
+		ERROR();
+	}
+	if( start < 0 ) {
+		printf("ERROR: Cannot zero a block of entries if start(%d) < 0 \n",start);
+		ERROR();
+	}
+	if( end > df->L ) {
+		printf("ERROR: Cannot zero a block of entries if end(%d) >= array size(%d) \n",end,df->L);
+		ERROR();
+	}
+	
+	memset( (df->data+start*df->atomic_size), 0, (end-start)*df->atomic_size );
+}
+
+/*
+ A negative buffer value will simply be ignored and the old buffer value will be used.
+ */
+void DataBucketSetSizes( DataBucket db, const int L, const int buffer )
+{
+	int currentlength,newlength,current_allocated,new_used,new_unused,new_buffer,new_allocated,f;
+	
+	if( db->finalised == BFALSE ) {
+		printf("ERROR: You must call DataBucketFinalize() before DataBucketSetSizes() \n");
+		ERROR();
+	}
+	
+	current_allocated = db->allocated;
+	
+	new_used   = L;
+	new_unused = current_allocated - new_used;
+	new_buffer = db->buffer;
+	if( buffer >= 0 ) { /* update the buffer value */
+		new_buffer = buffer;
+	}
+	new_allocated = new_used + new_buffer;
+	
+	/* action */
+	if ( new_allocated > current_allocated ) {
+		/* increase size to new_used + new_buffer */
+		for( f=0; f<db->nfields; f++ ) {
+			DataFieldSetSize( db->field[f], new_allocated );
+		}
+		
+		db->L         = new_used;
+		db->buffer    = new_buffer;
+		db->allocated = new_used + new_buffer;
+	}
+	else {
+		if( new_unused > 2 * new_buffer ) {
+			
+			/* shrink array to new_used + new_buffer */
+			for( f=0; f<db->nfields; f++ ) {
+				DataFieldSetSize( db->field[f], new_allocated );
+			}
+			
+			db->L         = new_used;
+			db->buffer    = new_buffer;
+			db->allocated = new_used + new_buffer;
+		}
+		else {
+			db->L      = new_used;
+			db->buffer = new_buffer;
+		}
+	}
+	
+	/* zero all entries from db->L to db->allocated */
+	for( f=0; f<db->nfields; f++ ) {
+		DataField field = db->field[f];
+		DataFieldZeroBlock(field, db->L,db->allocated);
+	}
+}
+
+void DataBucketSetInitialSizes( DataBucket db, const int L, const int buffer )
+{
+	int f;
+	DataBucketSetSizes(db,L,buffer);
+	
+	for( f=0; f<db->nfields; f++ ) {
+		DataField field = db->field[f];
+		DataFieldZeroBlock(field,0,db->allocated);
+	}
+}
+
+void DataBucketGetSizes( DataBucket db, int *L, int *buffer, int *allocated )
+{
+	if(L){ *L = db->L; }
+	if(buffer){ *buffer = db->buffer; }
+	if(allocated){ *allocated = db->allocated; }
+}
+void DataBucketGetDataFields( DataBucket db, int *L, DataField *fields[] )
+{
+	if(L){      *L      = db->nfields; }
+	if(fields){ *fields = db->field; }
+}
+
+
+void DataFieldGetAccess( const DataField gfield )
+{
+	if(gfield->active==BTRUE) {
+		printf("ERROR: Field \"%s\" is already active. You must call DataFieldRestoreAccess()\n", gfield->name );
+		ERROR();
+	}
+	gfield->active = BTRUE;
+}
+
+void DataFieldAccessPoint( const DataField gfield, const int pid, void **ctx_p )
+{
+#ifdef DATAFIELD_POINT_ACCESS_GUARD
+	/* debug mode */
+	/* check point is valid */
+	if( pid < 0 ){ printf("ERROR: index must be >= 0\n"); ERROR();  }
+	if( pid >= gfield->L ){ printf("ERROR: index must be < %d\n",gfield->L); ERROR(); }
+
+	if(gfield->active==BFALSE) {
+		printf("ERROR: Field \"%s\" is not active. You must call DataFieldGetAccess() before point data can be retrieved\n",gfield->name);
+		ERROR();
+	}
+#endif
+	
+	//*ctx_p  = (void*)( ((char*)gfield->data) + pid * gfield->atomic_size);
+	*ctx_p = __DATATFIELD_point_access(gfield->data,pid,gfield->atomic_size);
+}
+
+void DataFieldAccessPointOffset( const DataField gfield, const size_t offset, const int pid, void **ctx_p )
+{
+#ifdef DATAFIELD_POINT_ACCESS_GUARD
+	/* debug mode */
+	
+	/* check point is valid */
+	if( offset < 0 ){ printf("ERROR: offset must be >= 0\n"); ERROR();  }
+	if( offset >= gfield->atomic_size ){ printf("ERROR: offset must be < %zu\n",gfield->atomic_size); ERROR(); }
+	
+	/* check point is valid */
+	if( pid < 0 ){ printf("ERROR: index must be >= 0\n"); ERROR();  }
+	if( pid >= gfield->L ){ printf("ERROR: index must be < %d\n",gfield->L); ERROR(); }
+
+	if(gfield->active==BFALSE) {
+		printf("ERROR: Field \"%s\" is not active. You must call DataFieldGetAccess() before point data can be retrieved\n",gfield->name);
+		ERROR();
+	}
+#endif
+	
+	*ctx_p = __DATATFIELD_point_access_offset(gfield->data,pid,gfield->atomic_size,offset);
+}
+
+
+void DataFieldRestoreAccess( DataField gfield )
+{
+	if(gfield->active==BFALSE) {
+		printf("ERROR: Field \"%s\" is not active. You must call DataFieldGetAccess()\n", gfield->name );
+		ERROR();
+	}
+	gfield->active = BFALSE;
+}
+
+/* y = x */
+void DataBucketCopyPoint( const DataBucket xb, const int pid_x,
+												  const DataBucket yb, const int pid_y )
+{
+	int f;
+	for( f=0; f<xb->nfields; f++ ) {
+		void *dest;
+		void *src;
+		
+		DataFieldGetAccess( xb->field[f] );
+		if (xb!=yb) { DataFieldGetAccess( yb->field[f] ); }
+		
+		DataFieldAccessPoint( xb->field[f],pid_x, &src );
+		DataFieldAccessPoint( yb->field[f],pid_y, &dest );
+		
+		memcpy( dest, src, xb->field[f]->atomic_size );
+		
+		DataFieldRestoreAccess( xb->field[f] );
+		if (xb!=yb) { DataFieldRestoreAccess( yb->field[f] ); }
+	}
+	
+}
+
+void DataBucketCreateFromSubset( DataBucket DBIn, const int N, const int list[], DataBucket *DB )
+{
+	int nfields;
+	DataField *fields;
+	int f,L,buffer,allocated,p;
+	
+	DataBucketCreate(DB);
+	
+	/* copy contents of DBIn */
+	DataBucketGetDataFields(DBIn,&nfields,&fields);
+	DataBucketGetSizes(DBIn,&L,&buffer,&allocated);
+	
+	for(f=0;f<nfields;f++) {
+		DataBucketRegisterField(*DB,fields[f]->name,fields[f]->atomic_size,NULL);
+	}
+	DataBucketFinalize(*DB);
+	
+	DataBucketSetSizes(*DB,L,buffer);
+	
+	/* now copy the desired guys from DBIn => DB */
+	for( p=0; p<N; p++ ) {
+		DataBucketCopyPoint(DBIn,list[p], *DB,p);
+	}
+	
+}
+
+void DataFieldVerifyAccess( const DataField gfield, const size_t size)
+{
+#ifdef DATAFIELD_POINT_ACCESS_GUARD
+	if(gfield->atomic_size != size ) {
+			printf("ERROR: Field \"%s\" must be mapped to %zu bytes, your intended structure is %zu bytes in length.\n",
+						 gfield->name, gfield->atomic_size, size );
+		ERROR();
+	}
+#endif
+}
+
+// insert into an existing location
+void DataFieldInsertPoint( const DataField field, const int index, const void *ctx ) 
+{
+
+#ifdef DATAFIELD_POINT_ACCESS_GUARD
+	/* check point is valid */
+	if( index < 0 ){ printf("ERROR: index must be >= 0\n"); ERROR();  }
+	if( index >= field->L ){ printf("ERROR: index must be < %d\n",field->L); ERROR(); }
+#endif
+	
+//	memcpy( (void*)((char*)field->data + index*field->atomic_size), ctx, field->atomic_size );
+	memcpy( __DATATFIELD_point_access(field->data,index,field->atomic_size), ctx, field->atomic_size );
+}
+
+// remove data at index - replace with last point
+void DataBucketRemovePointAtIndex( const DataBucket db, const int index )
+{
+	int f;
+	
+#ifdef DATAFIELD_POINT_ACCESS_GUARD
+	/* check point is valid */
+	if( index < 0 ){ printf("ERROR: index must be >= 0\n"); ERROR(); }
+	if( index >= db->allocated ){ printf("ERROR: index must be < %d\n",db->L+db->buffer); ERROR(); }
+#endif	
+	
+	if (index >= db->L) { /* this point is not in the list - no need to error, but I will anyway */
+		printf("ERROR: You should not be trying to remove point at index=%d since it's >= db->L = %d \n", index, db->L );
+		ERROR();
+	}
+	
+#if 0	
+	if (index == db->L-1) { /* last point in list */
+		for( f=0; f<db->nfields; f++ ) {
+			DataField field = db->field[f];
+
+			DataFieldZeroPoint(field,index);
+		}
+	}
+	else {
+		for( f=0; f<db->nfields; f++ ) {
+			DataField field = db->field[f];
+
+			/* copy then remove */
+			DataFieldCopyPoint( db->L-1,field, index,field ); 
+			
+			DataFieldZeroPoint(field,index);
+		}
+	}
+#endif
+
+	if (index != db->L-1) { /* not last point in list */
+		for( f=0; f<db->nfields; f++ ) {
+			DataField field = db->field[f];
+			
+			/* copy then remove */
+			DataFieldCopyPoint( db->L-1,field, index,field ); 
+			
+			//DataFieldZeroPoint(field,index);
+		}
+	}
+	
+	/* decrement size */
+	/* this will zero out any crap at the end of the list */
+	DataBucketRemovePoint(db);
+	
+}
+
+/* copy x into y */
+void DataFieldCopyPoint( const int pid_x, const DataField field_x,
+												 const int pid_y, const DataField field_y ) 
+{
+
+#ifdef DATAFIELD_POINT_ACCESS_GUARD	
+	/* check point is valid */
+	if( pid_x < 0 ){ printf("ERROR: (IN) index must be >= 0\n"); ERROR(); }
+	if( pid_x >= field_x->L ){ printf("ERROR: (IN) index must be < %d\n",field_x->L); ERROR(); }
+
+	if( pid_y < 0 ){ printf("ERROR: (OUT) index must be >= 0\n"); ERROR(); }
+	if( pid_y >= field_y->L ){ printf("ERROR: (OUT) index must be < %d\n",field_y->L); ERROR(); }
+
+	if( field_y->atomic_size != field_x->atomic_size ) {
+		printf("ERROR: atomic size must match \n"); ERROR();
+	}
+#endif	
+	/*
+	memcpy( (void*)((char*)field_y->data + pid_y*field_y->atomic_size), 
+					(void*)((char*)field_x->data + pid_x*field_x->atomic_size), 
+				  field_x->atomic_size );
+	*/
+	memcpy(		__DATATFIELD_point_access(field_y->data,pid_y,field_y->atomic_size),
+						__DATATFIELD_point_access(field_x->data,pid_x,field_x->atomic_size),
+						field_y->atomic_size );
+	
+}
+
+
+// zero only the datafield at this point
+void DataFieldZeroPoint( const DataField field, const int index ) 
+{
+#ifdef DATAFIELD_POINT_ACCESS_GUARD
+	/* check point is valid */
+	if( index < 0 ){ printf("ERROR: index must be >= 0\n"); ERROR(); }
+	if( index >= field->L ){ printf("ERROR: index must be < %d\n",field->L); ERROR(); }
+#endif	
+	
+//	memset( (void*)((char*)field->data + index*field->atomic_size), 0, field->atomic_size );
+	memset( __DATATFIELD_point_access(field->data,index,field->atomic_size), 0, field->atomic_size );
+}
+
+// zero ALL data for this point
+void DataBucketZeroPoint( const DataBucket db, const int index ) 
+{
+	int f;
+	
+	/* check point is valid */
+	if( index < 0 ){ printf("ERROR: index must be >= 0\n"); ERROR(); }
+	if( index >= db->allocated ){ printf("ERROR: index must be < %d\n",db->allocated); ERROR(); }
+	
+	for(f=0;f<db->nfields;f++){
+		DataField field = db->field[f];
+		
+		DataFieldZeroPoint(field,index);
+	}	
+}
+
+/* increment */
+void DataBucketAddPoint( DataBucket db )
+{
+	DataBucketSetSizes( db, db->L+1, -1 );
+}
+/* decrement */
+void DataBucketRemovePoint( DataBucket db )
+{
+	DataBucketSetSizes( db, db->L-1, -1 );
+}
+
+void _DataFieldViewBinary(DataField field, FILE *fp )
+{
+	fprintf(fp,"<DataField>\n");
+	fprintf(fp,"%d\n", field->L);
+	fprintf(fp,"%zu\n",field->atomic_size);
+	fprintf(fp,"%s\n", field->registeration_function);
+	fprintf(fp,"%s\n", field->name);
+	
+	fwrite(field->data, field->atomic_size, field->L, fp);
+	printf("  ** wrote %zu bytes for DataField \"%s\" \n", field->atomic_size * field->L, field->name );
+	fprintf(fp,"\n</DataField>\n");
+}
+
+void _DataBucketRegisterFieldFromFile( FILE *fp, DataBucket db )
+{
+	BTruth val;
+	DataField *field;
+
+	DataField gfield;
+	char dummy[100];
+	char registeration_function[5000];
+	char field_name[5000];
+	int L;
+	size_t atomic_size,strL;
+	
+	
+	/* check we haven't finalised the registration of fields */
+	if(db->finalised==BTRUE) {
+		printf("ERROR: DataBucketFinalize() has been called. Cannot register more fields\n");
+		ERROR();
+	}
+
+	/* read file contents */
+	fgets(dummy,99,fp); //printf("read(header): %s", dummy );
+	
+	fscanf( fp, "%d\n",&L); //printf("read(L): %d\n", L);
+	
+	fscanf( fp, "%zu\n",&atomic_size); //printf("read(size): %zu\n",atomic_size);
+	
+	fgets(registeration_function,4999,fp); //printf("read(reg func): %s", registeration_function );
+	strL = strlen(registeration_function);
+	if(strL>1){ 
+		registeration_function[strL-1] = 0;
+	}
+	
+	fgets(field_name,4999,fp); //printf("read(name): %s", field_name );
+	strL = strlen(field_name);
+	if(strL>1){ 
+		field_name[strL-1] = 0;
+	}
+		
+	printf("  ** read L=%d; atomic_size=%zu; reg_func=\"%s\"; name=\"%s\" \n", L,atomic_size,registeration_function,field_name);
+	
+	
+	/* check for repeated name */
+	StringInList( field_name, db->nfields, (const DataField*)db->field, &val );
+	if(val == BTRUE ) {
+		printf("ERROR: Cannot add same field twice\n");
+		ERROR();
+	}
+	
+	/* create new space for data */
+	field = realloc( db->field,     sizeof(DataField)*(db->nfields+1));
+	db->field     = field;
+	
+	/* add field */
+	DataFieldCreate( registeration_function, field_name, atomic_size, L, &gfield );
+
+	/* copy contents of file */
+	fread(gfield->data, gfield->atomic_size, gfield->L, fp);
+	printf("  ** read %zu bytes for DataField \"%s\" \n", gfield->atomic_size * gfield->L, field_name );
+	
+	/* finish reading meta data */
+	fgets(dummy,99,fp); //printf("read(header): %s", dummy );
+	fgets(dummy,99,fp); //printf("read(header): %s", dummy );
+	
+	db->field[ db->nfields ] = gfield;
+	
+	db->nfields++;
+	
+}
+
+void _DataBucketViewAscii_HeaderWrite_v00(FILE *fp)
+{
+	fprintf(fp,"<DataBucketHeader>\n");
+	fprintf(fp,"type=DataBucket\n");
+	fprintf(fp,"format=ascii\n");
+	fprintf(fp,"version=0.0\n");
+	fprintf(fp,"options=\n");
+	fprintf(fp,"</DataBucketHeader>\n");
+}
+void _DataBucketViewAscii_HeaderRead_v00(FILE *fp)
+{	
+	char dummy[100];
+	size_t strL;
+
+	// header open
+	fgets(dummy,99,fp); //printf("read(header): %s", dummy );
+
+	// type
+	fgets(dummy,99,fp); //printf("read(header): %s", dummy );
+	strL = strlen(dummy);
+	if(strL>1) { dummy[strL-1] = 0; }
+	if(strcmp(dummy,"type=DataBucket")!=0) {
+		printf("ERROR: Data file doesn't contain a DataBucket type\n");
+		ERROR();
+	}
+
+	// format
+	fgets(dummy,99,fp); //printf("read(header): %s", dummy );
+
+	// version
+	fgets(dummy,99,fp); //printf("read(header): %s", dummy );
+	strL = strlen(dummy);
+	if(strL>1) { dummy[strL-1] = 0; }
+	if(strcmp(dummy,"version=0.0")!=0) {
+		printf("ERROR: DataBucket file must be parsed with version=0.0 : You tried %s \n", dummy);
+		ERROR();
+	}
+	
+	// options
+	fgets(dummy,99,fp); //printf("read(header): %s", dummy );
+	// header close
+	fgets(dummy,99,fp); //printf("read(header): %s", dummy );
+}
+
+
+void _DataBucketLoadFromFileBinary(const char filename[], DataBucket *_db)
+{
+	DataBucket db;
+	FILE *fp;
+	int L,buffer,f,nfields;
+	
+	printf("** DataBucketLoadFromFile **\n");
+
+	
+	/* open file */
+	fp = fopen(filename,"rb");
+	if(fp==NULL){
+		printf("ERROR: Cannot open file with name %s \n", filename);
+		ERROR();
+	}
+
+	/* read header */
+	_DataBucketViewAscii_HeaderRead_v00(fp);
+	
+	fscanf(fp,"%d\n%d\n%d\n",&L,&buffer,&nfields);
+	
+	DataBucketCreate(&db);
+	
+	for( f=0; f<nfields; f++ ) {
+		_DataBucketRegisterFieldFromFile(fp,db);
+	}
+	fclose(fp);
+	
+	DataBucketFinalize(db);
+
+	
+/*	
+  DataBucketSetSizes(db,L,buffer);
+*/
+	db->L = L;
+	db->buffer = buffer;
+	
+	*_db = db;
+}
+
+void DataBucketLoadFromFile(const char filename[], DataBucketViewType type, DataBucket *db)
+{
+	printf("** DataBucketLoadFromFile **\n");
+	if(type==DATABUCKET_VIEW_STDOUT) {
+		
+	} else if(type==DATABUCKET_VIEW_ASCII) {
+		printf("ERROR: Cannot be implemented as we don't know the underlying particle data structure\n");
+		ERROR();
+	} else if(type==DATABUCKET_VIEW_BINARY) {
+		_DataBucketLoadFromFileBinary(filename,db);
+	} else {
+		printf("ERROR: Not implemented\n");
+		ERROR();
+	}
+}
+
+
+void _DataBucketViewBinary(DataBucket db,const char filename[])
+{
+	FILE *fp = NULL;
+	int f;
+	char *name;
+
+	/* create correct extension */
+	asprintf(&name,"%s_p00000.dat",filename );
+	fp = fopen(name,"wb");
+	if(fp==NULL){
+		printf("ERROR: Cannot open file with name %s \n", name);
+		ERROR();
+	}
+	free(name);
+	
+	/* db header */
+	_DataBucketViewAscii_HeaderWrite_v00(fp);
+	
+	/* meta-data */
+	fprintf(fp,"%d\n%d\n%d\n", db->L,db->buffer,db->nfields);
+
+	for( f=0; f<db->nfields; f++ ) {
+			/* load datafields */
+		_DataFieldViewBinary(db->field[f],fp);
+	}
+	
+	fclose(fp);
+}
+
+void DataBucketView(DataBucket db,const char filename[],DataBucketViewType type)
+{
+	printf("** DataBucketView **\n");
+	if(type==DATABUCKET_VIEW_STDOUT) {
+			
+	} else if(type==DATABUCKET_VIEW_ASCII) {
+		printf("ERROR: Cannot be implemented as we don't know the underlying particle data structure\n");
+		ERROR();
+	} else if(type==DATABUCKET_VIEW_BINARY) {
+		_DataBucketViewBinary(db,filename);
+	} else {
+		printf("ERROR: Not implemented\n");
+		ERROR();
+	}
+}
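
The add/remove scheme sketched in the header comment, in action: DataBucketAddPoint() grows L by one through DataBucketSetSizes() (drawing on the buffer slots, reallocating only when they run out), while DataBucketRemovePointAtIndex() copies the last active point into the hole and then shrinks L. A sketch with illustrative sizes (the field name and payload size are placeholders):

  DataBucket db;
  int        L, buffer, allocated;

  DataBucketCreate(&db);
  DataBucketRegisterField(db, "default", 2*sizeof(double), NULL);
  DataBucketFinalize(db);
  DataBucketSetInitialSizes(db, 5, 2);  /* 5 active points, 2 buffer slots, 7 allocated */

  DataBucketRemovePointAtIndex(db, 1);  /* point 4 is copied into slot 1, then L drops to 4 */
  DataBucketAddPoint(db);               /* L back to 5; the reactivated slot was zeroed by the previous resize */

  DataBucketGetSizes(db, &L, &buffer, &allocated);
  /* here L = 5, buffer = 2, allocated = 7: no reallocation was needed */
  DataBucketDestroy(&db);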

src/dm/impls/swarm/swarm_fields.h

+
+#ifndef __SWARM_FIELDS_H__
+#define __SWARM_FIELDS_H__
+
+
+#define DEFAULT -32654789
+
+#define DATAFIELD_POINT_ACCESS_GUARD 
+
+
+typedef enum { BFALSE=0, BTRUE } BTruth;
+typedef enum { DATABUCKET_VIEW_STDOUT=0, DATABUCKET_VIEW_ASCII, DATABUCKET_VIEW_BINARY, DATABUCKET_VIEW_HDF5 } DataBucketViewType;
+
+typedef struct _p_DataField* DataField;
+typedef struct _p_DataBucket* DataBucket;
+
+
+struct _p_DataField {
+	char   *registeration_function;
+	int    L;
+	BTruth active;
+	size_t atomic_size;
+	char   *name; /* what are they called */
+	void   *data; /* the data - an array of structs */
+};
+
+struct _p_DataBucket {
+	int L; /* number in use */
+	int buffer; /* memory buffer used for re-allocation */
+	int allocated;  /* number allocated, this will equal datafield->L */
+	BTruth finalised;
+	int nfields; /* how many fields of this type */
+	DataField *field; /* the data */
+};
+
+#define ERROR() {\
+printf("ERROR: %s() from line %d in %s !!\n", __FUNCTION__, __LINE__, __FILE__);\
+exit(EXIT_FAILURE);\
+}
+
+#define __DATATFIELD_point_access(data,index,atomic_size) (void*)((char*)(data) + (index)*(atomic_size))
+#define __DATATFIELD_point_access_offset(data,index,atomic_size,offset) (void*)((char*)(data) + (index)*(atomic_size) + (offset))
+
+
+
+void StringInList( const char name[], const int N, const DataField gfield[], BTruth *val );
+void StringFindInList( const char name[], const int N, const DataField gfield[], int *index );
+
+void DataFieldCreate( const char registeration_function[], const char name[], const size_t size, const int L, DataField *DF );
+void DataFieldDestroy( DataField *DF );
+void DataBucketCreate( DataBucket *DB );
+void DataBucketDestroy( DataBucket *DB );
+void _DataBucketRegisterField(
+															DataBucket db,
+															const char registeration_function[],
+															const char field_name[],
+															size_t atomic_size, DataField *_gfield );
+
+
+#define DataBucketRegisterField(db,name,size,k) {\
+  char *location;\
+  asprintf(&location,"Registered by %s() at line %d within file %s", __FUNCTION__, __LINE__, __FILE__);\
+  _DataBucketRegisterField( (db), location, (name), (size), (k) );\
+  free(location);\
+}
+
+void DataFieldGetNumEntries(DataField df, int *sum);
+void DataFieldSetSize( DataField df, const int new_L );
+void DataFieldZeroBlock( DataField df, const int start, const int end );
+void DataFieldGetAccess( const DataField gfield );
+void DataFieldAccessPoint( const DataField gfield, const int pid, void **ctx_p );
+void DataFieldAccessPointOffset( const DataField gfield, const size_t offset, const int pid, void **ctx_p );
+void DataFieldRestoreAccess( DataField gfield );
+void DataFieldVerifyAccess( const DataField gfield, const size_t size);
+void DataFieldInsertPoint( const DataField field, const int index, const void *ctx ); 
+void DataFieldCopyPoint( const int pid_x, const DataField field_x,
+												const int pid_y, const DataField field_y );
+void DataFieldZeroPoint( const DataField field, const int index ); 
+
+void DataBucketGetDataFieldByName(DataBucket db,const char name[],DataField *gfield);
+void DataBucketQueryDataFieldByName(DataBucket db,const char name[],BTruth *found);
+void DataBucketFinalize(DataBucket db);
+void DataBucketSetInitialSizes( DataBucket db, const int L, const int buffer );
+void DataBucketSetSizes( DataBucket db, const int L, const int buffer );
+void DataBucketGetSizes( DataBucket db, int *L, int *buffer, int *allocated );
+void DataBucketGetDataFields( DataBucket db, int *L, DataField *fields[] );
+
+void DataBucketCopyPoint( const DataBucket xb, const int pid_x,
+												 const DataBucket yb, const int pid_y );
+void DataBucketCreateFromSubset( DataBucket DBIn, const int N, const int list[], DataBucket *DB );
+void DataBucketZeroPoint( const DataBucket db, const int index );
+
+void DataBucketLoadFromFile(const char filename[], DataBucketViewType type, DataBucket *db);
+void DataBucketView(DataBucket db,const char filename[],DataBucketViewType type);
+
+void DataBucketAddPoint( DataBucket db );
+void DataBucketRemovePoint( DataBucket db );
+void DataBucketRemovePointAtIndex( const DataBucket db, const int index );
+
+#endif
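
The binary viewer/loader pair declared above can round-trip a bucket. Note the asymmetry: _DataBucketViewBinary() appends "_p00000.dat" to the name it is given, whereas DataBucketLoadFromFile() opens its argument verbatim, so the loader must be handed the expanded name. A sketch (the file name is illustrative):

  DataBucket db, db2;
  /* assume db has been created, registered, finalized and filled as in the earlier sketches */
  DataBucketView(db, "swarm_checkpoint", DATABUCKET_VIEW_BINARY);                      /* writes swarm_checkpoint_p00000.dat */
  DataBucketLoadFromFile("swarm_checkpoint_p00000.dat", DATABUCKET_VIEW_BINARY, &db2);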

src/dm/impls/swarm/swarmcreate.c

+#define PETSCDM_DLL
+#include <petsc-private/dmswarmimpl.h>    /*I   "petscdmswarm.h"   I*/
+#include <petscdmda.h>
+
+const char *const DMSwarmPlacements[] = {"LATTICE", "RANDOM", "DMSwarmPlacement", "DM_SWARM_PLACEMENT_", 0};
+
+#undef __FUNCT__
+#define __FUNCT__ "DMSetFromOptions_Swarm"
+PetscErrorCode  DMSetFromOptions_Swarm(DM dm)
+{
+  DM_Swarm      *swarm = (DM_Swarm *) dm->data;
+  PetscErrorCode ierr;
+
+  PetscFunctionBegin;
+  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
+  ierr = PetscOptionsHead("DMSwarm Options");CHKERRQ(ierr);
+  ierr = PetscOptionsEnum("-dm_swarm_point_placement", "Method for laying out points, e.g. lattice, random", "DMSwarmSetPlacement", DMSwarmPlacements, (PetscEnum) swarm->pointPlacement, (PetscEnum *) &swarm->pointPlacement, NULL);CHKERRQ(ierr);
+  /* Handle DMSwarm refinement */
+  /* Handle viewing */
+  ierr = PetscOptionsTail();CHKERRQ(ierr);
+  PetscFunctionReturn(0);
+}
+
+/* External function declarations here */
+extern PetscErrorCode DMCreateCoordinateDM_Swarm(DM dm, DM *cdm);
+extern PetscErrorCode DMSetUp_Swarm(DM dm);
+extern PetscErrorCode DMDestroy_Swarm(DM dm);
+extern PetscErrorCode DMView_Swarm(DM dm, PetscViewer viewer);
+
+#undef __FUNCT__
+#define __FUNCT__ "DMCreateGlobalVector_Swarm"
+static PetscErrorCode DMCreateGlobalVector_Swarm(DM dm,Vec *vec)
+{
+  PetscErrorCode ierr;
+
+  PetscFunctionBegin;
+  ierr = DMCreateGlobalVector_Section_Private(dm,vec);CHKERRQ(ierr);
+  /* ierr = VecSetOperation(*vec, VECOP_DUPLICATE, (void(*)(void)) VecDuplicate_MPI_DM);CHKERRQ(ierr); */
+  /* ierr = VecSetOperation(*vec, VECOP_VIEW, (void (*)(void))VecView_Swarm);CHKERRQ(ierr); */
+  PetscFunctionReturn(0);
+}
+
+#undef __FUNCT__
+#define __FUNCT__ "DMCreateLocalVector_Swarm"
+static PetscErrorCode DMCreateLocalVector_Swarm(DM dm,Vec *vec)
+{
+  PetscErrorCode ierr;
+
+  PetscFunctionBegin;
+  ierr = DMCreateLocalVector_Section_Private(dm,vec);CHKERRQ(ierr);
+  /* ierr = VecSetOperation(*vec, VECOP_VIEW, (void(*)(void)) VecView_Swarm_Local);CHKERRQ(ierr); */
+  PetscFunctionReturn(0);
+}
+
+#undef __FUNCT__
+#define __FUNCT__ "DMInitialize_Swarm"
+PetscErrorCode DMInitialize_Swarm(DM dm)
+{
+  PetscErrorCode ierr;
+
+  PetscFunctionBegin;
+  ierr = PetscStrallocpy(VECSTANDARD, (char**)&dm->vectype);CHKERRQ(ierr);
+
+  dm->ops->view                            = DMView_Swarm;
+  dm->ops->setfromoptions                  = DMSetFromOptions_Swarm;
+  dm->ops->setup                           = DMSetUp_Swarm;
+  dm->ops->createglobalvector              = DMCreateGlobalVector_Swarm;
+  dm->ops->createlocalvector               = DMCreateLocalVector_Swarm;
+  dm->ops->createlocaltoglobalmapping      = NULL;
+  dm->ops->createlocaltoglobalmappingblock = NULL;
+  dm->ops->createfieldis                   = NULL;
+  dm->ops->createcoordinatedm              = DMCreateCoordinateDM_Swarm;
+  dm->ops->getcoloring                     = 0;
+  dm->ops->creatematrix                    = NULL;
+  dm->ops->createinterpolation             = 0;
+  dm->ops->getaggregates                   = 0;
+  dm->ops->getinjection                    = 0;
+  dm->ops->refine                          = NULL;
+  dm->ops->coarsen                         = 0;
+  dm->ops->refinehierarchy                 = 0;
+  dm->ops->coarsenhierarchy                = 0;
+  dm->ops->globaltolocalbegin              = NULL;
+  dm->ops->globaltolocalend                = NULL;
+  dm->ops->localtoglobalbegin              = NULL;
+  dm->ops->localtoglobalend                = NULL;
+  dm->ops->destroy                         = DMDestroy_Swarm;
+  dm->ops->createsubdm                     = NULL;
+  dm->ops->locatepoints                    = NULL;
+  PetscFunctionReturn(0);
+}
+
+EXTERN_C_BEGIN
+#undef __FUNCT__
+#define __FUNCT__ "DMCreate_Swarm"
+PetscErrorCode DMCreate_Swarm(DM dm)
+{
+  DM_Swarm      *swarm;
+  PetscErrorCode ierr;
+
+  PetscFunctionBegin;
+  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
+  ierr     = PetscNewLog(dm, DM_Swarm, &swarm);CHKERRQ(ierr);
+  dm->data = swarm;
+
+  swarm->refct = 1;
+  swarm->db    = NULL;
+  swarm->vdm   = NULL;
+  swarm->pointPlacement = DM_SWARM_PLACEMENT_LATTICE;
+
+  ierr = DMInitialize_Swarm(dm);CHKERRQ(ierr);
+  PetscFunctionReturn(0);
+}
+EXTERN_C_END
+
+#undef __FUNCT__
+#define __FUNCT__ "DMSwarmCreate"
+/*@
+  DMSwarmCreate - Creates a DMSwarm object, which encapsulates a set of particles. These are often used as Lagrangian tracers and in the Material Point Method.
+
+  Collective on MPI_Comm
+
+  Input Parameter:
+. comm - The communicator for the DMSwarm object
+
+  Output Parameter:
+. swarm - The DMSwarm object
+
+  Level: beginner
+
+.keywords: DMSwarm, create
+@*/
+PetscErrorCode DMSwarmCreate(MPI_Comm comm, DM *swarm)
+{
+  PetscErrorCode ierr;
+
+  PetscFunctionBegin;
+  PetscValidPointer(swarm,2);
+  ierr = DMCreate(comm, swarm);CHKERRQ(ierr);
+  ierr = DMSetType(*swarm, DMSWARM);CHKERRQ(ierr);
+  PetscFunctionReturn(0);
+}
+
+#undef __FUNCT__
+#define __FUNCT__ "DMSwarmClone"
+/*@
+  DMSwarmClone - Creates a DMSwarm object with the same particles as the original.
+
+  Collective on MPI_Comm
+
+  Input Parameter:
+. dm - The original DMSwarm object
+
+  Output Parameter:
+. newdm  - The new DMSwarm object
+
+  Level: beginner
+
+.keywords: DMSwarm, create
+@*/
+PetscErrorCode DMSwarmClone(DM dm, DM *newdm)
+{
+  DM_Swarm      *swarm;
+  void          *ctx;
+  PetscErrorCode ierr;
+
+  PetscFunctionBegin;
+  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
+  PetscValidPointer(newdm,2);
+  ierr         = DMCreate(PetscObjectComm((PetscObject)dm), newdm);CHKERRQ(ierr);
+  ierr         = PetscSFDestroy(&(*newdm)->sf);CHKERRQ(ierr);
+  ierr         = PetscObjectReference((PetscObject) dm->sf);CHKERRQ(ierr);
+  (*newdm)->sf = dm->sf;
+  swarm         = (DM_Swarm*) dm->data;
+  swarm->refct++;
+  (*newdm)->data = swarm;
+  ierr           = PetscObjectChangeTypeName((PetscObject) *newdm, DMSWARM);CHKERRQ(ierr);
+  ierr           = DMInitialize_Swarm(*newdm);CHKERRQ(ierr);
+  ierr           = DMGetApplicationContext(dm, &ctx);CHKERRQ(ierr);
+  ierr           = DMSetApplicationContext(*newdm, ctx);CHKERRQ(ierr);
+  PetscFunctionReturn(0);
+}
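
DMSwarmClone() shares the DM_Swarm data between the original and the clone and increments refct, so DMDestroy_Swarm() only tears down the particle storage when the last reference disappears. A sketch of that intended behaviour (again assuming the missing setup pieces were wired in):

  DM             swarm, swarmClone;
  PetscErrorCode ierr;

  ierr = DMSwarmCreate(PETSC_COMM_WORLD, &swarm);CHKERRQ(ierr);
  ierr = DMSwarmClone(swarm, &swarmClone);CHKERRQ(ierr); /* shares dm->data and the point SF, refct becomes 2 */
  ierr = DMDestroy(&swarmClone);CHKERRQ(ierr);           /* refct drops to 1, the DataBucket is kept alive */
  ierr = DMDestroy(&swarm);CHKERRQ(ierr);                /* refct reaches 0, bucket and exchanger are destroyed */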