Commits

John ZuHone committed 592cf5f

Particles should now work for all implementations of the stream frontend so far. Also added a couple of tests.

Comments (0)

Files changed (3)

yt/frontends/stream/data_structures.py

 class StreamHandler(object):
     def __init__(self, left_edges, right_edges, dimensions,
                  levels, parent_ids, particle_count, processor_ids,
-                 fields, io = None):
+                 fields, io = None, particle_types = {}):
         self.left_edges = left_edges
         self.right_edges = right_edges
         self.dimensions = dimensions
         self.num_grids = self.levels.size
         self.fields = fields
         self.io = io
-
+        self.particle_types = particle_types
+            
     def get_fields(self):
         return self.fields.all_fields
 
+    def get_particle_type(self, field) :
+
+        if self.particle_types.has_key(field) :
+            return self.particle_types[field]
+        else :
+            return False
+        
 class StreamHierarchy(AMRHierarchy):
 
     grid = StreamGrid
                         return data.convert(f)
                     return _convert_function
                 cf = external_wrapper(field)
+            ptype = self.stream_handler.get_particle_type(field)
             # Note that we call add_field on the field_info directly.  This
             # will allow the same field detection mechanism to work for 1D, 2D
             # and 3D fields.
             self.pf.field_info.add_field(
-                    field, lambda a, b: None,
+                    field, lambda a, b: None, particle_type = ptype,
                     convert_function=cf, take_log=False)
 
     def _parse_hierarchy(self):
             self.io = io_registry[self.data_style](self.stream_handler)
 
     def update_data(self, data) :
+        
+        particle_types = set_particle_types(data[0])
 
         for key in data[0].keys() :
 
-            if key not in self.field_list: self.field_list.append(key)
+            if key is "number_of_particles": continue
             
+            self.stream_handler.particle_types[key] = particle_types[key]
+            
+            if key not in self.field_list:
+                self.field_list.append(key)
+
+        
         for i, grid in enumerate(self.grids) :
             
+            if data[i].has_key("number_of_particles") :
+
+                grid.NumberOfParticles = data[i].pop("number_of_particles")
+                
             for key in data[i].keys() :
 
                 self.stream_handler.fields[grid.id][key] = data[i][key]
-
+            
+        self._setup_unknown_fields()
         self._detect_fields()
         
 class StreamStaticOutput(StaticOutput):
     @property
     def all_fields(self): return self[0].keys()
 
def set_particle_types(data):
    """Infer which fields in *data* are particle fields.

    A field is treated as a particle field when its array is
    one-dimensional; multi-dimensional arrays are grid fields.  The
    bookkeeping entry "number_of_particles" is ignored.

    Parameters
    ----------
    data : dict
        Mapping of field name -> ndarray, optionally including an
        integer "number_of_particles" entry.

    Returns
    -------
    dict
        Mapping of field name -> bool (True for particle fields).
    """
    # Bugfix: the key comparison must use ==, not "is".  The original
    # relied on CPython interning the string literal, which is an
    # implementation detail and also skipping .shape access on the int.
    return dict((key, len(data[key].shape) == 1)
                for key in data.keys() if key != "number_of_particles")
+
def assign_particle_data(pf, pdata):
    """Distribute a monolithic dict of particle data among pf's grids.

    Particles are assigned to grids by position via pf.h.find_points,
    and pf.h.update_data is called with one dict per grid.  For a
    single-grid parameter file the data is passed through untouched.

    Parameters
    ----------
    pf : StaticOutput
        The parameter file whose hierarchy receives the particles.
    pdata : dict
        Particle fields keyed by name, including "number_of_particles"
        and (for multi-grid pfs) the three particle_position_* fields.

    Raises
    ------
    KeyError
        If pf has multiple grids and any position field is missing.
    """
    if pf.h.num_grids > 1:
        try:
            x = pdata["particle_position_x"]
            y = pdata["particle_position_y"]
            z = pdata["particle_position_z"]
        except KeyError:
            # Catch only the missing-key case; the original bare
            # "except:" would have masked unrelated errors.
            raise KeyError("Cannot decompose particle data without position fields!")

        particle_grids, particle_grid_inds = pf.h.find_points(x, y, z)

        # Sort particles by destination grid so that each grid's
        # particles occupy one contiguous slice of the sorted arrays.
        idxs = np.argsort(particle_grid_inds)
        particle_grid_count = np.bincount(particle_grid_inds,
                                          minlength=pf.h.num_grids)
        # particle_indices[i]:particle_indices[i+1] bounds grid i's slice.
        # (The original re-tested num_grids > 1 here; inside this branch
        # it is always true, so the else arm was dead code.)
        particle_indices = np.zeros(pf.h.num_grids + 1, dtype='int64')
        np.add.accumulate(particle_grid_count.squeeze(),
                          out=particle_indices[1:])

        pdata.pop("number_of_particles")

        # Apply the sort once per field instead of once per grid per
        # field, as the fancy index over all particles is O(N).
        sorted_fields = dict((key, pdata[key][idxs]) for key in pdata.keys())

        grid_pdata = []
        for i, pcount in enumerate(particle_grid_count):
            start = particle_indices[i]
            end = particle_indices[i + 1]
            grid = {"number_of_particles": pcount}
            for key in sorted_fields:
                grid[key] = sorted_fields[key][start:end]
            grid_pdata.append(grid)

    else:
        # A single grid gets everything; no decomposition needed, and
        # "number_of_particles" stays in the dict for update_data.
        grid_pdata = [pdata]

    pf.h.update_data(grid_pdata)
+                                        
 def load_uniform_grid(data, domain_dimensions, sim_unit_to_cm, bbox=None,
-                      nprocs=1, sim_time=0.0, number_of_particles=0):
+                      nprocs=1, sim_time=0.0):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
           disappointing or non-existent in most cases.
         * Particles may be difficult to integrate.
 
+    Particle fields are detected as one-dimensional fields. The number of particles
+    is set by the "number_of_particles" key in data.
+    
     Parameters
     ----------
     data : dict
         If greater than 1, will create this number of subarrays out of data
     sim_time : float, optional
         The simulation time in seconds
-    number_of_particles : int, optional
-        If particle fields are included, set this to the number of particles
 
     Examples
     --------
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
 
     sfh = StreamDictFieldHandler()
-
+    
+    if data.has_key("number_of_particles") :
+        number_of_particles = data.pop("number_of_particles")
+    else :
+        number_of_particles = int(0)
+    
+    if number_of_particles > 0 :
+        particle_types = set_particle_types(data)
+        pdata = {}
+        pdata["number_of_particles"] = number_of_particles
+        for key in data.keys() :
+            if len(data[key].shape) == 1 :
+                pdata[key] = data.pop(key)
+    else :
+        particle_types = {}
+    
     if nprocs > 1:
         temp = {}
         new_data = {}
         for key in data.keys():
             psize = get_psize(np.array(data[key].shape), nprocs)
             grid_left_edges, grid_right_edges, temp[key] = \
-                decompose_array(data[key], psize, bbox)
+                             decompose_array(data[key], psize, bbox)
             grid_dimensions = np.array([grid.shape for grid in temp[key]],
                                        dtype="int32")
         for gid in range(nprocs):
         grid_dimensions,
         grid_levels,
         -np.ones(nprocs, dtype='int64'),
-        number_of_particles*np.ones(nprocs, dtype='int64').reshape(nprocs,1),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
         np.zeros(nprocs).reshape((nprocs,1)),
         sfh,
+        particle_types=particle_types
     )
 
     handler.name = "UniformGridData"
     box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    # Now figure out where the particles go
+
+    if number_of_particles > 0 :
+        assign_particle_data(spf, pdata)
+    
     return spf
 
 def load_amr_grids(grid_data, domain_dimensions, sim_unit_to_cm, bbox=None,
-                   sim_time=0.0, number_of_particles=0):
+                   sim_time=0.0):
     r"""Load a set of grids of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
     grid_data : list of dicts
         This is a list of dicts.  Each dict must have entries "left_edge",
         "right_edge", "dimensions", "level", and then any remaining entries are
-        assumed to be fields.  This will be modified in place and can't be
-        assumed to be static..
+        assumed to be fields.  They also may include a particle count, otherwise
+        assumed to be zero. This will be modified in place and can't be
+        assumed to be static.
     domain_dimensions : array_like
         This is the domain dimensions of the grid
     sim_unit_to_cm : float
         Size of computational domain in units sim_unit_to_cm
     sim_time : float, optional
         The simulation time in seconds
-    number_of_particles : int, optional
-        If particle fields are included, set this to the number of particles
 
     Examples
     --------
     ...     dict(left_edge = [0.0, 0.0, 0.0],
     ...          right_edge = [1.0, 1.0, 1.],
     ...          level = 0,
-    ...          dimensions = [32, 32, 32]),
+    ...          dimensions = [32, 32, 32],
+    ...          number_of_particles = 0)
     ...     dict(left_edge = [0.25, 0.25, 0.25],
     ...          right_edge = [0.75, 0.75, 0.75],
     ...          level = 1,
-    ...          dimensions = [32, 32, 32])
+    ...          dimensions = [32, 32, 32],
+    ...          number_of_particles = 0)
     ... ]
     ... 
     >>> for g in grid_data:
     grid_left_edges = np.zeros((ngrids, 3), dtype="float32")
     grid_right_edges = np.zeros((ngrids, 3), dtype="float32")
     grid_dimensions = np.zeros((ngrids, 3), dtype="int32")
+    number_of_particles = np.zeros((ngrids,1), dtype='int64')
     sfh = StreamDictFieldHandler()
     for i, g in enumerate(grid_data):
         grid_left_edges[i,:] = g.pop("left_edge")
         grid_right_edges[i,:] = g.pop("right_edge")
         grid_dimensions[i,:] = g.pop("dimensions")
         grid_levels[i,:] = g.pop("level")
+        if g.has_key("number_of_particles") :
+            number_of_particles[i,:] = g.pop("number_of_particles")  
         sfh[i] = g
-
+            
     handler = StreamHandler(
         grid_left_edges,
         grid_right_edges,
         grid_dimensions,
         grid_levels,
         None, # parent_ids is none
-        number_of_particles*np.ones(ngrids, dtype='int64').reshape(ngrids,1),
+        number_of_particles,
         np.zeros(ngrids).reshape((ngrids,1)),
         sfh,
+        particle_types=set_particle_types(grid_data[0])
     )
 
     handler.name = "AMRGridData"
     >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
     >>> pf = refine_amr(ug, rc, fo, 5)
     """
+
+    # If we have particle data, set it aside for now
+
+    number_of_particles = np.sum([grid.NumberOfParticles
+                                  for grid in base_pf.h.grids])
+
+    if number_of_particles > 0 :
+        pdata = {}
+        for field in base_pf.h.field_list :
+            if base_pf.field_info[field].particle_type :
+                pdata[field] = np.concatenate([grid[field]
+                                               for grid in base_pf.h.grids])
+        pdata["number_of_particles"] = number_of_particles
+        
     last_gc = base_pf.h.num_grids
     cur_gc = -1
     pf = base_pf    
                        level = g.Level,
                        dimensions = g.ActiveDimensions )
             for field in pf.h.field_list:
-                gd[field] = g[field]
+                if not pf.field_info[field].particle_type :
+                    gd[field] = g[field]
             grid_data.append(gd)
             if g.Level < pf.h.max_level: continue
             fg = FlaggingGrid(g, refinement_criteria)
                 gd = dict(left_edge = LE, right_edge = grid.right_edge,
                           level = g.Level + 1, dimensions = dims)
                 for field in pf.h.field_list:
-                    gd[field] = grid[field]
+                    if not pf.field_info[field].particle_type :
+                        gd[field] = grid[field]
                 grid_data.append(gd)
         pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
         cur_gc = pf.h.num_grids
+
+    # Now reassign particle data to grids
+
+    if number_of_particles > 0 : assign_particle_data(pf, pdata)
+    
     return pf

yt/frontends/stream/tests/test_stream_particles.py

+import numpy as np
+from yt.testing import *
+from yt.frontends.stream.api import load_uniform_grid, refine_amr, load_amr_grids
+import yt.utilities.initial_conditions as ic
+import yt.utilities.flagging_methods as fm
+
def setup():
    """No module-level fixtures are required for these tests."""
+
+# Field information
+
def test_stream_particles():
    """Exercise particle handling across all stream-frontend loaders.

    Verifies that load_uniform_grid (serial and decomposed),
    load_amr_grids, and refine_amr all preserve the total particle
    count and flag particle fields correctly.
    """
    num_particles = 100000
    domain_dims = (64, 64, 64)
    dens = np.random.random(domain_dims)
    x = np.random.uniform(size=num_particles)
    y = np.random.uniform(size=num_particles)
    z = np.random.uniform(size=num_particles)
    m = np.ones(num_particles)

    def amr_grid_data(pf):
        # Serialize an AMR hierarchy back into the list-of-dicts form
        # expected by load_amr_grids, keeping per-grid particle counts.
        # (Extracted: this loop appeared verbatim twice in the original.)
        grid_data = []
        for grid in pf.h.grids:
            data = dict(left_edge=grid.LeftEdge,
                        right_edge=grid.RightEdge,
                        level=grid.Level,
                        dimensions=grid.ActiveDimensions,
                        number_of_particles=grid.NumberOfParticles)
            for field in pf.h.field_list:
                data[field] = grid[field]
            grid_data.append(data)
        return grid_data

    def check_particle_fields(pf):
        # Every particle_* field must be flagged as a particle field,
        # and Density must remain a grid field.  (Extracted: these five
        # assertions appeared four times in the original.)
        for field in ["particle_position_x", "particle_position_y",
                      "particle_position_z", "particle_mass"]:
            assert pf.field_info[field].particle_type
        assert not pf.field_info["Density"].particle_type

    # Field operators and cell flagging methods
    fo = [ic.TopHatSphere(0.1, [0.2, 0.3, 0.4], {"Density": 2.0}),
          ic.TopHatSphere(0.05, [0.7, 0.4, 0.75], {"Density": 20.0})]
    rc = [fm.flagging_method_registry["overdensity"](1.0)]

    # Check that all of this runs ok without particles
    ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0)
    ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0, nprocs=8)
    amr0 = refine_amr(ug0, rc, fo, 3)
    amr0 = load_amr_grids(amr_grid_data(amr0), domain_dims, 1.0)

    # Now add particles
    fields1 = {"Density": dens,
               "particle_position_x": x,
               "particle_position_y": y,
               "particle_position_z": z,
               "particle_mass": m,
               "number_of_particles": num_particles}
    fields2 = fields1.copy()

    ug1 = load_uniform_grid(fields1, domain_dims, 1.0)
    ug2 = load_uniform_grid(fields2, domain_dims, 1.0, nprocs=8)

    # Check to make sure the number of particles is the same
    number_of_particles1 = np.sum([grid.NumberOfParticles for grid in ug1.h.grids])
    number_of_particles2 = np.sum([grid.NumberOfParticles for grid in ug2.h.grids])
    assert number_of_particles1 == num_particles
    assert number_of_particles1 == number_of_particles2

    # Check to make sure the fields have been defined correctly
    check_particle_fields(ug1)
    check_particle_fields(ug2)

    # Now refine this
    amr1 = refine_amr(ug1, rc, fo, 3)
    amr2 = load_amr_grids(amr_grid_data(amr1), domain_dims, 1.0)

    # Check everything again, per grid this time
    number_of_particles1 = [grid.NumberOfParticles for grid in amr1.h.grids]
    number_of_particles2 = [grid.NumberOfParticles for grid in amr2.h.grids]
    assert np.sum(number_of_particles1) == num_particles
    assert_equal(number_of_particles1, number_of_particles2)

    check_particle_fields(amr1)
    check_particle_fields(amr2)
+

yt/frontends/stream/tests/test_update_data.py

+from yt.testing import *
+from yt.data_objects.profiles import BinnedProfile1D
+from numpy.random import uniform
+
def setup():
    """Build the shared random parameter file used by the tests below."""
    global pf
    pf = fake_random_pf(64, nprocs=8)
    # Touch .h so hierarchy construction happens during setup, not
    # inside the first test.
    pf.h
+    
def test_update_data():
    """Push a new Temperature field onto every grid and exercise it."""
    shape = (32, 32, 32)
    new_data = []
    for _ in xrange(pf.h.num_grids):
        new_data.append({"Temperature": uniform(size=shape)})
    pf.h.update_data(new_data)
    # The freshly-added field must be usable by projections...
    proj = pf.h.proj(2, "Temperature")
    proj["Temperature"]
    # ...and by binned profiles over a data container.
    dd = pf.h.all_data()
    profile = BinnedProfile1D(dd, 10, "Density",
                              dd["Density"].min(),
                              dd["Density"].max())
    profile.add_fields(["Temperature"])
    profile["Temperature"]
+