Christopher Moody avatar Christopher Moody committed 1503e0a

hilbert framework works; wrong ordering though

Comments (0)

Files changed (10)

yt/analysis_modules/halo_finding/halo_objects.py

         self.max_radius=Rvir
         self.bulk_vel=na.array([VX,VY,VZ])*1e5
         self.rms_vel=-1
-        self.group_total_mass = -1 #not implemented
-        
-        
+        self.group_total_mass = -1 #not implemented 
     
     def maximum_density(self):
         r"""Not implemented."""
         Jc = 1.0
         conv = dict(X=1.0/pf['mpchcm'],
                     Y=1.0/pf['mpchcm'],
-                    Z=1.0/pf['mpchcm'],
-                    VX=1e0,VY=1e0,VZ=1e0,
-                    Mvir=1.0,
+                    Z=1.0/pf['mpchcm'], #to unitary
+                    VX=1e0,VY=1e0,VZ=1e0, #to km/s
+                    Mvir=1.0, #Msun/h
                     Vmax=1e0,Vrms=1e0,
-                    Rvir=1.0/pf['mpchcm'],
-                    Rs=1.0/pf['mpchcm'],
+                    Rvir=1.0/pf['kpchcm'],
+                    Rs=1.0/pf['kpchcm'],
                     JX=Jc,JY=Jc,JZ=Jc)
         dtype = {'names':names,'formats':formats}
         halo_table = na.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            

yt/analysis_modules/halo_finding/rockstar/rockstar.py

         return data_source
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
-    def __init__(self, pf, num_readers = 0, num_writers = 0, outbase=None):
+    def __init__(self, pf, num_readers = 1, num_writers = None, 
+            outbase=None,particle_mass=-1.0,overwrite=False,
+            left_edge = None, right_edge = None):
         ParallelAnalysisInterface.__init__(self)
         # No subvolume support
         self.pf = pf
         self.hierarchy = pf.h
+        if num_writers is None:
+            num_writers = self.comm.size - num_readers -1
         self.num_readers = num_readers
         self.num_writers = num_writers
+        self.particle_mass = particle_mass 
+        self.overwrite = overwrite
+        if left_edge is None:
+            left_edge = pf.domain_left_edge
+        if right_edge is None:
+            right_edge = pf.domain_right_edge
+        self.le = left_edge
+        self.re = right_edge
         if self.num_readers + self.num_writers + 1 != self.comm.size:
+            print '%i reader + %i writers != %i mpi'%\
+                    (self.num_reader,self.num_writers,self.comm.size)
             raise RuntimeError
         self.center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
         data_source = None
         self.port = str(self.port)
 
     def run(self, block_ratio = 1,**kwargs):
+        """
+        
+        """
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
         #because rockstar *always* write to exactly the same
         #out_0.list filename we make a directory for it
         #to sit inside so it doesn't get accidentally
-        #overwritten
-        
+        #overwritten 
         if self.workgroup.name == "server":
-            os.mkdir(self.outbase)
+            if not os.path.exists(self.outbase):
+                os.mkdir(self.outbase)
         self.handler.setup_rockstar(self.server_address, self.port,
                     parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
                     writing_port = -1,
                     block_ratio = block_ratio,
                     outbase = self.outbase,
+                    particle_mass = float(self.particle_mass),
                     **kwargs)
         if self.comm.size == 1:
             self.handler.call_rockstar()
             if self.workgroup.name == "server":
                 self.handler.start_server()
             elif self.workgroup.name == "readers":
-                #time.sleep(0.5 + self.workgroup.comm.rank/10.0)
+                time.sleep(0.1 + self.workgroup.comm.rank/10.0)
                 self.handler.start_client()
             elif self.workgroup.name == "writers":
-                #time.sleep(1.0 + self.workgroup.comm.rank/10.0)
+                time.sleep(0.2 + self.workgroup.comm.rank/10.0)
                 self.handler.start_client()
         self.comm.barrier()
         #quickly rename the out_0.list 
     
-    def halo_list(self):
+    def halo_list(self,file_name='out_0.list'):
         """
         Reads in the out_0.list file and generates RockstarHaloList
         and RockstarHalo objects.
         """
-        return RockstarHaloList(self.pf,self.outbase+'/out_0.list')
+        return RockstarHaloList(self.pf,self.outbase+'/%s'%file_name)

yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx

 cdef RockstarInterface rh
 cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
     cdef int i, fi, npart, tnpart
-    cdef np.float64_t conv[6], left_edge[6]
+    cdef np.float64_t conv[6], left_edge[6], right_edge[3]
     dd = rh.data_source
     cdef np.ndarray[np.int64_t, ndim=1] arri
     cdef np.ndarray[np.float64_t, ndim=1] arr
     #print "Loading indices: size = ", tnpart
     conv[0] = conv[1] = conv[2] = rh.pf["mpchcm"]
     conv[3] = conv[4] = conv[5] = 1e-5
-    left_edge[0] = rh.pf.domain_left_edge[0]
-    left_edge[1] = rh.pf.domain_left_edge[1]
-    left_edge[2] = rh.pf.domain_left_edge[2]
+    left_edge[0] = rh.le[0]
+    left_edge[1] = rh.le[1]
+    left_edge[2] = rh.le[2]
+    right_edge[0] = rh.re[0]
+    right_edge[1] = rh.re[1]
+    right_edge[2] = rh.re[2]
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
     for g in grids:
                       "particle_velocity_z"]:
             arr = dd._get_data_from_grid(g, field).astype("float64")
             for i in range(npart):
+                if fi<3: 
+                    if  left_edge[i] > arr[i]: continue
+                    if right_edge[i] < arr[i]: continue
                 p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
             fi += 1
         pi += npart
                        int parallel = False, int num_readers = 1,
                        int num_writers = 1,
                        int writing_port = -1, int block_ratio = 1,
-                       int periodic = 1, 
+                       int periodic = 1, int min_halo_size = 20,
                        char *outbase = 'None'):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
-        global rh, SCALE_NOW, OUTBASE
+        global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
         if parallel:
             PARALLEL_IO = 1
             PARALLEL_IO_SERVER_ADDRESS = server_address

yt/analysis_modules/sunrise_export/sunrise_exporter.py

 """
 Code to export from yt to Sunrise
 
+Author: Chris Moody <juxtaposicion@gmail.com>
+Affiliation: UCSC
 Author: Matthew Turk <matthewturk@gmail.com>
 Affiliation: UCSD
 Homepage: http://yt-project.org/
 
 try:
     import pyfits
-except ImportError:
-    # We silently fail here
+except ImportError: 
     pass
 
 import time
 from yt.funcs import *
 import yt.utilities.amr_utils as amr_utils
 from yt.data_objects.universal_fields import add_field
+from yt.mods import *
 
-from os import environ
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface, ProcessorPool, Communicator
+debug = True
 
-def export_to_sunrise(pf, fn, write_particles = True, subregion_bounds = None,
-    particle_mass=None, particle_pos=None, particle_age=None, particle_metal=None,
-    parallel=False):
+def export_to_sunrise(pf, fn, star_particle_type, dle, dre,**kwargs):
     r"""Convert the contents of a dataset to a FITS file format that Sunrise
     understands.
 
     pf : `StaticOutput`
         The parameter file to convert.
     fn : string
-        The filename of the FITS file.
-    write_particles : bool or pyfits.ColDefs instance, default is True
-        Whether to write out the star particles or not.  If this variable is an
-        instance of pyfits.ColDefs, then this will be used to create a pyfits
-        table named PARTICLEDATA which will be appended.  If this is true, the
-        routine will attempt to create this table from hand.
-    subregion_bounds : list of tuples
-        This is a list of tuples describing the subregion of the top grid to
-        export.  This will only work when only *one* root grid exists.
-        It is of the format:
-        [ (start_index_x, nx), (start_index_y, ny), (start_index_z, nz) ]
-        where nx, ny, nz are the number of cells to extract.
+        The filename of the output FITS file.
+    dle : The domain left edge to extract
+    dre : The domain right edge to extract
+        Array format is (nx,ny,nz) where each element is floating point
+        in unitary position units where 0 is leftmost edge and 1
+        the rightmost. 
+        
 
     Notes
     -----
     http://sunrise.googlecode.com/ for more information.
 
     """
-    # Now particles
-    #  output_file->addTable("PARTICLEDATA" , 0);
-    # addKey("timeunit", time_unit, "Time unit is "+time_unit);
-    # addKey("tempunit", temp_unit, "Temperature unit is "+temp_unit);
-    # 
-    # addColumn(Tint, "ID", 1, "" );
-    # addColumn(Tdouble, "position", 3, length_unit );
-    # addColumn(Tdouble, "stellar_radius", 1, length_unit );
-    # addColumn(Tdouble, "L_bol", 1, L_bol_unit );
-    # addColumn(Tdouble, "mass_stars", 1, mass_unit );
-    # addColumn(Tdouble, "mass_stellar_metals", 1, mass_unit );
-    # addColumn(Tdouble, "age_m", 1, time_unit+"*"+mass_unit );
-    # addColumn(Tdouble, "age_l", 1, time_unit+"*"+mass_unit );
-    # addColumn(Tfloat, "L_lambda", L_lambda.columns(), 
-    #			L_lambda_unit );
-    #	output->addKey("logflux", true, "Column L_lambda values are log (L_lambda)");
+    
+    #we must round the dle,dre to the nearest root grid cells
+    ile,ire,super_level= round_nearest_edge(pf,dle,dre)
+    super_level -= 1 #we're off by one (so we don't need a correction if we span 2 cells)
+    fle,fre = ile*1.0/pf.domain_dimensions, ire*1.0/pf.domain_dimensions
+    mylog.info("rounding specified region:")
+    mylog.info("from [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(dle)+tuple(dre)))
+    mylog.info("to   [%07i %07i %07i]-[%07i %07i %07i]"%(tuple(ile)+tuple(ire)))
+    mylog.info("to   [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(fle)+tuple(fre)))
 
-    col_list = []
-    if subregion_bounds == None:    
-        DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
-        DX = pf.domain_dimensions
-    else:
-        DLE, DX = zip(*subregion_bounds)
-        DLE, DX = na.array(DLE), na.array(DX)
-        DRE = DLE + DX
-    reg = pf.h.region((DRE+DLE)/2.0, DLE, DRE)
 
-    if write_particles is True:
-        pi = reg["particle_type"] == 2
-        pos = na.array([reg["particle_position_%s" % ax][pi]*pf['kpc']
-                            for ax in 'xyz']).transpose()
-        vel = na.array([reg["particle_velocity_%s" % ax][pi]
-                            for ax in 'xyz']).transpose()
-        # Velocity is cm/s, we want it to be kpc/yr
-        vel *= (pf["kpc"]/pf["cm"]) / (365*24*3400.)
-        age = pf["years"] * (pf.current_time - reg["creation_time"][pi])
-        creation_time = reg["creation_time"][pi] * pf["years"]
+    #Create the refinement hilbert octree in GRIDSTRUCTURE
+    #For every leaf (not-refined) cell we have a column in GRIDDATA
+    #Include mass_gas, mass_metals, gas_temp_m, gas_teff_m, cell_volume, SFR
+    #since the octree always starts with one cell, an our 0-level mesh
+    #may have many cells, we must create the octree region sitting
+    #on top of the first mesh by providing a negative level
+    output, refinement = prepare_octree(pf,ile,start_level=-super_level)
 
-        initial_mass = reg["InitialMassCenOstriker"][pi]
-        current_mass = reg["ParticleMassMsun"][pi]
-        col_list.append(pyfits.Column("ID", format="I", array=na.arange(current_mass.size)))
-        col_list.append(pyfits.Column("parent_ID", format="I", array=na.arange(current_mass.size)))
-        col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
-        col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
-        col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun"))
-        col_list.append(pyfits.Column("formation_time", format="D", array=creation_time, unit="yr"))
-        col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
-        col_list.append(pyfits.Column("age_m", format="D", array=age))
-        col_list.append(pyfits.Column("age_l", format="D", array=age))
-        #For particles, Sunrise takes 
-        #the dimensionless metallicity, not the mass of the metals
-        col_list.append(pyfits.Column("metallicity", format="D",
-            array=reg["metallicity_fraction"][pi],unit="Msun")) # wrong?
-        col_list.append(pyfits.Column("L_bol", format="D",
-            array=na.zeros(particle_mass.size)))
+    #Create a list of the star particle properties in PARTICLE_DATA
+    #Include ID, parent-ID, position, velocity, creation_mass, 
+    #formation_time, mass, age_m, age_l, metallicity, L_bol
+    particle_data = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,**kwargs)
 
-        cols = pyfits.ColDefs(col_list)
-        pd_table = pyfits.new_table(cols)
-        pd_table.name = "PARTICLEDATA"
-    elif isinstance(write_particles, pyfits.ColDefs):
-        pd_table = pyfits.new_table(write_particles)
-        pd_table.name = "PARTICLEDATA"
-        write_particles = True
+    create_fits_file(pf,fn, refinement,output,particle_data,fre,fle)
 
-    def _MetalMass(field, data):
-        return data["Metal_Density"] * data["CellVolume"]
-        
-    def _convMetalMass(data):
-        return 1.0/1.989e33
-        
-    add_field("MetalMass", function=_MetalMass,
-              convert_function=_convMetalMass)
+def prepare_octree(pf,ile,start_level=0):
+    add_fields() #add the metal mass field that sunrise wants
+    fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
+              "MetalMass","CellVolumeCode"]
+    
+    #gather the field data from octs
+    pbar = get_pbar("Retrieving field data",len(fields))
+    field_data = [] 
+    dd = pf.h.all_data()
+    for fi,f in enumerate(fields):
+        field_data += dd[f],
+        pbar.update(fi)
+    pbar.finish()
+    del field_data
 
-    output, refined = generate_flat_octree(pf,
-            ["CellMassMsun","TemperatureTimesCellMassMsun", "MetalMass",
-             "CellVolumeCode"], subregion_bounds = subregion_bounds,
-            parallel=parallel)
-    cvcgs = output["CellVolumeCode"].astype('float64') * pf['cm']**3.0
+    #first we cast every cell as an oct
+    #ngrids = na.max([g.id for g in pf._grids])
+    grids = {}
+    levels_all = {} 
+    levels_finest = {}
+    for l in range(100): 
+        levels_finest[l]=0
+        levels_all[l]=0
+    pbar = get_pbar("Initializing octs ",len(pf.h.grids))
+    for gi,g in enumerate(pf.h.grids):
+        ff = na.array([g[f] for f in fields])
+        og = amr_utils.OctreeGrid(
+                g.child_index_mask.astype('int32'),
+                ff.astype("float64"),
+                g.LeftEdge.astype("float64"),
+                g.ActiveDimensions.astype("int32"),
+                na.ones(1,dtype="float64")*g.dds[0],
+                g.Level,
+                g.id)
+        grids[g.id] = og
+        #how many refinement cells will we have?
+        #measure the 'volume' of each mesh, but many
+        #cells do not exist. an overestimate
+        levels_all[g.Level] += g.ActiveDimensions.prod()
+        #how many leaves do we have?
+        #this overestimates. a child of -1 means no child,
+        #but that cell may still be expanded on a submesh because
+        #(at least in ART) the meshes are inefficient.
+        g.clear_data()
+        pbar.update(gi)
+    pbar.finish()
+    
+    #create the octree grid list
+    oct_list =  amr_utils.OctreeGridList(grids)
+    
+    #initialize arrays to be passed to the recursion algo
+    o_length = na.sum(levels_all.values())
+    r_length = na.sum(levels_all.values())
+    output   = na.zeros((o_length,len(fields)), dtype='float64')
+    refined  = na.zeros(r_length, dtype='int32')
+    levels   = na.zeros(r_length, dtype='int32')
+    pos = position()
+    hs       = hilbert_state()
+    refined[0] = 1 #introduce the first cell as divided
+    levels[0]  = start_level-1 #introduce the first cell as divided
+    pos.refined_pos += 1
+    RecurseOctreeDepthFirstHilbert(
+            ile[0],ile[1],ile[2],
+            pos,0, hs, 
+            output,refined,levels,
+            grids,
+            start_level,
+            #physical_center = (ile)*1.0/pf.domain_dimensions*pf['kpc'],
+            physical_center = ile,
+            #physical_width  = pf['kpc'])
+            physical_width  = pf.domain_dimensions)
+    #by the time we get here the 'current' position is actually
+    #for the next spot, so we're off by 1
+    print 'refinement tree # of cells %i, # of leaves %i'%(pos.refined_pos,pos.output_pos) 
+    output  = output[:pos.output_pos]
+    refined = refined[:pos.refined_pos] 
+    levels = levels[:pos.refined_pos] 
+    return output,refined
 
-    # First the structure
+def RecurseOctreeDepthFirstHilbert(xi,yi,zi,
+                            curpos, gi, 
+                            hs,
+                            output,
+                            refined,
+                            levels,
+                            grids,
+                            level,
+                            physical_center=None,
+                            physical_width=None):
+    grid = grids[gi]
+    m = 2**(-level-1) if level < 0 else 1
+    ple = grid.left_edges+na.array([xi,yi,zi])*grid.dx #parent LE
+    pre = ple+grid.dx*m
+    print level,ple*physical_width-physical_center,
+    print pre*physical_width-physical_center, hs.parent_octant
+
+
+    #here we go over the 8 octants
+    #in general however, a mesh cell on this level
+    #may have more than 8 children on the next level
+    #so we find the int float center (cxyz) of each child cell
+    # and from that find the child cell indices
+    for iv, vertex in enumerate(hs):
+        #print ' '*(level+3), level,iv, vertex,curpos.refined_pos,curpos.output_pos,
+        #negative level indicates that we need to build a super-octree
+        if level < 0: 
+            #print ' '
+            #we are not on the root grid yet, but this is 
+            #how many equivalent root grid cells we would have
+            #level -1 means our oct grid's children are the same size
+            #as the root grid (hence the -level-1)
+            dx = 2**(-level-1) #this is the child width 
+            i,j,k = xi+vertex[0]*dx,yi+vertex[1]*dx,zi+vertex[2]*dx
+            #print level,iv,vertex,
+            #print na.array([cx,cy,cz])/128.0*physical_width-physical_center,
+            #print na.array([cx+dx,cy+dx,cz+dx])/128.0*physical_width-physical_center
+            #we always refine the negative levels
+            hs_child = hilbert_state(vertex.copy())
+            refined[curpos.refined_pos] = 1
+            levels[curpos.refined_pos] = level
+            curpos.refined_pos += 1
+            RecurseOctreeDepthFirstHilbert(i, j, k,
+                                curpos, 0, hs_child, output, refined, levels, grids,
+                                level+1,
+                                physical_center=physical_center,
+                                physical_width=physical_width,)
+        else:
+            i,j,k = xi+vertex[0],yi+vertex[1],zi+vertex[2]
+            ci = grid.child_indices[i,j,k] #is this oct subdivided?
+            if ci == -1:
+                for fi in range(grid.fields.shape[0]):
+                    output[curpos.output_pos,fi] = grid.fields[fi,i,j,k]
+                refined[curpos.refined_pos] = 0
+                levels[curpos.refined_pos] = level
+                curpos.output_pos += 1 #position updated after write
+                curpos.refined_pos += 1
+                print level+1, #these child cells are a level deeper
+                print (grid.left_edges+na.array([i,j,k])*grid.dx)*physical_width-physical_center, #parent LE 
+                print (grid.left_edges+na.array([i+1,j+1,k+1])*grid.dx)*physical_width-physical_center, #parent LE 
+                print iv,vertex
+            else:
+                cx = (grid.left_edges[0] + i*grid.dx[0]) #floating le of the child
+                cy = (grid.left_edges[1] + j*grid.dx[0])
+                cz = (grid.left_edges[2] + k*grid.dx[0])
+                #print level,iv,vertex,
+                #print na.array([cx,cy,cz])*physical_width -physical_center,
+                #print na.array([cx+grid.dx[0],cy+grid.dx[0],cz+grid.dx[0]])*physical_width - physical_center
+                hs_child = hilbert_state(vertex.copy())
+                refined[curpos.refined_pos] = 1
+                levels[curpos.refined_pos] = level
+                curpos.refined_pos += 1 #position updated after write
+                child_grid = grids[ci]
+                child_dx = child_grid.dx[0]
+                child_leftedges = child_grid.left_edges
+                child_i = int((cx - child_leftedges[0])/child_dx)
+                child_j = int((cy - child_leftedges[1])/child_dx)
+                child_k = int((cz - child_leftedges[2])/child_dx)
+                RecurseOctreeDepthFirstHilbert(child_i, child_j, child_k,
+                                    curpos, ci, hs_child, output, refined, levels, grids,
+                                    level+1,
+                                    physical_center=physical_center,
+                                    physical_width=physical_width)
+
+def create_fits_file(pf,fn, refined,output,particle_data,fre,fle):
+
+    #first create the grid structure
     structure = pyfits.Column("structure", format="B", array=refined.astype("bool"))
     cols = pyfits.ColDefs([structure])
     st_table = pyfits.new_table(cols)
     st_table.name = "GRIDSTRUCTURE"
+    st_table.header.update("hierarch lengthunit", "kpc", comment="Length unit for grid")
+    fdx = fre-fle
+    print 'WARNING: debug limits set on minxyz maxxyz'
+    for i,a in enumerate('xyz'):
+        #st_table.header.update("min%s" % a, fle[i] * pf['kpc'])
+        #st_table.header.update("max%s" % a, fre[i] * pf['kpc'])
+        st_table.header.update("min%s" % a, 0) #WARNING: this is for debugging
+        st_table.header.update("max%s" % a, 2) #
+        st_table.header.update("n%s" % a, fdx[i])
+        st_table.header.update("subdiv%s" % a, 2)
+    st_table.header.update("subdivtp", "OCTREE", "Type of grid subdivision")
 
-    # Now we update our table with units
-    # ("lengthunit", length_unit, "Length unit for grid");
-    # ("minx", getmin () [0], length_unit_comment);
-    # ("miny", getmin () [1], length_unit_comment);
-    # ("minz", getmin () [2], length_unit_comment);
-    # ("maxx", getmax () [0], length_unit_comment);
-    # ("maxy", getmax () [1], length_unit_comment);
-    # ("maxz", getmax () [2], length_unit_comment);
-    # ("nx", g_.getn () [0], "");
-    # ("ny", g_.getn () [1], "");
-    # ("nz", g_.getn () [2], "");
-    # ("subdivtp", subdivtp, "Type of grid subdivision");
-    # ("subdivx", sub_div[0], "");
-    # ("subdivy", sub_div[1], "");
-    # ("subdivz", sub_div[2], "");
-
-    st_table.header.update("hierarch lengthunit", "kpc", comment="Length unit for grid")
-    for i,a in enumerate('xyz'):
-        st_table.header.update("min%s" % a, DLE[i] * pf['kpc']/pf.domain_dimensions[i])
-        st_table.header.update("max%s" % a, DRE[i] * pf['kpc']/pf.domain_dimensions[i])
-        st_table.header.update("n%s" % a, DX[i])
-        st_table.header.update("subdiv%s" % a, 2)
-    st_table.header.update("subdivtp", "UNIFORM", "Type of grid subdivision")
-
-    # Now grid data itself
-    # ("M_g_tot", total_quantities.m_g(), "[" + mass_unit +
-    #         "] Total gas mass in all cells");
-    # ("SFR_tot", total_quantities.SFR, "[" + SFR_unit +
-    #         "] Total star formation rate of all cells");
-    # ("timeunit", time_unit, "Time unit is "+time_unit);
-    # ("tempunit", temp_unit, "Temperature unit is "+time_unit);
-
-    # (Tdouble, "mass_gas", 1, mass_unit );
-    # (Tdouble, "SFR", 1, SFR_unit );
-    # (Tdouble, "mass_metals", 1, mass_unit );
-    # (Tdouble, "gas_temp_m", 1, temp_unit+"*"+mass_unit );
-    # (Tdouble, "gas_teff_m", 1, temp_unit+"*"+mass_unit );
-    # (Tdouble, "cell_volume", 1, length_unit + "^3" );
-
+    #now the hydro grid data
+    fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
+              "MetalMass","CellVolumeCode"]
+    fd = {}
+    for i,f in enumerate(fields): 
+        fd[f]=output[:,i]
+    del output
     col_list = []
-    size = output["CellMassMsun"].size
-    tm = output["CellMassMsun"].sum()
+    size = fd["CellMassMsun"].size
+    tm = fd["CellMassMsun"].sum()
     col_list.append(pyfits.Column("mass_gas", format='D',
-                    array=output.pop('CellMassMsun'), unit="Msun"))
+                    array=fd['CellMassMsun'], unit="Msun"))
     col_list.append(pyfits.Column("mass_metals", format='D',
-                    array=output.pop('MetalMass'), unit="Msun"))
+                    array=fd['MetalMass'], unit="Msun"))
+    # col_list.append(pyfits.Column("mass_stars", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    # col_list.append(pyfits.Column("mass_stellar_metals", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    # col_list.append(pyfits.Column("age_m", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    # col_list.append(pyfits.Column("age_l", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    # col_list.append(pyfits.Column("L_bol", format='D',
+    #                 array=na.zeros(size,dtype='D')))
+    # col_list.append(pyfits.Column("L_lambda", format='D',
+    #                 array=na.zeros(size,dtype='D')))
     # The units for gas_temp are really K*Msun. For older Sunrise versions
     # you must set the unit to just K  
     col_list.append(pyfits.Column("gas_temp_m", format='D',
-                    array=output['TemperatureTimesCellMassMsun'], unit="K*Msun"))
+                    array=fd['TemperatureTimesCellMassMsun'], unit="K*Msun"))
     col_list.append(pyfits.Column("gas_teff_m", format='D',
-                    array=output.pop('TemperatureTimesCellMassMsun'), unit="K*Msun"))
+                    array=fd['TemperatureTimesCellMassMsun'], unit="K*Msun"))
     col_list.append(pyfits.Column("cell_volume", format='D',
-                    array=output.pop('CellVolumeCode').astype('float64')*pf['kpc']**3.0,
+                    array=fd['CellVolumeCode'].astype('float64')*pf['kpc']**3.0,
                     unit="kpc^3"))
     col_list.append(pyfits.Column("SFR", format='D',
                     array=na.zeros(size, dtype='D')))
     md_table.header.update("snaptime", pf.current_time*pf['years'])
     md_table.name = "YT"
 
-    hls = [pyfits.PrimaryHDU(), st_table, mg_table,md_table]
-    if write_particles: hls.append(pd_table)
+    phdu = pyfits.PrimaryHDU()
+    phdu.header.update('nbodycod','yt')
+    hls = [phdu, st_table, mg_table,md_table]
+    hls.append(particle_data)
     hdus = pyfits.HDUList(hls)
     hdus.writeto(fn, clobber=True)
 
-def initialize_octree_list_task(g,fields, grids = [], 
-        levels_finest = defaultdict(lambda: 0), 
-        levels_all = defaultdict(lambda: 0)):
-    ff = na.array([g[f] for f in fields])
-    grids.append(amr_utils.OctreeGrid(
-                    g.child_index_mask.astype('int32'),
-                    ff.astype("float64"),
-                    g.LeftEdge.astype('float64'),
-                    g.ActiveDimensions.astype('int32'),
-                    na.ones(1,dtype='float64') * g.dds[0], g.Level,
-                    g._id_offset))
-    levels_all[g.Level] += g.ActiveDimensions.prod()
-    levels_finest[g.Level] += g.child_mask.ravel().sum()
-    g.clear_data()
-    return grids,levels_finest,levels_all
+def nearest_power(x):
+    #round to the nearest power of 2
+    x-=1
+    x |= x >> 1
+    x |= x >> 2 
+    x |= x >> 4
+    x |= x >> 8
+    x |= x >> 16
+    x+=1 
+    return x
 
-def initialize_octree_list(pf, fields,parallel=False):
-    #import pdb; pdb.set_trace()
-    i=0
-    o_length = r_length = 0
-    grids = []
-    pbar = get_pbar("Initializing octs ",len(pf.h.grids))
+def round_nearest_edge(pf,dle,dre):
+    dds = pf.domain_dimensions
+    ile = na.floor(dle*dds).astype('int')
+    ire = na.ceil(dre*dds).astype('int') 
     
-    grids = []
-    levels_finest, levels_all = defaultdict(lambda: 0), defaultdict(lambda: 0)
- 
-    import pdb; pdb.set_trace()
-    if not parallel:
-        for g in pf.h.grids:
-            i+=1
-            tgrids,tlevels_finest,tlevels_all = \
-                initialize_octree_list_task(g,fields,grids=grids,
-                        levels_finest=levels_finest,
-                        levels_all=levels_all)
-            pbar.update(i)
-    else:
-        import multiprocessing
-        nbr_chunks = multiprocessing.cpu_count()
-        chunk_size = len(pf.h.grids) / nbr_chunks
-        if chunk_size % nbr_chunks != 0:
-            # make sure we get the last few items of data when we have
-            # an odd size to chunks (e.g. len(q) == 100 and nbr_chunks == 3
-            nbr_chunks += 1
-        chunks = [(pf.h.grids[x*chunk_size:(x+1)*chunk_size],fields) \
-            for x in xrange(nbr_chunks)]
+    #this is the number of cells the super octree needs to expand to
+    #must round to the nearest power of 2
+    width = na.max(ire-ile)
+    width = nearest_power(width)
+    
+    maxlevel = na.rint(na.log2(width)).astype('int')
+    return ile,ire,maxlevel
 
-        p = multiprocessing.Pool()
-        # send out the work chunks to the Pool
-        # po is a multiprocessing.pool.MapResult
-        po = p.map_async(initialize_octree_list_task,chunks)
-        # we get a list of lists back, one per chunk, so we have to
-        # flatten them back together
-        # po.get() will block until results are ready and then
-        # return a list of lists of results
-        results = po.get()
+def prepare_star_particles(pf,star_type,pos=None,vel=None, age=None,
+                          creation_time=None,initial_mass=None,
+                          current_mass=None,metallicity=None,
+                          radius = None,
+                          fle=[0.,0.,0.],fre=[1.,1.,1.]):
+    dd = pf.h.all_data()
+    idx = dd["particle_type"] == star_type
+    if pos is None:
+        pos = na.array([dd["particle_position_%s" % ax]
+                        for ax in 'xyz']).transpose()
+    idx = idx & na.all(pos>fle,axis=1) & na.all(pos<fre,axis=1)
+    pos = pos[idx]*pf['kpc'] #unitary units -> kpc
+    if age is None:
+        age = dd["particle_age"][idx]*pf['years'] # seconds->years
+    if vel is None:
+        vel = na.array([dd["particle_velocity_%s" % ax][idx]
+                        for ax in 'xyz']).transpose()
+        # Velocity is cm/s, we want it to be kpc/yr
+        #vel *= (pf["kpc"]/pf["cm"]) / (365*24*3600.)
+        vel *= 1.02268944e-14 
+    if initial_mass is None:
+        #in solar masses
+        initial_mass = dd["particle_mass_initial"][idx]*pf['Msun']
+    if current_mass is None:
+        #in solar masses
+        current_mass = dd["particle_mass"][idx]*pf['Msun']
+    if metallicity is None:
+        #this should be in dimensionless units, metals mass / particle mass
+        metallicity = dd["particle_metallicity"][idx]
+    if radius is None:
+        radius = initial_mass*0.0+10.0/1000.0 #10pc radius
 
-        for tgrids,tlevels_finest,tlevels_all in results:
-            grids += tgrids
-            for k,v in tlevels_finest.iteritems():
-                levels_finest[k] += v
-            for k,v in  tlevels_all.iteritems():
-                levels_all[k] += v
+    formation_time = pf.current_time-age
+    #create every column
+    col_list = []
+    col_list.append(pyfits.Column("ID", format="I", array=na.arange(current_mass.size)))
+    col_list.append(pyfits.Column("parent_ID", format="I", array=na.arange(current_mass.size)))
+    col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
+    col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
+    col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun"))
+    col_list.append(pyfits.Column("formation_time", format="D", array=formation_time, unit="yr"))
+    col_list.append(pyfits.Column("radius", format="D", array=radius, unit="kpc"))
+    col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
+    col_list.append(pyfits.Column("age_m", format="D", array=age))
+    col_list.append(pyfits.Column("age_l", format="D", array=age))
+    #For particles, Sunrise takes 
+    #the dimensionless metallicity, not the mass of the metals
+    col_list.append(pyfits.Column("metallicity", format="D",
+        array=metallicity,unit="Msun")) 
+    col_list.append(pyfits.Column("L_bol", format="D",
+        array=na.zeros(current_mass.size)))
+    
+    #make the table
+    cols = pyfits.ColDefs(col_list)
+    pd_table = pyfits.new_table(cols)
+    pd_table.name = "PARTICLEDATA"
+    return pd_table
 
 
-    pbar.finish()
-    ogl = amr_utils.OctreeGridList(grids)
-    return ogl, levels_finest, levels_all
+def add_fields():
+    """Add three Eulerian fields Sunrise uses"""
+    def _MetalMass(field, data):
+        return data["Metal_Density"] * data["CellVolume"]
+        
+    def _convMetalMass(data):
+        return 1.0/1.989e33
+    
+    add_field("MetalMass", function=_MetalMass,
+              convert_function=_convMetalMass)
 
-def generate_flat_octree(pf, fields, subregion_bounds = None,parallel=False):
-    """
-    Generates two arrays, one of the actual values in a depth-first flat
-    octree array, and the other of the values describing the refinement.
-    This allows for export to a code that understands this.  *field* is the
-    field used in the data array.
-    """
-    fields = ensure_list(fields)
-    ogl, levels_finest, levels_all = initialize_octree_list(pf, fields,parallel=parallel)
-    o_length = na.sum(levels_finest.values())
-    r_length = na.sum(levels_all.values())
-    output = na.zeros((o_length,len(fields)), dtype='float64')
-    refined = na.zeros(r_length, dtype='int32')
-    position = amr_utils.position()
-    if subregion_bounds is None:
-        sx, sy, sz = 0, 0, 0
-        nx, ny, nz = ogl[0].dimensions
-    else:
-        ss, ns = zip(*subregion_bounds)
-        sx, sy, sz = ss
-        nx, ny, nz = ns
-    print "Running from %s for %s cells" % (
-            (sx,sy,sz), (nx,ny,nz))
-    t1 = time.time()
-    amr_utils.RecurseOctreeDepthFirst(
-               sx, sy, sz, nx, ny, nz,
-               position, 0,
-               output, refined, ogl)
-    t2 = time.time()
-    print "Finished.  Took %0.3e seconds." % (t2-t1)
-    dd = {}
-    for i, field in enumerate(fields):
-        dd[field] = output[:position.output_pos,i]
-    return dd, refined[:position.refined_pos]
+    def _initial_mass_cen_ostriker(field, data):
+        # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
+        # Check Grid_AddToDiskProfile.C and star_maker7.src
+        star_mass_ejection_fraction = data.pf.get_parameter("StarMassEjectionFraction",float)
+        star_maker_minimum_dynamical_time = 3e6 # years, which will get divided out
+        dtForSFR = star_maker_minimum_dynamical_time / data.pf["years"]
+        xv1 = ((data.pf["InitialTime"] - data["creation_time"])
+                / data["dynamical_time"])
+        xv2 = ((data.pf["InitialTime"] + dtForSFR - data["creation_time"])
+                / data["dynamical_time"])
+        denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*na.exp(-xv1)))
+        minitial = data["ParticleMassMsun"] / denom
+        return minitial
 
-def generate_levels_octree(pf, fields):
-    fields = ensure_list(fields) + ["Ones", "Ones"]
-    ogl, levels_finest, levels_all = initialize_octree_list(pf, fields)
-    o_length = na.sum(levels_finest.values())
-    r_length = na.sum(levels_all.values())
-    output = na.zeros((r_length,len(fields)), dtype='float64')
-    genealogy = na.zeros((r_length, 3), dtype='int64') - 1 # init to -1
-    corners = na.zeros((r_length, 3), dtype='float64')
-    position = na.add.accumulate(
-                na.array([0] + [levels_all[v] for v in
-                    sorted(levels_all)[:-1]], dtype='int64'), dtype="int64")
-    pp = position.copy()
-    amr_utils.RecurseOctreeByLevels(0, 0, 0,
-               ogl[0].dimensions[0],
-               ogl[0].dimensions[1],
-               ogl[0].dimensions[2],
-               position.astype('int64'), 1,
-               output, genealogy, corners, ogl)
-    return output, genealogy, levels_all, levels_finest, pp, corners
+    add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker)
 
-def _initial_mass_cen_ostriker(field, data):
-    # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
-    # Check Grid_AddToDiskProfile.C and star_maker7.src
-    star_mass_ejection_fraction = data.pf.get_parameter("StarMassEjectionFraction",float)
-    star_maker_minimum_dynamical_time = 3e6 # years, which will get divided out
-    dtForSFR = star_maker_minimum_dynamical_time / data.pf["years"]
-    xv1 = ((data.pf["InitialTime"] - data["creation_time"])
-            / data["dynamical_time"])
-    xv2 = ((data.pf["InitialTime"] + dtForSFR - data["creation_time"])
-            / data["dynamical_time"])
-    denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*na.exp(-xv1)))
-    minitial = data["ParticleMassMsun"] / denom
-    return minitial
-add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker)
+    def _temp_times_mass(field, data):
+        return data["Temperature"]*data["CellMassMsun"]
+    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
 
-def _temp_times_mass(field, data):
-    return data["Temperature"]*data["CellMassMsun"]
-add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
+class position:
+    def __init__(self):
+        self.output_pos = 0
+        self.refined_pos = 0
 
+
+def vertex_to_octant(v):
+    if v[0]==0 and v[1]==0 and v[2]==0: return 1
+    if v[0]==0 and v[1]==1 and v[2]==0: return 2
+    if v[0]==1 and v[1]==1 and v[2]==0: return 3
+    if v[0]==1 and v[1]==0 and v[2]==0: return 4
+    if v[0]==1 and v[1]==0 and v[2]==1: return 5
+    if v[0]==1 and v[1]==1 and v[2]==1: return 6
+    if v[0]==0 and v[1]==1 and v[2]==1: return 7
+    if v[0]==0 and v[1]==0 and v[2]==1: return 8
+    raise IOError
+
+
+class hilbert_state:
+    vertex = na.zeros(3,dtype='int64')
+    nvertex = na.zeros(3,dtype='int64')
+    signa,signb,signc = 0,0,0
+    dima,dimb,dimc= -1,-1,-2
+    parent_octant = -1
+
+    def __init__(self, vertex=None):
+        if vertex is None:
+            vertex = na.array([1,0,0],dtype='int64')
+        self.vertex= vertex
+        self.parent_octant = vertex_to_octant(self.vertex)
+        self.swap_by_octant(self.parent_octant)
+        self.signc = self.signa*self.signb
+        self.dimc = 3-self.dima-self.dimb
+
+    def swap_by_octant(self, octant): 
+        if octant==1: 
+            self.dima = 2
+            self.dimb = 0
+            self.signa = 1
+            self.signb = 1
+        elif octant==2: 
+            self.dima = 1
+            self.dimb = 2
+            self.signa = 1
+            self.signb = 1
+        elif octant==3:
+            self.dima = 1
+            self.dimb = 0
+            self.signa = 1
+            self.signb = 1
+        elif octant==4: 
+            self.dima = 2
+            self.dimb = 1
+            self.signa = -1
+            self.signb = -1
+        elif octant==5: 
+            self.dima = 2
+            self.dimb = 1
+            self.signa = 1
+            self.signb = -1
+        elif octant==6: 
+            self.dima = 1
+            self.dimb = 0
+            self.signa = 1
+            self.signb = 1
+        elif octant==7: 
+            self.dima = 1
+            self.dimb = 2
+            self.signa = 1
+            self.signb = -1
+        elif octant==8: 
+            self.dima = 2
+            self.dimb = 0
+            self.signa = -1
+            self.signb = 1
+        assert octant < 9
+
+
+    def __iter__(self):
+        return self.next()
+
+    def next(self):
+        #yield the next cell in this oct
+        
+        #ascend/descend the first dimension
+        # the second dim
+        #reverse the first
+        #climb the third
+        self.vertex[self.dima] = 0 if self.signa>0 else 1
+        self.vertex[self.dimb] = 0 if self.signb>0 else 1
+        self.vertex[self.dimc] = 0 if self.signc>0 else 1
+        yield self.vertex.copy()
+        self.vertex[self.dima] = self.vertex[self.dima] + self.signa; 
+        yield self.vertex.copy()
+        self.vertex[self.dimb] = self.vertex[self.dimb] + self.signb; 
+        yield self.vertex.copy()
+        self.vertex[self.dima] = self.vertex[self.dima] - self.signa; 
+        yield self.vertex.copy()
+        self.vertex[self.dimc] = self.vertex[self.dimc] + self.signc; 
+        yield self.vertex.copy()
+        self.vertex[self.dima] = self.vertex[self.dima] + self.signa; 
+        yield self.vertex.copy()
+        self.vertex[self.dimb] = self.vertex[self.dimb] - self.signb; 
+        yield self.vertex.copy()
+        self.vertex[self.dima] = self.vertex[self.dima] - self.signa; 
+        yield self.vertex.copy()
+
+    def next_hilbert(self):
+        nvertex = self.next()
+        return nvertex, hilbert_state(nvertex)
+
+
+
+
+if not debug:
+    from amr_utils import hilbert_state
+    from amr_utils import RecurseOctreeDepthFirstHilbert
+    from amr_utils import position

yt/frontends/art/data_structures.py

     def __init__(self, pf, data_style='art'):
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
-        # for now, the hierarchy file is the parameter file!
+        #for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.float_type = na.float64
         
 
         if self.pf.file_particle_data:
+            #import pdb; pdb.set_trace()
             lspecies = self.pf.parameters['lspecies']
             wspecies = self.pf.parameters['wspecies']
             Nrow     = self.pf.parameters['Nrow']
             self.pf.particle_position,self.pf.particle_velocity = \
                 read_particles(self.pf.file_particle_data,nstars,Nrow)
             pbar.update(1)
-            np = lspecies[-1]
-            if self.pf.dm_only:
-                np = lspecies[0]
-            self.pf.particle_position   = self.pf.particle_position[:np]
-            self.pf.particle_position  -= 1.0 #fortran indices start with 0
+            npa,npb=0,0
+            npb = lspecies[-1]
+            clspecies = na.concatenate(([0,],lspecies))
+            if self.pf.only_particle_type is not None:
+                npb = lspecies[0]
+                if type(self.pf.only_particle_type)==type(5):
+                    npa = clspecies[self.pf.only_particle_type]
+                    npb = clspecies[self.pf.only_particle_type+1]
+            np = npb-npa
+            self.pf.particle_position   = self.pf.particle_position[npa:npb]
+            #self.pf.particle_position  -= 1.0 #fortran indices start with 1; shift to 0-based
             pbar.update(2)
             self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
             pbar.update(3)
-            self.pf.particle_velocity   = self.pf.particle_velocity[:np]
+            self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
             self.pf.particle_velocity  *= uv #to proper cm/s
             pbar.update(4)
-            self.pf.particle_type       = na.zeros(np,dtype='uint8')
-            self.pf.particle_mass       = na.zeros(np,dtype='float64')
+            self.pf.particle_type         = na.zeros(np,dtype='uint8')
+            self.pf.particle_mass         = na.zeros(np,dtype='float64')
+            self.pf.particle_mass_initial = na.zeros(np,dtype='float64')-1
+            self.pf.particle_creation_time= na.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity1 = na.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity2 = na.zeros(np,dtype='float64')-1
+            self.pf.particle_age          = na.zeros(np,dtype='float64')-1
             
             dist = self.pf['cm']/self.pf.domain_dimensions[0]
             self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
+            self.pf.conversion_factors['particle_mass_initial'] = 1.0 #solar mass in g
             self.pf.conversion_factors['particle_species'] = 1.0
             for ax in 'xyz':
                 self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
                 #already in unitary units
                 self.pf.conversion_factors['particle_position_%s'%ax] = 1.0 
             self.pf.conversion_factors['particle_creation_time'] =  31556926.0
-            self.pf.conversion_factors['particle_metallicity_fraction']=1.0
+            self.pf.conversion_factors['particle_metallicity']=1.0
+            self.pf.conversion_factors['particle_metallicity1']=1.0
+            self.pf.conversion_factors['particle_metallicity2']=1.0
             self.pf.conversion_factors['particle_index']=1.0
+            self.pf.conversion_factors['particle_type']=1
+            self.pf.conversion_factors['particle_age']=1
+            self.pf.conversion_factors['Msun'] = 5.027e-34 #conversion to solar mass units
             
-            #import pdb; pdb.set_trace()
 
             a,b=0,0
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
-                self.pf.particle_type[a:b] = i #particle type
-                self.pf.particle_mass[a:b]    = m*um #mass in solar masses
+                if type(self.pf.only_particle_type)==type(5):
+                    if not i==self.pf.only_particle_type:
+                        continue
+                    self.pf.particle_type += i
+                    self.pf.particle_mass += m*um
+
+                else:
+                    self.pf.particle_type[a:b] = i #particle type
+                    self.pf.particle_mass[a:b] = m*um #mass in solar masses
                 a=b
             pbar.finish()
+
+            nparticles = [0,]+list(lspecies)
+            for j,np in enumerate(nparticles):
+                mylog.debug('found %i of particle type %i'%(np,j))
+            
+            if self.pf.single_particle_mass:
+                #cast all particle masses to the same mass
+                cast_type = self.pf.single_particle_type
+                
+
             
             self.pf.particle_star_index = i
             
-            if self.pf.file_star_data and (not self.pf.dm_only):
+            do_stars = (self.pf.only_particle_type is None) or \
+                       (self.pf.only_particle_type == -1) or \
+                       (self.pf.only_particle_type == len(lspecies))
+            if self.pf.file_star_data and do_stars: 
                 nstars, mass, imass, tbirth, metallicity1, metallicity2 \
                      = read_stars(self.pf.file_star_data,nstars,Nrow)
                 nstars = nstars[0] 
                 if nstars > 0 :
                     n=min(1e2,len(tbirth))
                     pbar = get_pbar("Stellar Ages        ",n)
-                    self.pf.particle_star_ages  = \
+                    sages  = \
                         b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
-                    self.pf.particle_star_ages *= 1.0e9
-                    self.pf.particle_star_ages *= 365*24*3600 #to seconds
-                    self.pf.particle_star_ages = self.pf.current_time-self.pf.particle_star_ages
+                    sages *= 1.0e9
+                    sages *= 365*24*3600 #to seconds
+                    sages = self.pf.current_time-sages
+                    self.pf.particle_age[-nstars:] = sages
                     pbar.finish()
-                    self.pf.particle_star_metallicity1 = metallicity1
-                    self.pf.particle_star_metallicity2 = metallicity2
-                    self.pf.particle_star_mass_initial = imass*um
+                    self.pf.particle_metallicity1[-nstars:] = metallicity1
+                    self.pf.particle_metallicity2[-nstars:] = metallicity2
+                    self.pf.particle_mass_initial[-nstars:] = imass*um
                     self.pf.particle_mass[-nstars:] = mass*um
 
-            left = self.pf.particle_position.shape[0]
+            done = 0
             init = self.pf.particle_position.shape[0]
-            pbar = get_pbar("Gridding Particles ",init)
-            pos = self.pf.particle_position.copy()
-            pid = na.arange(pos.shape[0]).astype('int64')
+            pos = self.pf.particle_position
             #particle indices travel with the particle positions
             #pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
-            max_level = min(self.pf.max_level,self.pf.limit_level)
+            #if type(self.pf.grid_particles) == type(5):
+            #    max_level = min(max_level,self.pf.grid_particles)
             grid_particle_count = na.zeros((len(grids),1),dtype='int64')
             
             #grid particles at the finest level, removing them once gridded
-            if self.pf.grid_particles:
-                for level in range(max_level,self.pf.min_level-1,-1):
-                    lidx = self.grid_levels[:,0] == level
-                    for gi,gidx in enumerate(na.where(lidx)[0]): 
-                        g = grids[gidx]
-                        assert g is not None
-                        le,re = self.grid_left_edge[gidx],self.grid_right_edge[gidx]
-                        idx = na.logical_and(na.all(le < pos,axis=1),
-                                             na.all(re > pos,axis=1))
-                        fidx = pid[idx]
-                        np = na.sum(idx)                     
-                        g.NumberOfParticles = np
-                        grid_particle_count[gidx,0]=np
-                        g.hierarchy.grid_particle_count = grid_particle_count
-                        if np==0: 
-                            g.particle_indices = []
-                            #we have no particles in this grid
-                        else:
-                            g.particle_indices = fidx.astype('int64')
-                            pos = pos[~idx] #throw out gridded particles from future gridding
-                            pid = pid[~idx]
-                        self.grids[gidx] = g
-                        left -= np
-                        pbar.update(init-left)
-                pbar.finish()
-            else:
-                g = grids[0]
-                g.NumberOfParticles = pos.shape[0]
-                grid_particle_count[0,0]=pos.shape[0]
+            #pbar = get_pbar("Gridding Particles ",init)
+            #assignment = amr_utils.assign_particles_to_cells(
+            #        self.grid_levels.ravel().astype('int32'),
+            #        self.grid_left_edge.astype('float32'),
+            #        self.grid_right_edge.astype('float32'),
+            #        pos[:,0].astype('float32'),
+            #        pos[:,1].astype('float32'),
+            #        pos[:,2].astype('float32'))
+            #pbar.finish()
+
+            pbar = get_pbar("Gridding Particles ",init)
+            assignment,ilists = amr_utils.assign_particles_to_cell_lists(
+                    self.grid_levels.ravel().astype('int32'),
+                    2, #only bother gridding particles to level 2
+                    self.grid_left_edge.astype('float32'),
+                    self.grid_right_edge.astype('float32'),
+                    pos[:,0].astype('float32'),
+                    pos[:,1].astype('float32'),
+                    pos[:,2].astype('float32'))
+            pbar.finish()
+            
+            
+            pbar = get_pbar("Filling grids ",init)
+            for gidx,(g,ilist) in enumerate(zip(grids,ilists)):
+                np = len(ilist)
+                grid_particle_count[gidx,0]=np
                 g.hierarchy.grid_particle_count = grid_particle_count
-                g.particle_indices = pid
-                grids[0] = g
-                for gi,g in enumerate(grids): self.grids[gi]=g
+                g.particle_indices = ilist
+                grids[gidx] = g
+                done += np
+                pbar.update(done)
+            pbar.finish()
+
+            #assert init-done== 0 #we have gridded every particle
             
-        else:
-            
-            pbar = get_pbar("Finalizing grids ",len(grids))
-            for gi, g in enumerate(grids): 
-                self.grids[gi] = g
-            pbar.finish()
+        pbar = get_pbar("Finalizing grids ",len(grids))
+        for gi, g in enumerate(grids): 
+            self.grids[gi] = g
+        pbar.finish()
             
 
     def _get_grid_parents(self, grid, LE, RE):
                  discover_particles=False,
                  use_particles=True,
                  limit_level=None,
-                 dm_only=False,
-                 grid_particles=False):
+                 only_particle_type = None,
+                 grid_particles=False,
+                 single_particle_mass=False,
+                 single_particle_type=0):
         import yt.frontends.ramses._ramses_reader as _ramses_reader
         
         
         self.file_particle_header = file_particle_header
         self.file_particle_data = file_particle_data
         self.file_star_data = file_star_data
-        self.dm_only = dm_only
+        self.only_particle_type = only_particle_type
         self.grid_particles = grid_particles
+        self.single_particle_mass = single_particle_mass
         
         if limit_level is None:
             self.limit_level = na.inf

yt/frontends/art/definitions.py

 """
 
 art_particle_field_names = [
+'particle_age',
 'particle_index',
 'particle_mass',
+'particle_mass_initial',
 'particle_creation_time',
-'particle_metallicity_fraction',
+'particle_metallicity1',
+'particle_metallicity2',
+'particle_metallicity',
 'particle_position_x',
 'particle_position_y',
 'particle_position_z',

yt/frontends/art/fields.py

         add_field(f, function=lambda a,b: None, take_log=True,
                   validators = [ValidateDataField(f)])
 
-#Fields that are verified to be OK unit-wise:
+#Hydro Fields that are verified to be OK unit-wise:
 #Density
+#Temperature
 
-#Fields that need to be tested:
+#Hydro Fields that need to be tested:
 #TotalEnergy
 #XYZMomentum
 #Pressure
 #MetalDensity SNII + SNia
 #Potentials
 
-#Derived fields that are OK
-#Temperature
-
-#Derived fields that are untested:
+#Hydro Derived fields that are untested:
 #metallicities
 #xyzvelocity
 
-#Individual definitions for native fields
+#Particle fields that are tested:
+#particle_position_xyz
+#particle_type
+#particle_index
+#particle_mass
+#particle_mass_initial
+#particle_age
+#particle_velocity
+#particle_metallicity12
+
+#Particle fields that are untested:
+#NONE
+
 
 def _convertDensity(data):
     return data.convert("Density")

yt/frontends/art/io.py

 
     def _read_particle_field(self, grid, field):
         #This will be cleaned up later
-        idx = grid.particle_indices
+        idx = na.array(grid.particle_indices)
         if field == 'particle_index':
-            return idx
+            return na.array(idx)
         if field == 'particle_type':
             return grid.pf.particle_type[idx]
         if field == 'particle_position_x':
         if field == 'particle_velocity_z':
             return grid.pf.particle_velocity[idx][:,2]
         
-        tridx = grid.particle_indices >= grid.pf.particle_star_index
-        sidx  = grid.particle_indices[tridx] - grid.pf.particle_star_index
-        n = grid.particle_indices
-        if field == 'particle_creation_time':
-            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            if sidx.shape[0]>0:
-                tr[tridx] = grid.pf.particle_star_ages[sidx]
-            return tr
-        if field == 'particle_metallicity_fraction':
-            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            if sidx.shape[0]>0:
-                tr[tridx]  = grid.pf.particle_star_metallicity1[sidx]
-                tr[tridx] += grid.pf.particle_star_metallicity2[sidx]
-            return tr
+        #stellar fields
+        if field == 'particle_age':
+            return grid.pf.particle_age[idx]
+        if field == 'particle_metallicity':
+            return grid.pf.particle_metallicity1[idx] +\
+                   grid.pf.particle_metallicity2[idx]
         if field == 'particle_metallicity1':
-            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            if sidx.shape[0]>0:
-                tr[tridx] = grid.pf.particle_star_metallicity1[sidx]
-            return tr
+            return grid.pf.particle_metallicity1[idx]
         if field == 'particle_metallicity2':
-            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            if sidx.shape[0]>0:
-                tr[tridx] = grid.pf.particle_star_metallicity2[sidx]
-            return tr
+            return grid.pf.particle_metallicity2[idx]
         if field == 'particle_mass_initial':
-            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            if sidx.shape[0]>0:
-                tr[tridx] = grid.pf.particle_star_mass_initial[sidx]
-            return tr
+            return grid.pf.particle_mass_initial[idx]
+        
         raise 'Should have matched one of the particle fields...'
 
         
     def _read_data_set(self, grid, field):
-        #import pdb; pdb.set_trace()
         if field in art_particle_field_names:
             return self._read_particle_field(grid, field)
         pf = grid.pf

yt/utilities/_amr_utils/CICDeposit.pyx

 """
-Simle integrators for the radiative transfer equation
+Simple integrators for the radiative transfer equation
 
 Author: Britton Smith <brittonsmith@gmail.com>
 Affiliation: CASA/University of Colorado
+Author: Christopher Moody <juxtaposicion@gmail.com>
+Affiliation: cemoody@ucsc.edu
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008 Matthew Turk.  All Rights Reserved.
         ind[2] = <int> ((pos_z[i] - left_edge[2]) * idds[2])
         sample[i] = arr[ind[0], ind[1], ind[2]]
     return sample
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def assign_particles_to_cells(np.ndarray[np.int32_t, ndim=1] levels, #for cells
+                              np.ndarray[np.float32_t, ndim=2] left_edges, #many cells
+                              np.ndarray[np.float32_t, ndim=2] right_edges,
+                              np.ndarray[np.float32_t, ndim=1] pos_x, #particle
+                              np.ndarray[np.float32_t, ndim=1] pos_y,
+                              np.ndarray[np.float32_t, ndim=1] pos_z):
+    #for every cell, assign the particles belonging to it,
+    #skipping previously assigned particles
+    cdef long level_max = np.max(levels)
+    cdef long i,j,level
+    cdef long npart = pos_x.shape[0]
+    cdef long ncells = left_edges.shape[0] 
+    cdef np.ndarray[np.int32_t, ndim=1] assign = np.zeros(npart,dtype='int32')-1
+    for level in range(level_max,0,-1):
+        #start with the finest level
+        for i in range(ncells):
+            #go through every cell on the finest level first
+            if not levels[i] == level: continue
+            for j in range(npart):
+                #iterate over all particles, skip if assigned
+                if assign[j]>-1: continue
+                if (left_edges[i,0] <= pos_x[j] <= right_edges[i,0]):
+                    if (left_edges[i,1] <= pos_y[j] <= right_edges[i,1]):
+                        if (left_edges[i,2] <= pos_z[j] <= right_edges[i,2]):
+                            assign[j]=i
+    return assign
+
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def assign_particles_to_cell_lists(np.ndarray[np.int32_t, ndim=1] levels, #for cells
+                              np.int64_t level_max, 
+                              np.ndarray[np.float32_t, ndim=2] left_edges, #many cells
+                              np.ndarray[np.float32_t, ndim=2] right_edges,
+                              np.ndarray[np.float32_t, ndim=1] pos_x, #particle
+                              np.ndarray[np.float32_t, ndim=1] pos_y,
+                              np.ndarray[np.float32_t, ndim=1] pos_z):
+    #for every cell, assign the particles belonging to it,
+    #skipping previously assigned particles
+    #Todo: instead of iterating over every particle, could use kdtree 
+    cdef long i,j,level
+    cdef long npart = pos_x.shape[0]
+    cdef long ncells = left_edges.shape[0] 
+    cdef np.ndarray[np.int32_t, ndim=1] assign = np.zeros(npart,dtype='int32')-1
+    index_lists = []
+    for level in range(level_max,0,-1):
+        #start with the finest level
+        for i in range(ncells):
+            #go through every cell on the finest level first
+            if not levels[i] == level: continue
+            index_list = []
+            for j in range(npart):
+                #iterate over all particles, skip if assigned
+                if assign[j]>-1: continue
+                if (left_edges[i,0] <= pos_x[j] <= right_edges[i,0]):
+                    if (left_edges[i,1] <= pos_y[j] <= right_edges[i,1]):
+                        if (left_edges[i,2] <= pos_z[j] <= right_edges[i,2]):
+                            assign[j]=i
+                            index_list += j,
+            index_lists += index_list,
+    return assign,index_lists
+
+    
+    

yt/utilities/_amr_utils/DepthFirstOctree.pyx

         self.output_pos = 0
         self.refined_pos = 0
 
cdef class hilbert_state:
    """Orientation of a Hilbert curve within one octree cell.

    Modeled on libpjutil/hilbert.h: the extra information needed to walk an
    oct's 8 children in Hilbert order is a permutation of the 3 dimensions
    plus a sign (direction) for each axis.  We store the first two dimensions
    (dima, dimb) and their signs (signa, signb); the third follows because
    (dima, dimb, dimc) is a permutation of (0, 1, 2) and the third sign is
    the product of the first two.

    The dimension/sign table below is taken from the source's vertex table
    ("2.7 of BG200" in the original comment):
        vertex -> 1st/2nd/3rd traversal axis
        1 -> +z +x +y      5 -> +z -y -x
        2 -> +y +z +x      6 -> +y +x +z
        3 -> +y +x +z      7 -> +y -z -x
        4 -> -z -y +x      8 -> -z +x -y
    """
    cdef public int dima, dimb, dimc, signa, signb, signc

    def __cinit__(self, int parent_octant):
        # First two axes/signs come from the table; the third axis and sign
        # are then fully determined.
        self.swap_by_octant(parent_octant)
        self.signc = self.signa * self.signb
        self.dimc = 3 - self.dima - self.dimb

    def swap_by_octant(self, int octant):
        # Set (dima, signa) and (dimb, signb) for the given parent vertex,
        # 1..8, per the table in the class docstring.  Note vertices 3 and 6
        # intentionally share an orientation in the source table.
        if octant == 1:
            self.dima = 2; self.signa = 1
            self.dimb = 0; self.signb = 1
        if octant == 2:
            self.dima = 1; self.signa = 1
            self.dimb = 2; self.signb = 1
        if octant == 3:
            self.dima = 1; self.signa = 1
            self.dimb = 0; self.signb = 1
        if octant == 4:
            self.dima = 2; self.signa = -1
            self.dimb = 1; self.signb = -1
        if octant == 5:
            self.dima = 2; self.signa = 1
            self.dimb = 1; self.signb = -1
        if octant == 6:
            self.dima = 1; self.signa = 1
            self.dimb = 0; self.signb = 1
        if octant == 7:
            self.dima = 1; self.signa = 1
            self.dimb = 2; self.signb = -1
        if octant == 8:
            self.dima = 2; self.signa = -1
            self.dimb = 0; self.signb = 1

    def __iter__(self):
        # Iterating a hilbert_state yields (cell_offset, child_hilbert_state)
        # pairs for the 8 sub-cells of the oct, in Hilbert order.
        return self.next_hilbert()

    def next(self):
        """Yield the 8 (i, j, k) offsets of this oct in Hilbert order.

        The walk is the classic 8-step Hilbert motif: advance along dima,
        then dimb, retract dima, climb dimc, advance dima, retract dimb,
        and finally retract dima.  A negative sign means we start at offset
        1 on that axis and move toward 0.
        """
        cell = na.zeros(3, dtype='int64')
        cell[self.dima] = 0 if self.signa > 0 else 1
        cell[self.dimb] = 0 if self.signb > 0 else 1
        cell[self.dimc] = 0 if self.signc > 0 else 1
        yield cell
        cell[self.dima] += self.signa; yield cell
        cell[self.dimb] += self.signb; yield cell
        cell[self.dima] -= self.signa; yield cell
        cell[self.dimc] += self.signc; yield cell
        cell[self.dima] += self.signa; yield cell
        cell[self.dimb] -= self.signb; yield cell
        cell[self.dima] -= self.signa
        yield cell

    def next_hilbert(self):
        """Yield (cell_offset, child_state) for each sub-cell in order.

        The child state is built from the 1-based vertex number of the step.
        NOTE(review): the vertex->orientation assignment may still be the
        source of the known wrong ordering — confirm against the reference
        table.
        """
        cdef int vertex = 0
        for cell in self.next():
            vertex += 1
            yield cell, hilbert_state(vertex)
+
@cython.boundscheck(False)
def RecurseOctreeDepthFirstHilbert(int i_i, int j_i, int k_i,
                            int i_f, int j_f, int k_f,
                            position curpos, int gi,
                            hilbert_state hs,
                            np.ndarray[np.float64_t, ndim=2] output,
                            np.ndarray[np.int64_t, ndim=1] refined,
                            OctreeGrid grids):
    """Depth-first, Hilbert-ordered walk of one oct of grid *gi*.

    (i_i, j_i, k_i) is the oct's origin cell in this grid; hs carries the
    Hilbert orientation for this oct.  Leaf cells append their field values
    to *output* and a 0 to *refined*; refined cells append a 1 and recurse
    into the child grid with the child's Hilbert state.  curpos tracks the
    running output/refined positions across the whole traversal.
    """
    cdef int i, j, k, ci, fi
    cdef int child_i, child_j, child_k
    cdef OctreeGrid child_grid
    cdef OctreeGrid grid = grids[gi]
    cdef np.ndarray[np.int32_t, ndim=3] child_indices = grid.child_indices
    cdef np.ndarray[np.float64_t, ndim=4] fields = grid.fields
    cdef np.ndarray[np.float64_t, ndim=1] leftedges = grid.left_edges
    cdef np.float64_t dx = grid.dx[0]
    cdef np.float64_t child_dx
    cdef np.ndarray[np.float64_t, ndim=1] child_leftedges
    cdef np.float64_t cx, cy, cz
    cdef hilbert_state hs_child
    for cell, hs_child in hs:
        # The Hilbert offsets are 0/1 within the oct; add the oct origin to
        # get absolute cell indices in this grid (the original discarded
        # i_i/j_i/k_i, so only the first oct was ever visited correctly).
        i = i_i + cell[0]
        j = j_i + cell[1]
        k = k_i + cell[2]
        ci = child_indices[i, j, k]
        if ci == -1:
            # Leaf cell: copy every field value, mark as unrefined.
            for fi in range(fields.shape[0]):
                output[curpos.output_pos, fi] = fields[fi, i, j, k]
            refined[curpos.refined_pos] = 0
            curpos.output_pos += 1
            curpos.refined_pos += 1
        else:
            # Refined cell: mark it, locate the oct's origin inside the
            # child grid, and descend with the child's Hilbert state.
            refined[curpos.refined_pos] = 1
            curpos.refined_pos += 1
            child_grid = grids[ci - grid.offset]
            child_dx = child_grid.dx[0]
            child_leftedges = child_grid.left_edges
            # Left edge of this cell in domain coordinates; cx/cy/cz were
            # previously read uninitialized.
            cx = leftedges[0] + i * dx
            cy = leftedges[1] + j * dx
            cz = leftedges[2] + k * dx
            child_i = int((cx - child_leftedges[0]) / child_dx)
            child_j = int((cy - child_leftedges[1]) / child_dx)
            child_k = int((cz - child_leftedges[2]) / child_dx)
            # Recurse with the Hilbert variant (the original called the
            # non-Hilbert RecurseOctreeDepthFirst with an extra argument).
            RecurseOctreeDepthFirstHilbert(child_i, child_j, child_k,
                                2, 2, 2, curpos, ci - grid.offset,
                                hs_child, output, refined, grids)
+
+
 cdef class OctreeGrid:
     cdef public object child_indices, fields, left_edges, dimensions, dx
     cdef public int level, offset
     cdef np.float64_t child_dx
     cdef np.ndarray[np.float64_t, ndim=1] child_leftedges
     cdef np.float64_t cx, cy, cz
+    #here we go over the 8 octants
+    #in general however, a mesh cell on this level
+    #may have more than 8 children on the next level
+    #so we find the int float center (cxyz) of each child cell
+    # and from that find the child cell indices
     for i_off in range(i_f):
-        i = i_off + i_i
+        i = i_off + i_i #index
         cx = (leftedges[0] + i*dx)
         for j_off in range(j_f):
             j = j_off + j_i
Tip: Filter by directory path e.g. /media app.js to search for public/media/app.js.
Tip: Use camelCasing e.g. ProjME to search for ProjectModifiedEvent.java.
Tip: Filter by extension type e.g. /repo .js to search for all .js files in the /repo directory.
Tip: Separate your search with spaces e.g. /ssh pom.xml to search for src/ssh/pom.xml.
Tip: Use ↑ and ↓ arrow keys to navigate and return to view the file.
Tip: You can also navigate files with Ctrl+j (next) and Ctrl+k (previous) and view the file with Ctrl+o.
Tip: You can also navigate files with Alt+j (next) and Alt+k (previous) and view the file with Alt+o.