Commits

Christopher Moody committed c94dedb Merge

merge

Comments (0)

Files changed (10)

 detailed-errors=1
 where=yt
 exclude=answer_testing
-with-xunit=1
+with-xunit=1
+#with-answer-testing=1
+#answer-compare=gold001

yt/analysis_modules/halo_finding/rockstar/rockstar.py

 from yt.mods import *
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, ProcessorPool, Communicator
+
 from yt.analysis_modules.halo_finding.halo_objects import * #Halos & HaloLists
-from yt.config import ytcfg
-
 import rockstar_interface
-
 import socket
 import time
-import threading
-import signal
-import os
-from os import environ
-from os import mkdir
-from os import path
 
-# Get some definitions from Rockstar directly.
-if "ROCKSTAR_DIR" in os.environ:
-    ROCKSTAR_DIR = os.environ["ROCKSTAR_DIR"]
-elif os.path.exists("rockstar.cfg"):
-    ROCKSTAR_DIR = open("rockstar.cfg").read().strip()
-else:
-    print "Reading Rockstar location from rockstar.cfg failed."
-    print "Please place the base directory of your"
-    print "Rockstar install in rockstar.cfg and restart."
-    print "(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )"
-    sys.exit(1)
-lines = file(path.join(ROCKSTAR_DIR, 'server.h'))
-READER_TYPE = None
-WRITER_TYPE = None
-for line in lines:
-    if "READER_TYPE" in line:
-        line = line.split()
-        READER_TYPE = int(line[-1])
-    if "WRITER_TYPE" in line:
-        line = line.split()
-        WRITER_TYPE = int(line[-1])
-    if READER_TYPE != None and WRITER_TYPE != None:
-        break
-lines.close()
+class DomainDecomposer(ParallelAnalysisInterface):
+    def __init__(self, pf, comm):
+        ParallelAnalysisInterface.__init__(self, comm=comm)
+        self.pf = pf
+        self.hierarchy = pf.h
+        self.center = (pf.domain_left_edge + pf.domain_right_edge)/2.0
 
-class InlineRunner(ParallelAnalysisInterface):
-    def __init__(self, num_writers):
-        # If this is being run inline, num_readers == comm.size, always.
-        self.num_readers = ytcfg.getint("yt", "__global_parallel_size")
-        if num_writers is None:
-            self.num_writers =  ytcfg.getint("yt", "__global_parallel_size")
-        else:
-            self.num_writers = min(num_writers,
-                ytcfg.getint("yt", "__global_parallel_size"))
-
-    def split_work(self, pool):
-        avail = range(pool.comm.size)
-        self.writers = []
-        self.readers = []
-        # If we're inline, everyone is a reader.
-        self.readers = avail[:]
-        if self.num_writers == pool.comm.size:
-            # And everyone is a writer!
-            self.writers = avail[:]
-        else:
-            # Everyone is not a writer.
-            # Cyclically assign writers which should approximate
-            # memory load balancing (depending on the mpirun call,
-            # but this should do it in most cases).
-            stride = int(ceil(float(pool.comm.size) / self.num_writers))
-            while len(self.writers) < self.num_writers:
-                self.writers.extend(avail[::stride])
-                for r in readers:
-                    avail.pop(avail.index(r))
-
-    def run(self, handler, pool):
-        # If inline, we use forks.
-        server_pid = 0
-        # Start a server on only one machine/fork.
-        if pool.comm.rank == 0:
-            server_pid = os.fork()
-            if server_pid == 0:
-                handler.start_server()
-                os._exit(0)
-        # Start writers.
-        writer_pid = 0
-        if pool.comm.rank in self.writers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            writer_pid = os.fork()
-            if writer_pid == 0:
-                handler.start_client(WRITER_TYPE)
-                os._exit(0)
-        # Start readers, not forked.
-        if pool.comm.rank in self.readers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            handler.start_client(READER_TYPE)
-        # Make sure the forks are done, which they should be.
-        if writer_pid != 0:
-            os.waitpid(writer_pid, 0)
-        if server_pid != 0:
-            os.waitpid(server_pid, 0)
-
-class StandardRunner(ParallelAnalysisInterface):
-    def __init__(self, num_readers, num_writers):
-        self.num_readers = num_readers
-        if num_writers is None:
-            self.num_writers = ytcfg.getint("yt", "__global_parallel_size") \
-                - num_readers - 1
-        else:
-            self.num_writers = min(num_writers,
-                ytcfg.getint("yt", "__global_parallel_size"))
-        if self.num_readers + self.num_writers + 1 != ytcfg.getint("yt", \
-                "__global_parallel_size"):
-            mylog.error('%i reader + %i writers != %i mpi',
-                    self.num_readers, self.num_writers,
-                    ytcfg.getint("yt", "__global_parallel_size"))
-            raise RuntimeError
-    
-    def split_work(self, pool):
-        # Who is going to do what.
-        avail = range(pool.comm.size)
-        self.writers = []
-        self.readers = []
-        # If we're not running inline, rank 0 should be removed immediately.
-        avail.pop(0)
-        # Now we assign the rest.
-        for i in range(self.num_readers):
-            self.readers.append(avail.pop(0))
-        for i in range(self.num_writers):
-            self.writers.append(avail.pop(0))
-    
-    def run(self, handler, pool):
-        # Not inline so we just launch them directly from our MPI threads.
-        if pool.comm.rank == 0:
-            handler.start_server()
-        if pool.comm.rank in self.readers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            handler.start_client(READER_TYPE)
-        if pool.comm.rank in self.writers:
-            time.sleep(0.2 + pool.comm.rank/10.0)
-            handler.start_client(WRITER_TYPE)
+    def decompose(self):
+        dd = self.pf.h.all_data()
+        check, LE, RE, data_source = self.partition_hierarchy_3d(dd)
+        return data_source
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
     def __init__(self, ts, num_readers = 1, num_writers = None, 
            The number of readers can be increased from the default
             of 1 in the event that a single snapshot is split among
             many files. This can help in cases where performance is
-            IO-limited. Default is 1. If run inline, it is
-            equal to the number of MPI threads.
+            IO-limited. Default is 1.
         num_writers: int
             The number of writers determines the number of processing threads
             as well as the number of threads writing output data.
-            The default is set to comm.size-num_readers-1. If run inline,
-            the default is equal to the number of MPI threads.
+            The default is set to comm.size-num_readers-1.
         outbase: str
             This is where the out*list files that Rockstar makes should be
-            placed. Default is 'rockstar_halos'.
+            placed. Default is str(pf)+'_rockstar'.
         particle_mass: float
             This sets the DM particle mass used in Rockstar.
         dm_type: 1
             In order to exclude stars and other particle types, define
             the dm_type. Default is 1, as Enzo has the DM particle type=1.
-        force_res: float
-            This parameter specifies the force resolution that Rockstar uses
-            in units of Mpc/h.
-            If no value is provided, this parameter is automatically set to
-            the width of the smallest grid element in the simulation from the
-            last data snapshot (i.e. the one where time has evolved the
-            longest) in the time series:
-            ``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
-            
+        force_res: None
+            The default force resolution is 0.0012 comoving Mpc/h.
+            This overrides Rockstar's defaults.
+
         Returns
         -------
         None
 
         test_rockstar.py:
 
+        from mpi4py import MPI
         from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
         from yt.mods import *
         import sys
         rh = RockstarHaloFinder(ts, particle_mass=pm)
         rh.run()
         """
-        # Decide how we're working.
-        if ytcfg.getboolean("yt", "inline") == True:
-            self.runner = InlineRunner(num_writers)
-        else:
-            self.runner = StandardRunner(num_readers, num_writers)
-        self.num_readers = self.runner.num_readers
-        self.num_writers = self.runner.num_writers
-        mylog.info("Rockstar is using %d readers and %d writers",
-            self.num_readers, self.num_writers)
-        # Note that Rockstar does not support subvolumes.
-        # We assume that all of the snapshots in the time series
-        # use the same domain info as the first snapshots.
+        ParallelAnalysisInterface.__init__(self)
+        # No subvolume support
+        #we assume that all of the snapshots in the time series
+        #use the same domain info as the first snapshots
         if not isinstance(ts,TimeSeriesData):
             ts = TimeSeriesData([ts])
         self.ts = ts
         self.dm_type = dm_type
+        if self.comm.size > 1: 
+            self.comm.barrier()            
         tpf = ts.__iter__().next()
-        def _particle_count(field, data):
-            try:
-                return (data["particle_type"]==dm_type).sum()
-            except KeyError:
-                return np.prod(data["particle_position_x"].shape)
-        add_field("particle_count",function=_particle_count, not_in_all=True,
-            particle_type=True)
-        # Get total_particles in parallel.
         dd = tpf.h.all_data()
-        self.total_particles = int(dd.quantities['TotalQuantity']('particle_count')[0])
         self.hierarchy = tpf.h
         self.particle_mass = particle_mass 
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
+        data_source = tpf.h.all_data()
         if outbase is None:
-            outbase = 'rockstar_halos'
-        self.outbase = outbase
-        self.particle_mass = particle_mass
-        if force_res is None:
-            self.force_res = ts[-1].h.get_smallest_dx() * ts[-1]['mpch']
-        else:
-            self.force_res = force_res
-        self.left_edge = tpf.domain_left_edge
-        self.right_edge = tpf.domain_right_edge
+            outbase = str(tpf)+'_rockstar'
+        self.outbase = outbase        
+        if num_writers is None:
+            num_writers = self.comm.size - num_readers -1
+        self.num_readers = num_readers
+        self.num_writers = num_writers
+        if self.num_readers + self.num_writers + 1 != self.comm.size:
+            #we need readers+writers+1 server = comm size        
+            raise RuntimeError
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
-        # We set up the workgroups *before* initializing
-        # ParallelAnalysisInterface. Everyone is their own workgroup!
-        self.pool = ProcessorPool()
-        for i in range(ytcfg.getint("yt", "__global_parallel_size")):
-             self.pool.add_workgroup(size=1)
-        ParallelAnalysisInterface.__init__(self)
-        for wg in self.pool.workgroups:
-            if self.pool.comm.rank in wg.ranks:
-                self.workgroup = wg
+        data_source = tpf.h.all_data()
+        self.comm.barrier()
+        self.force_res = force_res
+        def _pcount(field,data):
+            return (data["particle_type"]==dm_type).sum()
+        add_field("pcount",function=_pcount,particle_type=True)
+        total_particles = dd.quantities['TotalQuantity']('pcount')
+        self.total_particles = total_particles
+        mylog.info("Found %i halo particles",total_particles)
         self.handler = rockstar_interface.RockstarInterface(
-                self.ts, dd)
+                self.ts, data_source)
 
     def __del__(self):
         self.pool.free_all()
 
     def _get_hosts(self):
-        if self.pool.comm.size == 1 or self.pool.comm.rank == 0:
+        if self.comm.size == 1 or self.workgroup.name == "server":
             server_address = socket.gethostname()
             sock = socket.socket()
             sock.bind(('', 0))
             del sock
         else:
             server_address, port = None, None
-        self.server_address, self.port = self.pool.comm.mpi_bcast(
+        self.server_address, self.port = self.comm.mpi_bcast(
             (server_address, port))
         self.port = str(self.port)
 
         """
         
         """
+        if self.comm.size > 1:
+            self.pool = ProcessorPool()
+            mylog.debug("Num Writers = %s Num Readers = %s",
+                        self.num_writers, self.num_readers)
+            self.pool.add_workgroup(1, name = "server")
+            self.pool.add_workgroup(self.num_readers, name = "readers")
+            self.pool.add_workgroup(self.num_writers, name = "writers")
+            for wg in self.pool.workgroups:
+                if self.comm.rank in wg.ranks: self.workgroup = wg
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
         self.handler.setup_rockstar(self.server_address, self.port,
-                    len(self.ts), self.total_particles, 
-                    self.dm_type,
-                    parallel = self.pool.comm.size > 1,
+                    len(self.ts), self.total_particles, self.dm_type,
+                    parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
                     writing_port = -1,
                     force_res=self.force_res,
                     particle_mass = float(self.particle_mass),
                     **kwargs)
-        # Make the directory to store the halo lists in.
-        if self.pool.comm.rank == 0:
+        #because rockstar *always* writes to exactly the same
+        #out_0.list filename we make a directory for it
+        #to sit inside so it doesn't get accidentally
+        #overwritten 
+        if self.workgroup.name == "server":
             if not os.path.exists(self.outbase):
                 os.mkdir(self.outbase)
-            # Make a record of which dataset corresponds to which set of
-            # output files because it will be easy to lose this connection.
-            fp = open(self.outbase + '/pfs.txt', 'w')
-            fp.write("# pfname\tindex\n")
-            for i, pf in enumerate(self.ts):
-                pfloc = path.join(path.relpath(pf.fullpath), pf.basename)
-                line = "%s\t%d\n" % (pfloc, i)
-                fp.write(line)
-            fp.close()
-        # This barrier makes sure the directory exists before it might be used.
-        self.pool.comm.barrier()
-        if self.pool.comm.size == 1:
+        if self.comm.size == 1:
             self.handler.call_rockstar()
         else:
-            # Split up the work.
-            self.runner.split_work(self.pool)
-            # And run it!
-            self.runner.run(self.handler, self.pool)
-        self.pool.comm.barrier()
+            self.comm.barrier()
+            if self.workgroup.name == "server":
+                self.handler.start_server()
+            elif self.workgroup.name == "readers":
+                time.sleep(0.1 + self.workgroup.comm.rank/10.0)
+                self.handler.start_client()
+            elif self.workgroup.name == "writers":
+                time.sleep(0.2 + self.workgroup.comm.rank/10.0)
+                self.handler.start_client()
+            self.pool.free_all()
+        self.comm.barrier()
         self.pool.free_all()
     
     def halo_list(self,file_name='out_0.list'):
         Reads in the out_0.list file and generates RockstarHaloList
         and RockstarHalo objects.
         """
-        return RockstarHaloList(self.pf,self.outbase+'/%s'%file_name)
+        tpf = self.ts[0]
+        return RockstarHaloList(tpf,file_name)

yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx

 cimport cython
 from libc.stdlib cimport malloc
 
-from yt.config import ytcfg
-
 cdef import from "particle.h":
     struct particle:
         np.int64_t id
 cdef import from "config.h":
     void setup_config()
 
-cdef import from "server.h" nogil:
+cdef import from "server.h":
     int server()
 
-cdef import from "client.h" nogil:
-    void client(np.int64_t in_type)
+cdef import from "client.h":
+    void client()
 
 cdef import from "meta_io.h":
     void read_particles(char *filename)
     print "SINGLE_SNAP =", SINGLE_SNAP
 
 cdef class RockstarInterface
-cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p) with gil:
-    global SCALE_NOW
+cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
+    global SCALE_NOW, TOTAL_PARTICLES
+    pf = rh.tsl.next()
+    print 'reading from particle filename %s: %s'%(filename,pf.basename)
     cdef np.float64_t conv[6], left_edge[6]
     cdef np.ndarray[np.int64_t, ndim=1] arri
     cdef np.ndarray[np.float64_t, ndim=1] arr
-    pf = rh.tsl.next()
-    print 'reading from particle filename %s: %s'%(filename,pf.basename)
     block = int(str(filename).rsplit(".")[-1])
+    
+
+    # Now we want to grab data from only a subset of the grids.
     n = rh.block_ratio
-
-    all_grids = pf.h.grids
+    dd = pf.h.all_data()
     SCALE_NOW = 1.0/(pf.current_redshift+1.0)
-    # Now we want to grab data from only a subset of the grids for each reader.
-    if NUM_BLOCKS == 1:
-        grids = all_grids
-    else:
-        if ytcfg.getboolean("yt", "inline") == False:
-            fnames = np.array([g.filename for g in all_grids])
-            sort = fnames.argsort()
-            grids = np.array_split(all_grids[sort], NUM_BLOCKS)[block]
-        else:
-            # We must be inline, grap only the local grids.
-            grids  = [g for g in all_grids if g.proc_num ==
-                          ytcfg.getint('yt','__topcomm_parallel_rank')]
-    
-    all_fields = set(pf.h.derived_field_list + pf.h.field_list)
-
-    # First we need to find out how many this reader is going to read in
-    # if the number of readers > 1.
-    if NUM_BLOCKS > 1:
-        local_parts = 0
-        for g in grids:
-            if g.NumberOfParticles == 0: continue
-            if "particle_type" in all_fields:
-                #iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
-                iddm = g["particle_type"] == rh.dm_type
-            else:
-                iddm = Ellipsis
-            arri = g["particle_index"].astype("int64")
-            arri = arri[iddm] #pick only DM
-            local_parts += arri.size
-    else:
-        local_parts = TOTAL_PARTICLES
-
-    #print "local_parts", local_parts
-
-    p[0] = <particle *> malloc(sizeof(particle) * local_parts)
-
+    grids = np.array_split(dd._grids, NUM_BLOCKS)[block]
+    tnpart = 0
+    for g in grids:
+        tnpart += np.sum(dd._get_data_from_grid(g, "particle_type")==rh.dm_type)
+    p[0] = <particle *> malloc(sizeof(particle) * tnpart)
+    #print "Loading indices: size = ", tnpart
     conv[0] = conv[1] = conv[2] = pf["mpchcm"]
     conv[3] = conv[4] = conv[5] = 1e-5
     left_edge[0] = pf.domain_left_edge[0]
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
     for g in grids:
-        if g.NumberOfParticles == 0: continue
-        if "particle_type" in all_fields:
-            iddm = g["particle_type"] == rh.dm_type
-        else:
-            iddm = Ellipsis
-        arri = g["particle_index"].astype("int64")
+        iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
+        arri = dd._get_data_from_grid(g, "particle_index").astype("int64")
         arri = arri[iddm] #pick only DM
         npart = arri.size
         for i in range(npart):
                       "particle_position_z",
                       "particle_velocity_x", "particle_velocity_y",
                       "particle_velocity_z"]:
-            arr = g[field].astype("float64")
+            arr = dd._get_data_from_grid(g, field).astype("float64")
             arr = arr[iddm] #pick DM
             for i in range(npart):
                 p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
             fi += 1
         pi += npart
-    num_p[0] = local_parts
+    num_p[0] = tnpart
+    TOTAL_PARTICLES = tnpart
+    #print 'first particle coordinates'
+    #for i in range(3):
+    #    print p[0][0].pos[i],
+    #print ""
+    #print 'last particle coordinates'
+    #for i in range(3):
+    #    print p[0][tnpart-1].pos[i],
+    #print ""
 
 cdef class RockstarInterface:
 
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
         global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
-        global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
+        global OVERLAP_LENGTH, FORCE_RES
         if force_res is not None:
             FORCE_RES=np.float64(force_res)
-            #print "set force res to ",FORCE_RES
+            print "set force res to ",FORCE_RES
         OVERLAP_LENGTH = 0.0
         if parallel:
             PARALLEL_IO = 1
                     tpf.domain_left_edge[0]) * tpf['mpchcm']
         setup_config()
         rh = self
-        rh.dm_type = dm_type
         cdef LPG func = rh_read_particles
         set_load_particles_generic(func)
 
         output_and_free_halos(0, 0, 0, NULL)
 
     def start_server(self):
-        with nogil:
-            server()
+        server()
 
-    def start_client(self, in_type):
-        in_type = np.int64(in_type)
-        client(in_type)
+    def start_client(self):
+        client()

yt/analysis_modules/sunrise_export/sunrise_exporter.py

 
 import time
 import numpy as np
-import numpy.linalg as linalg
-import collections
-
 from yt.funcs import *
 import yt.utilities.lib as amr_utils
 from yt.data_objects.universal_fields import add_field
 from yt.mods import *
 
-debug = True
-
 def export_to_sunrise(pf, fn, star_particle_type, fc, fwidth, ncells_wide=None,
         debug=False,dd=None,**kwargs):
     r"""Convert the contents of a dataset to a FITS file format that Sunrise
     http://sunrise.googlecode.com/ for more information.
 
     """
-
     fc = np.array(fc)
     fwidth = np.array(fwidth)
     
     #Create a list of the star particle properties in PARTICLE_DATA
     #Include ID, parent-ID, position, velocity, creation_mass, 
     #formation_time, mass, age_m, age_l, metallicity, L_bol
-    particle_data = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,
+    particle_data,nstars = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,
                                            dd=dd,**kwargs)
 
     #Create the refinement hilbert octree in GRIDSTRUCTURE
 
     create_fits_file(pf,fn, refinement,output,particle_data,fle,fre)
 
-    return fle,fre,ile,ire,dd,nleaf
+    return fle,fre,ile,ire,dd,nleaf,nstars
 
 def export_to_sunrise_from_halolist(pf,fni,star_particle_type,
                                         halo_list,domains_list=None,**kwargs):
     domains_halos  = [d[2] for d in domains_list]
     return domains_list
 
-def prepare_octree(pf,ile,start_level=0,debug=False,dd=None,center=None):
-    add_fields() #add the metal mass field that sunrise wants
+def prepare_octree(pf,ile,start_level=0,debug=True,dd=None,center=None):
+    if dd is None:
+        #we keep passing dd around to not regenerate the data all the time
+        dd = pf.h.all_data()
+    try:
+        dd['MetalMass']
+    except KeyError:
+        add_fields() #add the metal mass field that sunrise wants
+    def _temp_times_mass(field, data):
+        return data["Temperature"]*data["CellMassMsun"]
+    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
     fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
               "MetalMass","CellVolumeCode"]
     
     #gather the field data from octs
     pbar = get_pbar("Retrieving field data",len(fields))
     field_data = [] 
-    if dd is None:
-        #we keep passing dd around to not regenerate the data all the time
-        dd = pf.h.all_data()
     for fi,f in enumerate(fields):
         field_data += dd[f],
         pbar.update(fi)
     output   = np.zeros((o_length,len(fields)), dtype='float64')
     refined  = np.zeros(r_length, dtype='int32')
     levels   = np.zeros(r_length, dtype='int32')
+    ids      = np.zeros(r_length, dtype='int32')
     pos = position()
     hs       = hilbert_state()
     start_time = time.time()
             c = center*pf['kpc']
         else:
             c = ile*1.0/pf.domain_dimensions*pf['kpc']
-        printing = lambda x: print_oct(x,pf['kpc'],c)
+        printing = lambda x: print_oct(x)
     else:
         printing = None
     pbar = get_pbar("Building Hilbert DFO octree",len(refined))
             output,refined,levels,
             grids,
             start_level,
+            ids,
             debug=printing,
             tracker=pbar)
     pbar.finish()
     #for the next spot, so we're off by 1
     print 'took %1.2e seconds'%(time.time()-start_time)
     print 'refinement tree # of cells %i, # of leaves %i'%(pos.refined_pos,pos.output_pos) 
+    print 'first few entries :',refined[:12]
     output  = output[:pos.output_pos]
     refined = refined[:pos.refined_pos] 
     levels = levels[:pos.refined_pos] 
     ci = data['cell_index']
     l  = data['level']
     g  = data['grid']
+    o  = g.offset
     fle = g.left_edges+g.dx*ci
     fre = g.left_edges+g.dx*(ci+1)
     if nd is not None:
         if nc is not None:
             fle -= nc
             fre -= nc
-    txt  = '%1i '
-    txt += '%1.3f '*3+'- '
-    txt += '%1.3f '*3
-    print txt%((l,)+tuple(fle)+tuple(fre))
+    txt  = '%+1i '
+    txt += '%+1i '
+    txt += '%+1.3f '*3+'- '
+    txt += '%+1.3f '*3
+    if l<2:
+        print txt%((l,)+(o,)+tuple(fle)+tuple(fre))
 
-def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the grids[grid_index]
+def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the [grid_index]
                             pos, #the output hydro data position and refinement position
                             grid,  #grid that this oct lives on (not its children)
                             hilbert,  #the hilbert state
                             levels, #For a given Oct, what is the level
                             grids, #list of all patch grids available to us
                             level, #starting level of the oct (not the children)
+                            ids, #record the oct ID
                             debug=None,tracker=True):
     if tracker is not None:
         if pos.refined_pos%1000 == 500 : tracker.update(pos.refined_pos)
         debug(vars())
     child_grid_index = grid.child_indices[cell_index[0],cell_index[1],cell_index[2]]
     #record the refinement state
-    refined[pos.refined_pos] = child_grid_index!=-1
-    levels[pos.output_pos]  = level
+    levels[pos.refined_pos]  = level
+    is_leaf = (child_grid_index==-1) and (level>0)
+    refined[pos.refined_pos] = not is_leaf #True is oct, False is leaf
+    ids[pos.refined_pos] = child_grid_index #True is oct, False is leaf
     pos.refined_pos+= 1 
-    if child_grid_index == -1 and level>=0: #never subdivide if we are on a superlevel
+    if is_leaf: #never subdivide if we are on a superlevel
         #then we have hit a leaf cell; write it out
         for field_index in range(grid.fields.shape[0]):
             output[pos.output_pos,field_index] = \
                     grid.fields[field_index,cell_index[0],cell_index[1],cell_index[2]]
         pos.output_pos+= 1 
     else:
+        assert child_grid_index>-1
         #find the grid we descend into
         #then find the eight cells we break up into
         subgrid = grids[child_grid_index]
             #denote each of the 8 octs
             if level < 0:
                 subgrid = grid #we don't actually descend if we're a superlevel
-                child_ile = cell_index + vertex*2**(-level)
+                #child_ile = cell_index + np.array(vertex)*2**(-level)
+                child_ile = cell_index + np.array(vertex)*2**(-(level+1))
+                child_ile = child_ile.astype('int')
             else:
                 child_ile = subgrid_ile+np.array(vertex)
                 child_ile = child_ile.astype('int')
+
             RecurseOctreeDepthFirstHilbert(child_ile,pos,
-                    subgrid,hilbert_child,output,refined,levels,grids,level+1,
-                    debug=debug,tracker=tracker)
+                subgrid,hilbert_child,output,refined,levels,grids,
+                level+1,ids = ids,
+                debug=debug,tracker=tracker)
 
 
 
 def create_fits_file(pf,fn, refined,output,particle_data,fle,fre):
-
     #first create the grid structure
     structure = pyfits.Column("structure", format="B", array=refined.astype("bool"))
     cols = pyfits.ColDefs([structure])
     for i,a in enumerate('xyz'):
         st_table.header.update("min%s" % a, fle[i] * pf['kpc'])
         st_table.header.update("max%s" % a, fre[i] * pf['kpc'])
-        #st_table.header.update("min%s" % a, 0) #WARNING: this is for debugging
-        #st_table.header.update("max%s" % a, 2) #
         st_table.header.update("n%s" % a, fdx[i])
         st_table.header.update("subdiv%s" % a, 2)
     st_table.header.update("subdivtp", "OCTREE", "Type of grid subdivision")
             #quit if idxq is true:
             idxq = idx[0]>0 and np.all(idx==idx[0])
             out  = np.all(fle>cfle) and np.all(fre<cfre) 
+            out &= abs(np.log2(idx[0])-np.rint(np.log2(idx[0])))<1e-5 #nwide should be a power of 2
             assert width[0] < 1.1 #can't go larger than the simulation volume
         nwide = idx[0]
     else:
                           dd=None):
     if dd is None:
         dd = pf.h.all_data()
-    idx = dd["particle_type"] == star_type
+    idxst = dd["particle_type"] == star_type
+
+    #make sure we select more than a single particle
+    assert np.sum(idxst)>0
     if pos is None:
         pos = np.array([dd["particle_position_%s" % ax]
                         for ax in 'xyz']).transpose()
-    idx = idx & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
+    idx = idxst & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
+    assert np.sum(idx)>0
     pos = pos[idx]*pf['kpc'] #unitary units -> kpc
     if age is None:
         age = dd["particle_age"][idx]*pf['years'] # seconds->years
     if metallicity is None:
         #this should be in dimensionless units, metals mass / particle mass
         metallicity = dd["particle_metallicity"][idx]
-        #metallicity *=0.0198
-        #print 'WARNING: multiplying metallicirt by 0.0198'
+        assert np.all(metallicity>0.0)
     if radius is None:
         radius = initial_mass*0.0+10.0/1000.0 #10pc radius
     formation_time = pf.current_time*pf['years']-age
     col_list.append(pyfits.Column("radius", format="D", array=radius, unit="kpc"))
     col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
     col_list.append(pyfits.Column("age", format="D", array=age,unit='yr'))
-    #col_list.append(pyfits.Column("age_l", format="D", array=age, unit = 'yr'))
     #For particles, Sunrise takes 
     #the dimensionless metallicity, not the mass of the metals
     col_list.append(pyfits.Column("metallicity", format="D",
         array=metallicity,unit="Msun")) 
-    #col_list.append(pyfits.Column("L_bol", format="D",
-    #    array=np.zeros(current_mass.size)))
     
     #make the table
     cols = pyfits.ColDefs(col_list)
     pd_table = pyfits.new_table(cols)
     pd_table.name = "PARTICLEDATA"
-    return pd_table
+    
+    #make sure we have nonzero particle number
+    assert pd_table.data.shape[0]>0
+    return pd_table,na.sum(idx)
 
 
 def add_fields():
         
     def _convMetalMass(data):
         return 1.0
-    
     add_field("MetalMass", function=_MetalMass,
               convert_function=_convMetalMass)
-
     def _initial_mass_cen_ostriker(field, data):
         # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
         # Check Grid_AddToDiskProfile.C and star_maker7.src
 
     add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker)
 
-    def _temp_times_mass(field, data):
-        return data["Temperature"]*data["CellMassMsun"]
-    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
 
 class position:
     def __init__(self):
         j+=1
         yield vertex, self.descend(j)
 
-def generate_sunrise_cameraset_positions(pf,sim_center,cameraset=None,**kwargs):
-    if cameraset is None:
-        cameraset =cameraset_vertex 
-    campos =[]
-    names = []
-    dd = pf.h.all_data()
-    for name, (scene_pos,scene_up, scene_rot)  in cameraset.iteritems():
-        kwargs['scene_position']=scene_pos
-        kwargs['scene_up']=scene_up
-        kwargs['scene_rot']=scene_rot
-        kwargs['dd']=dd
-        line = generate_sunrise_camera_position(pf,sim_center,**kwargs)
-        campos += line,
-        names += name,
-    return names,campos     
-
-def generate_sunrise_camera_position(pf,sim_center,sim_axis_short=None,sim_axis_long=None,
-                                     sim_sphere_radius=None,sim_halo_radius=None,
-                                     scene_position=[0.0,0.0,1.0],scene_distance=None,
-                                     scene_up=[0.,0.,1.],scene_fov=None,scene_rot=True,
-                                     dd=None):
-    """Translate the simulation to center on sim_center, 
-    then rotate such that sim_up is along the +z direction. Then we are in the 
-    'scene' basis coordinates from which scene_up and scene_offset are defined.
-    Then a position vector, direction vector, up vector and angular field of view
-    are returned. The 3-vectors are in absolute physical kpc, not relative to the center.
-    The angular field of view is in radians. The 10 numbers should match the inputs to
-    camera_positions in Sunrise.
-    """
-
-    sim_center = np.array(sim_center)
-    if sim_sphere_radius is None:
-        sim_sphere_radius = 10.0/pf['kpc']
-    if sim_axis_short is None:
-        if dd is None:
-            dd = pf.h.all_data()
-        pos = np.array([dd["particle_position_%s"%i] for i in "xyz"]).T
-        idx = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
-        mas = dd["particle_mass"]
-        pos = pos[idx]
-        mas = mas[idx]
-        mo_inertia = position_moment(pos,mas)
-        eigva, eigvc = linalg.eig(mo_inertia)
-        #order into short, long axes
-        order = eigva.real.argsort()
-        ax_short,ax_med,ax_long = [ eigvc[:,order[i]] for i in (0,1,2)]
-    else:
-        ax_short = sim_axis_short
-        ax_long  = sim_axis_long
-    if sim_halo_radius is None:
-        sim_halo_radius = 200.0/pf['kpc']
-    if scene_distance is  None:
-        scene_distance = 1e4/pf['kpc'] #this is how far the camera is from the target
-    if scene_fov is None:
-        radii = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))
-        #idx= radii < sim_halo_radius*0.10
-        #radii = radii[idx]
-        #mass  = mas[idx] #copying mass into mas
-        si = np.argsort(radii)
-        radii = radii[si]
-        mass  = mas[si]
-        idx, = np.where(np.cumsum(mass)>mass.sum()/2.0)
-        re = radii[idx[0]]
-        scene_fov = 5*re
-        scene_fov = max(scene_fov,3.0/pf['kpc']) #min size is 3kpc
-        scene_fov = min(scene_fov,20.0/pf['kpc']) #max size is 3kpc
-    #find rotation matrix
-    angles=find_half_euler_angles(ax_short,ax_long)
-    rotation  = euler_matrix(*angles)
-    irotation = numpy.linalg.inv(rotation)
-    axs = (ax_short,ax_med,ax_long)
-    ax_rs,ax_rm,ax_rl = (matmul(rotation,ax) for ax in axs)
-    axs = ([1,0,0],[0,1,0],[0,0,1])
-    ax_is,ax_im,ax_il = (matmul(irotation,ax) for ax in axs)
-    
-    #rotate the camera
-    if scene_rot :
-        irotation = np.eye(3)
-    sunrise_pos = matmul(irotation,np.array(scene_position)*scene_distance) #do NOT include sim center
-    sunrise_up  = matmul(irotation,scene_up)
-    sunrise_direction = -sunrise_pos
-    sunrise_afov = 2.0*np.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
-
-    #change to physical kpc
-    sunrise_pos *= pf['kpc']
-    sunrise_direction *= pf['kpc']
-    return sunrise_pos,sunrise_direction,sunrise_up,sunrise_afov,scene_fov
-
-def matmul(m, v):
-    """Multiply a matrix times a set of vectors, or a single vector.
-    My nPart x nDim convention leads to two transpositions, which is
-    why this is hidden away in a function.  Note that if you try to
-    use this to muliply two matricies, it will think that you're
-    trying to multiply by a set of vectors and all hell will break
-    loose."""    
-    assert type(v) is not np.matrix
-    v = np.asarray(v)
-    m, vs = [np.asmatrix(a) for a in (m, v)]
-
-    result = np.asarray(np.transpose(m * np.transpose(vs)))    
-    if len(v.shape) == 1:
-        return result[0]
-    return result
-
-
-def mag(vs):
-    """Compute the norms of a set of vectors or a single vector."""
-    vs = np.asarray(vs)
-    if len(vs.shape) == 1:
-        return np.sqrt( (vs**2).sum() )
-    return np.sqrt( (vs**2).sum(axis=1) )
-
-def mag2(vs):
-    """Compute the norms of a set of vectors or a single vector."""
-    vs = np.asarray(vs)
-    if len(vs.shape) == 1:
-        return (vs**2).sum()
-    return (vs**2).sum(axis=1)
-
-
-def position_moment(rs, ms=None, axes=None):
-    """Find second position moment tensor.
-    If axes is specified, weight by the elliptical radius (Allgood 2005)"""
-    rs = np.asarray(rs)
-    Npart, N = rs.shape
-    if ms is None: ms = np.ones(Npart)
-    else: ms = np.asarray(ms)    
-    if axes is not None:
-        axes = np.asarray(axes,dtype=float64)
-        axes = axes/axes.max()
-        norms2 = mag2(rs/axes)
-    else:
-        norms2 = np.ones(Npart)
-    M = ms.sum()
-    result = np.zeros((N,N))
-    # matrix is symmetric, so only compute half of it then fill in the
-    # other half
-    for i in range(N):
-        for j in range(i+1):
-            result[i,j] = ( rs[:,i] * rs[:,j] * ms / norms2).sum() / M
-        
-    result = result + result.transpose() - np.identity(N)*result
-    return result
-    
-
-
-def find_half_euler_angles(v,w,check=True):
-    """Find the passive euler angles that will make v lie along the z
-    axis and w lie along the x axis.  v and w are uncertain up to
-    inversions (ie, eigenvectors) so this routine removes degeneracies
-    associated with that
-
-    (old) Calculate angles to bring a body into alignment with the
-    coordinate system.  If v1 is the SHORTEST axis and v2 is the
-    LONGEST axis, then this will return the angle (Euler angles) to
-    make the long axis line up with the x axis and the short axis line
-    up with the x (z) axis for the 2 (3) dimensional case."""
-    # Make sure the vectors are normalized and orthogonal
-    mag = lambda x: np.sqrt(np.sum(x**2.0))
-    v = v/mag(v)
-    w = w/mag(w)    
-    if check:
-        if abs((v*w).sum()) / (mag(v)*mag(w)) > 1e-5: raise ValueError
-
-    # Break eigenvector scaling degeneracy by forcing it to have a positive
-    # z component
-    if v[2] < 0: v = -v
-    phi,theta = find_euler_phi_theta(v)
-
-    # Rotate w according to phi,theta and then break inversion
-    # degeneracy by requiring that resulting vector has positive
-    # x component
-    w_prime = euler_passive(w,phi,theta,0.)
-    if w_prime[0] < 0: w_prime = -w_prime
-    # Now last Euler angle should just be this:
-    psi = np.arctan2(w_prime[1],w_prime[0])
-    return phi, theta, psi
-
-def find_euler_phi_theta(v):
-    """Find (passive) euler angles that will make v point in the z
-    direction"""
-    # Make sure the vector is normalized
-    v = v/mag(v)
-    theta = np.arccos(v[2])
-    phi = np.arctan2(v[0],-v[1])
-    return phi,theta
-
-def euler_matrix(phi, the, psi):
-    """Make an Euler transformation matrix"""
-    cpsi=np.cos(psi)
-    spsi=np.sin(psi)
-    cphi=np.cos(phi)
-    sphi=np.sin(phi)
-    cthe=np.cos(the)
-    sthe=np.sin(the)
-    m = np.mat(np.zeros((3,3)))
-    m[0,0] = cpsi*cphi - cthe*sphi*spsi
-    m[0,1] = cpsi*sphi + cthe*cphi*spsi
-    m[0,2] = spsi*sthe
-    m[1,0] = -spsi*cphi - cthe*sphi*cpsi
-    m[1,1] = -spsi*sphi + cthe*cphi*cpsi 
-    m[1,2] = cpsi*sthe
-    m[2,0] = sthe*sphi
-    m[2,1] = -sthe*cphi
-    m[2,2] = cthe
-    return m
-
-def euler_passive(v, phi, the, psi):
-    """Passive Euler transform"""
-    m = euler_matrix(phi, the, psi)
-    return matmul(m,v)
-
-
-#the format for these camerasets is name,up vector,camera location, 
-#rotate to the galaxy's up direction?
-cameraset_compass = collections.OrderedDict([
-    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
-    ['bottom',([0.,0.,-1.],[0.,-1.,0.],True)],#up is north=+y
-    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['south',([0.,-1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['east',([1.,0.,0.],[0.,0.,-1.],True)],#up is along z
-    ['west',([-1.,0.,0.],[0.,0.,-1.],True)],#up is along z
-    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
-    ['top-south',([0.,-0.7071,0.7071],[0., 0., -1.],True)],
-    ['top-east',([ 0.7071,0.,0.7071],[0., 0., -1.],True)],
-    ['top-west',([-0.7071,0.,0.7071],[0., 0., -1.],True)]
-    ])
-
-cameraset_vertex = collections.OrderedDict([
-    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
-    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
-    ['Z',([0.,0.,1.],[0.,-1.,0],False)], #up is north=+y
-    ['Y',([0.,1.,0.],[0.,0.,-1.],False)],#up is along z
-    ['ZY',([0.,0.7071,0.7071],[0., 0., -1.],False)]
-    ])
-
-#up is 45deg down from z, towards north
-#'bottom-north':([0.,0.7071,-0.7071],[0., 0., -1.])
-#up is -45deg down from z, towards north
-
-cameraset_ring = collections.OrderedDict()
-
-segments = 20
-for angle in np.linspace(0,360,segments):
-    pos = [np.cos(angle),0.,np.sin(angle)]
-    vc  = [np.cos(90-angle),0.,np.sin(90-angle)] 
-    cameraset_ring['02i'%angle]=(pos,vc)
-            
-
-

yt/data_objects/time_series.py

File contents unchanged.

yt/frontends/art/data_structures.py

 
 Author: Matthew Turk <matthewturk@gmail.com>
 Affiliation: UCSD
+Author: Christopher Moody <cemoody@ucsc.edu>
+Affiliation: UCSC
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
-
+
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
 import numpy as np
+import os.path
+import glob
 import stat
 import weakref
-import cPickle
-import os
-import struct
+import cStringIO
 
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
 from .fields import \
     ARTFieldInfo, add_art_field, KnownARTFields
 from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
+    mpc_conversion
 from yt.utilities.io_handler import \
     io_registry
+from yt.utilities.lib import \
+    get_box_grids_level
 import yt.utilities.lib as amr_utils
 
-try:
-    import yt.frontends.ramses._ramses_reader as _ramses_reader
-except ImportError:
-    _ramses_reader = None
+from .definitions import *
+from io import _read_child_mask_level
+from io import read_particles
+from io import read_stars
+from io import spread_ages
+from io import _count_art_octs
+from io import _read_art_level_info
+from io import _read_art_child
+from io import _skip_record
+from io import _read_record
+from io import _read_frecord
+from io import _read_record_size
+from io import _read_struct
+from io import b2t
 
+
+import yt.frontends.ramses._ramses_reader as _ramses_reader
+
+from .fields import ARTFieldInfo, KnownARTFields
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.lib import \
+    get_box_grids_level
+from yt.utilities.io_handler import \
+    io_registry
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 from yt.utilities.physical_constants import \
     mass_hydrogen_cgs, sec_per_Gyr
 
-from yt.frontends.art.definitions import art_particle_field_names
-
-from yt.frontends.art.io import _read_child_mask_level
-from yt.frontends.art.io import read_particles
-from yt.frontends.art.io import read_stars
-from yt.frontends.art.io import _count_art_octs
-from yt.frontends.art.io import _read_art_level_info
-from yt.frontends.art.io import _read_art_child
-from yt.frontends.art.io import _skip_record
-from yt.frontends.art.io import _read_record
-from yt.frontends.art.io import _read_frecord
-from yt.frontends.art.io import _read_record_size
-from yt.frontends.art.io import _read_struct
-from yt.frontends.art.io import b2t
-
-def num_deep_inc(f):
-    def wrap(self, *args, **kwargs):
-        self.num_deep += 1
-        rv = f(self, *args, **kwargs)
-        self.num_deep -= 1
-        return rv
-    return wrap
-
 class ARTGrid(AMRGridPatch):
     _id_offset = 0
 
-    def __init__(self, id, hierarchy, level, locations, props,child_mask=None):
+    def __init__(self, id, hierarchy, level, locations,start_index, le,re,gd,
+            child_mask=None,nop=0):
         AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
                               hierarchy = hierarchy)
-        start_index = props[0]
+        start_index =start_index 
         self.Level = level
         self.Parent = []
         self.Children = []
         self.locations = locations
         self.start_index = start_index.copy()
         
-        self.LeftEdge = props[0]
-        self.RightEdge = props[1]
-        self.ActiveDimensions = props[2] 
-        #if child_mask is not None:
-        #    self._set_child_mask(child_mask)
+        self.LeftEdge = le
+        self.RightEdge = re
+        self.ActiveDimensions = gd
+        self.NumberOfParticles=nop
+        for particle_field in particle_fields:
+            setattr(self,particle_field,np.array([]))
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
         if len(self.Parent) > 0:
             self.dds = self.Parent[0].dds / self.pf.refine_by
             self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] \
+                = self.dds
 
     def get_global_startindex(self):
         """
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
                        np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
-        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
+        self.start_index = (start_index*self.pf.refine_by)\
+                           .astype('int64').ravel()
         return self.start_index
 
     def __repr__(self):
         return "ARTGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
 
 class ARTHierarchy(AMRHierarchy):
-
     grid = ARTGrid
     _handle = None
     
     def __init__(self, pf, data_style='art'):
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
-        #for now, the hierarchy file is the parameter file!
+        self.max_level = pf.max_level
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
+        if not self.pf.skip_particles:
+            self._setup_particle_grids()
+        self._setup_particle_grids()
         self._setup_field_list()
         
+    def _setup_particle_grids(self):
+        pass
+    
     def _initialize_data_storage(self):
         pass
-
+    
     def _detect_fields(self):
-        # This will need to be generalized to be used elsewhere.
-        self.field_list = [ 'Density','TotalEnergy',
-             'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
-             'Pressure','Gamma','GasEnergy',
-             'MetalDensitySNII', 'MetalDensitySNIa',
-             'PotentialNew','PotentialOld']
-        self.field_list += art_particle_field_names
-
+        self.field_list = []
+        self.field_list += fluid_fields
+        self.field_list += particle_fields
+        
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)
         self.object_types.sort()
-
+            
     def _count_grids(self):
         LEVEL_OF_EDGE = 7
         MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
-        
         min_eff = 0.30
-        
         vol_max = 128**3
-        
-        f = open(self.pf.parameter_filename,'rb')
-        
-        
-        (self.pf.nhydro_vars, self.pf.level_info,
-        self.pf.level_oct_offsets, 
-        self.pf.level_child_offsets) = \
-                         _count_art_octs(f, 
-                          self.pf.child_grid_offset,
-                          self.pf.min_level, self.pf.max_level)
-        self.pf.level_info[0]=self.pf.ncell
-        self.pf.level_info = np.array(self.pf.level_info)        
-        self.pf.level_offsets = self.pf.level_child_offsets
-        self.pf.level_offsets = np.array(self.pf.level_offsets, dtype='int64')
-        self.pf.level_offsets[0] = self.pf.root_grid_offset
-        
-        self.pf.level_art_child_masks = {}
-        cm = self.pf.root_iOctCh>0
-        cm_shape = (1,)+cm.shape 
-        self.pf.level_art_child_masks[0] = cm.reshape(cm_shape).astype('uint8')        
-        del cm
-        
-        root_psg = _ramses_reader.ProtoSubgrid(
-                        np.zeros(3, dtype='int64'), # left index of PSG
-                        self.pf.domain_dimensions, # dim of PSG
-                        np.zeros((1,3), dtype='int64'), # left edges of grids
-                        np.zeros((1,6), dtype='int64') # empty
-                        )
-        
-        self.proto_grids = [[root_psg],]
-        for level in xrange(1, len(self.pf.level_info)):
-            if self.pf.level_info[level] == 0:
-                self.proto_grids.append([])
-                continue
-            psgs = []
-            effs,sizes = [], []
-
-            if level > self.pf.limit_level : continue
-            
-            #refers to the left index for the art octgrid
-            left_index, fl, nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
-            #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
-            
-            #read in the child masks for this level and save them
-            idc, art_child_mask = _read_child_mask_level(f, self.pf.level_child_offsets,
-                level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
-            art_child_mask = art_child_mask.reshape((nocts,2,2,2))
-            self.pf.level_art_child_masks[level]=art_child_mask
-            #child_mask is zero where child grids exist and
-            #thus where higher resolution data is available
-            
-            
-            #compute the hilbert indices up to a certain level
-            #the indices will associate an oct grid to the nearest
-            #hilbert index?
-            base_level = int( np.log10(self.pf.domain_dimensions.max()) /
-                              np.log10(2))
-            hilbert_indices = _ramses_reader.get_hilbert_indices(
-                                    level + base_level, left_index)
-            #print base_level, hilbert_indices.max(),
-            hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
-            #print hilbert_indices.max()
-            
-            # Strictly speaking, we don't care about the index of any
-            # individual oct at this point.  So we can then split them up.
-            unique_indices = np.unique(hilbert_indices)
-            mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
-                        level, unique_indices.size, hilbert_indices.size)
-            
-            #use the hilbert indices to order oct grids so that consecutive
-            #items on a list are spatially near each other
-            #this is useful because we will define grid patches over these
-            #octs, which are more efficient if the octs are spatially close
-            
-            #split into list of lists, with domains containing 
-            #lists of sub octgrid left indices and an index
-            #referring to the domain on which they live
-            pbar = get_pbar("Calc Hilbert Indices ",1)
-            locs, lefts = _ramses_reader.get_array_indices_lists(
-                        hilbert_indices, unique_indices, left_index, fl)
-            pbar.finish()
-            
-            #iterate over the domains    
-            step=0
-            pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
-            psg_eff = []
-            for ddleft_index, ddfl in zip(lefts, locs):
-                #iterate over just the unique octs
-                #why would we ever have non-unique octs?
-                #perhaps the hilbert ordering may visit the same
-                #oct multiple times - review only unique octs 
-                #for idomain in np.unique(ddfl[:,1]):
-                #dom_ind = ddfl[:,1] == idomain
-                #dleft_index = ddleft_index[dom_ind,:]
-                #dfl = ddfl[dom_ind,:]
+        with open(self.pf.parameter_filename,'rb') as f:
+            (self.pf.nhydro_vars, self.pf.level_info,
+            self.pf.level_oct_offsets, 
+            self.pf.level_child_offsets) = \
+                             _count_art_octs(f, 
+                              self.pf.child_grid_offset,
+                              self.pf.min_level, self.pf.max_level)
+            self.pf.level_info[0]=self.pf.ncell
+            self.pf.level_info = np.array(self.pf.level_info)
+            self.pf.level_offsets = self.pf.level_child_offsets
+            self.pf.level_offsets = np.array(self.pf.level_offsets, 
+                                             dtype='int64')
+            self.pf.level_offsets[0] = self.pf.root_grid_offset
+            self.pf.level_art_child_masks = {}
+            cm = self.pf.root_iOctCh>0
+            cm_shape = (1,)+cm.shape 
+            self.pf.level_art_child_masks[0] = \
+                    cm.reshape(cm_shape).astype('uint8')        
+            del cm
+            root_psg = _ramses_reader.ProtoSubgrid(
+                            np.zeros(3, dtype='int64'), # left index of PSG
+                            self.pf.domain_dimensions, # dim of PSG
+                            np.zeros((1,3), dtype='int64'),# left edges of grids
+                            np.zeros((1,6), dtype='int64') # empty
+                            )
+            self.proto_grids = [[root_psg],]
+            for level in xrange(1, len(self.pf.level_info)):
+                if self.pf.level_info[level] == 0:
+                    self.proto_grids.append([])
+                    continue
+                psgs = []
+                effs,sizes = [], []
+                if self.pf.limit_level:
+                    if level > self.pf.limit_level : continue
+                #refers to the left index for the art octgrid
+                left_index, fl, nocts,root_level = _read_art_level_info(f, 
+                        self.pf.level_oct_offsets,level,
+                        coarse_grid=self.pf.domain_dimensions[0])
+                if level>1:
+                    assert root_level == last_root_level
+                last_root_level = root_level
+                #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
+                #read in the child masks for this level and save them
+                idc, art_child_mask = _read_child_mask_level(f, 
+                        self.pf.level_child_offsets,
+                    level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
+                art_child_mask = art_child_mask.reshape((nocts,2,2,2))
+                self.pf.level_art_child_masks[level]=art_child_mask
+                #child_mask is zero where child grids exist and
+                #thus where higher resolution data is available
+                #compute the hilbert indices up to a certain level
+                #the indices will associate an oct grid to the nearest
+                #hilbert index?
+                base_level = int( np.log10(self.pf.domain_dimensions.max()) /
+                                  np.log10(2))
+                hilbert_indices = _ramses_reader.get_hilbert_indices(
+                                        level + base_level, left_index)
+                #print base_level, hilbert_indices.max(),
+                hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
+                #print hilbert_indices.max()
+                # Strictly speaking, we don't care about the index of any
+                # individual oct at this point.  So we can then split them up.
+                unique_indices = np.unique(hilbert_indices)
+                mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
+                            level, unique_indices.size, hilbert_indices.size)
+                #use the hilbert indices to order oct grids so that consecutive
+                #items on a list are spatially near each other
+                #this is useful because we will define grid patches over these
+                #octs, which are more efficient if the octs are spatially close
+                #split into list of lists, with domains containing 
+                #lists of sub octgrid left indices and an index
+                #referring to the domain on which they live
+                pbar = get_pbar("Calc Hilbert Indices ",1)
+                locs, lefts = _ramses_reader.get_array_indices_lists(
+                            hilbert_indices, unique_indices, left_index, fl)
+                pbar.finish()
+                #iterate over the domains    
+                step=0
+                pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
+                psg_eff = []
+                for ddleft_index, ddfl in zip(lefts, locs):
+                    #iterate over just the unique octs
+                    #why would we ever have non-unique octs?
+                    #perhaps the hilbert ordering may visit the same
+                    #oct multiple times - review only unique octs 
+                    #for idomain in np.unique(ddfl[:,1]):
+                    #dom_ind = ddfl[:,1] == idomain
+                    #dleft_index = ddleft_index[dom_ind,:]
+                    #dfl = ddfl[dom_ind,:]
+                    dleft_index = ddleft_index
+                    dfl = ddfl
+                    initial_left = np.min(dleft_index, axis=0)
+                    idims = (np.max(dleft_index, axis=0) - initial_left).ravel()
+                    idims +=2
+                    #this creates a grid patch that doesn't cover the whole level
+                    #necessarily, but with other patches covers all the regions
+                    #with octs. This object automatically shrinks its size
+                    #to barely encompass the octs inside of it.
+                    psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
+                                    dleft_index, dfl)
+                    if psg.efficiency <= 0: continue
+                    #because grid patches maybe mostly empty, and with octs
+                    #that only partially fill the grid, it may be more efficient
+                    #to split large patches into smaller patches. We split
+                    #if less than 10% the volume of a patch is covered with octs
+                    if idims.prod() > vol_max or psg.efficiency < min_eff:
+                        psg_split = _ramses_reader.recursive_patch_splitting(
+                            psg, idims, initial_left, 
+                            dleft_index, dfl,min_eff=min_eff,use_center=True,
+                            split_on_vol=vol_max)
+                        psgs.extend(psg_split)
+                        psg_eff += [x.efficiency for x in psg_split] 
+                    else:
+                        psgs.append(psg)
+                        psg_eff =  [psg.efficiency,]
+                    tol = 1.00001
+                    step+=1
+                    pbar.update(step)
+                eff_mean = np.mean(psg_eff)
+                eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
+                eff_nall = len(psg_eff)
+                mylog.info("Average subgrid efficiency %02.1f %%",
+                            eff_mean*100.0)
+                mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
+                            eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
                 
-                dleft_index = ddleft_index
-                dfl = ddfl
-                initial_left = np.min(dleft_index, axis=0)
-                idims = (np.max(dleft_index, axis=0) - initial_left).ravel()+2
-                #this creates a grid patch that doesn't cover the whole level
-                #necessarily, but with other patches covers all the regions
-                #with octs. This object automatically shrinks its size
-                #to barely encompass the octs inside of it.
-                psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
-                                dleft_index, dfl)
-                if psg.efficiency <= 0: continue
-                
-                #because grid patches may still be mostly empty, and with octs
-                #that only partially fill the grid,it  may be more efficient
-                #to split large patches into smaller patches. We split
-                #if less than 10% the volume of a patch is covered with octs
-                if idims.prod() > vol_max or psg.efficiency < min_eff:
-                    psg_split = _ramses_reader.recursive_patch_splitting(
-                        psg, idims, initial_left, 
-                        dleft_index, dfl,min_eff=min_eff,use_center=True,
-                        split_on_vol=vol_max)
-                    
-                    psgs.extend(psg_split)
-                    psg_eff += [x.efficiency for x in psg_split] 
-                else:
-                    psgs.append(psg)
-                    psg_eff =  [psg.efficiency,]
-                
-                tol = 1.00001
-                
-                
-                step+=1
-                pbar.update(step)
-            eff_mean = np.mean(psg_eff)
-            eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
-            eff_nall = len(psg_eff)
-            mylog.info("Average subgrid efficiency %02.1f %%",
-                        eff_mean*100.0)
-            mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
-                        eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
-            
-        
-            mylog.debug("Done with level % 2i", level)
-            pbar.finish()
-            self.proto_grids.append(psgs)
-            #print sum(len(psg.grid_file_locations) for psg in psgs)
-            #mylog.info("Final grid count: %s", len(self.proto_grids[level]))
-            if len(self.proto_grids[level]) == 1: continue
+                mylog.info("Done with level % 2i; max LE %i", level,
+                           np.max(left_index))
+                pbar.finish()
+                self.proto_grids.append(psgs)
+                if len(self.proto_grids[level]) == 1: continue
         self.num_grids = sum(len(l) for l in self.proto_grids)
-                    
-            
-            
-
-    num_deep = 0
-
         
     def _parse_hierarchy(self):
-        """ The root grid has no octs except one which is refined.
-        Still, it is the size of 128 cells along a length.
-        Ignore the proto subgrid created for the root grid - it is wrong.
-        """
         grids = []
         gi = 0
-        
+        dd=self.pf.domain_dimensions
         for level, grid_list in enumerate(self.proto_grids):
-            #The root level spans [0,2]
-            #The next level spans [0,256]
-            #The 3rd Level spans up to 128*2^3, etc.
-            #Correct root level to span up to 128
-            correction=1L
-            if level == 0:
-                correction=64L
+            dds = ((2**level) * dd).astype("float64")
             for g in grid_list:
                 fl = g.grid_file_locations
-                props = g.get_properties()*correction
-                dds = ((2**level) * self.pf.domain_dimensions).astype("float64")
-                self.grid_left_edge[gi,:] = props[0,:] / dds
-                self.grid_right_edge[gi,:] = props[1,:] / dds
-                self.grid_dimensions[gi,:] = props[2,:]
+                props = g.get_properties()
+                start_index = props[0,:]
+                le = props[0,:].astype('float64')/dds
+                re = props[1,:].astype('float64')/dds
+                gd = props[2,:].astype('int64')
+                if level==0:
+                    le = np.zeros(3,dtype='float64')
+                    re = np.ones(3,dtype='float64')
+                    gd = dd
+                self.grid_left_edge[gi,:] = le
+                self.grid_right_edge[gi,:] = re
+                self.grid_dimensions[gi,:] = gd
+                assert np.all(self.grid_left_edge[gi,:]<=1.0)    
                 self.grid_levels[gi,:] = level
                 child_mask = np.zeros(props[2,:],'uint8')
-                amr_utils.fill_child_mask(fl,props[0],
+                amr_utils.fill_child_mask(fl,start_index,
                     self.pf.level_art_child_masks[level],
                     child_mask)
                 grids.append(self.grid(gi, self, level, fl, 
-                    props*np.array(correction).astype('int64')))
+                    start_index,le,re,gd))
                 gi += 1
         self.grids = np.empty(len(grids), dtype='object')
-        
-
-        if self.pf.file_particle_data:
+        if not self.pf.skip_particles and self.pf.file_particle_data:
             lspecies = self.pf.parameters['lspecies']
             wspecies = self.pf.parameters['wspecies']
-            Nrow     = self.pf.parameters['Nrow']
-            nstars = lspecies[-1]
-            a = self.pf.parameters['aexpn']
-            hubble = self.pf.parameters['hubble']
-            ud  = self.pf.parameters['r0']*a/hubble #proper Mpc units
-            uv  = self.pf.parameters['v0']/(a**1.0)*1.0e5 #proper cm/s
-            um  = self.pf.parameters['aM0'] #mass units in solar masses
-            um *= 1.989e33 #convert solar masses to grams 
-            pbar = get_pbar("Loading Particles   ",5)
+            um  = self.pf.conversion_factors['Mass'] #mass units in g
+            uv  = self.pf.conversion_factors['Velocity'] #velocity units, to proper cm/s
             self.pf.particle_position,self.pf.particle_velocity = \
-                read_particles(self.pf.file_particle_data,nstars,Nrow)
-            pbar.update(1)
-            npa,npb=0,0
-            npb = lspecies[-1]
-            clspecies = np.concatenate(([0,],lspecies))
-            if self.pf.only_particle_type is not None:
-                npb = lspecies[0]
-                if type(self.pf.only_particle_type)==type(5):
-                    npa = clspecies[self.pf.only_particle_type]
-                    npb = clspecies[self.pf.only_particle_type+1]
-            np = npb-npa
-            self.pf.particle_position   = self.pf.particle_position[npa:npb]
-            #do NOT correct by an offset of 1.0
-            #self.pf.particle_position  -= 1.0 #fortran indices start with 0
-            pbar.update(2)
-            self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
-            pbar.update(3)
-            self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
+                read_particles(self.pf.file_particle_data,
+                        self.pf.parameters['Nrow'])
+            nparticles = lspecies[-1]
+            if not np.all(self.pf.particle_position[nparticles:]==0.0):
+                mylog.info('WARNING: unused particles discovered from lspecies')
+            self.pf.particle_position = self.pf.particle_position[:nparticles]
+            self.pf.particle_velocity = self.pf.particle_velocity[:nparticles]
+            self.pf.particle_position  /= self.pf.domain_dimensions 
+            self.pf.particle_velocity   = self.pf.particle_velocity
             self.pf.particle_velocity  *= uv #to proper cm/s
-            pbar.update(4)
-            self.pf.particle_type         = np.zeros(np,dtype='uint8')
-            self.pf.particle_mass         = np.zeros(np,dtype='float64')
-            self.pf.particle_mass_initial = np.zeros(np,dtype='float64')-1
-            self.pf.particle_creation_time= np.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity1 = np.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity2 = np.zeros(np,dtype='float64')-1
-            self.pf.particle_age          = np.zeros(np,dtype='float64')-1
-            
-            dist = self.pf['cm']/self.pf.domain_dimensions[0]
-            self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
-            self.pf.conversion_factors['particle_mass_initial'] = 1.0 #solar mass in g
-            self.pf.conversion_factors['particle_species'] = 1.0
-            for ax in 'xyz':
-                self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
-                #already in unitary units
-                self.pf.conversion_factors['particle_position_%s'%ax] = 1.0 
-            self.pf.conversion_factors['particle_creation_time'] =  31556926.0
-            self.pf.conversion_factors['particle_metallicity']=1.0
-            self.pf.conversion_factors['particle_metallicity1']=1.0
-            self.pf.conversion_factors['particle_metallicity2']=1.0
-            self.pf.conversion_factors['particle_index']=1.0
-            self.pf.conversion_factors['particle_type']=1
-            self.pf.conversion_factors['particle_age']=1
-            self.pf.conversion_factors['Msun'] = 5.027e-34 #conversion to solar mass units
-            
-
-            a,b=0,0
+            self.pf.particle_star_index = len(wspecies)-1
+            self.pf.particle_type = np.zeros(nparticles,dtype='int')
+            self.pf.particle_mass = np.zeros(nparticles,dtype='float32')
+            a=0
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
-                if type(self.pf.only_particle_type)==type(5):
-                    if not i==self.pf.only_particle_type:
-                        continue
-                    self.pf.particle_type += i
-                    self.pf.particle_mass += m*um
-
-                else:
-                    self.pf.particle_type[a:b] = i #particle type
-                    self.pf.particle_mass[a:b] = m*um #mass in solar masses
+                if i == self.pf.particle_star_index:
+                    sa,sb = a,b
+                self.pf.particle_type[a:b] = i #particle type
+                self.pf.particle_mass[a:b] = m*um #mass in grams
                 a=b
-            pbar.finish()
-
-            nparticles = [0,]+list(lspecies)
-            for j,np in enumerate(nparticles):
-                mylog.debug('found %i of particle type %i'%(j,np))
-            
-            self.pf.particle_star_index = i
-            
-            do_stars = (self.pf.only_particle_type is None) or \
-                       (self.pf.only_particle_type == -1) or \
-                       (self.pf.only_particle_type == len(lspecies))
-            if self.pf.file_star_data and do_stars: 
-                nstars, mass, imass, tbirth, metallicity1, metallicity2 \
-                     = read_stars(self.pf.file_star_data,nstars,Nrow)
-                nstars = nstars[0] 
-                if nstars > 0 :
+            if not self.pf.skip_stars and self.pf.file_particle_stars: 
+                (nstars_rs,), mass, imass, tbirth, metallicity1, metallicity2, \
+                        ws_old,ws_oldi,tdum,adum \
+                     = read_stars(self.pf.file_particle_stars)
+                self.pf.nstars_rs = nstars_rs     
+                self.pf.nstars_pa = b-a
+                inconsistent=self.pf.particle_type==self.pf.particle_star_index
+                if not nstars_rs==np.sum(inconsistent):
+                    mylog.info('WARNING!: nstars is inconsistent!')
+                del inconsistent
+                if nstars_rs > 0 :
                     n=min(1e2,len(tbirth))
-                    pbar = get_pbar("Stellar Ages        ",n)
-                    sages  = \
-                        b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
-                    sages *= sec_per_Gyr #from Gyr to seconds
-                    sages = self.pf.current_time-sages
-                    self.pf.particle_age[-nstars:] = sages
-                    pbar.finish()
-                    self.pf.particle_metallicity1[-nstars:] = metallicity1
-                    self.pf.particle_metallicity2[-nstars:] = metallicity2
-                    #self.pf.particle_metallicity1 *= 0.0199 
-                    #self.pf.particle_metallicity2 *= 0.0199 
-                    self.pf.particle_mass_initial[-nstars:] = imass*um
-                    self.pf.particle_mass[-nstars:] = mass*um
-
-            done = 0
-            init = self.pf.particle_position.shape[0]
-            pos = self.pf.particle_position
-            #particle indices travel with the particle positions
-            #pos = np.vstack((np.arange(pos.shape[0]),pos.T)).T 
-            if type(self.pf.grid_particles) == type(5):
-                particle_level = min(self.pf.max_level,self.pf.grid_particles)
-            else:
-                particle_level = 2
-            grid_particle_count = np.zeros((len(grids),1),dtype='int64')
-
-            pbar = get_pbar("Gridding Particles ",init)
-            assignment,ilists = amr_utils.assign_particles_to_cell_lists(
-                    self.grid_levels.ravel().astype('int32'),
-                    np.zeros(len(pos[:,0])).astype('int32')-1,
-                    particle_level, #dont grid particles past this
-                    self.grid_left_edge.astype('float32'),
-                    self.grid_right_edge.astype('float32'),
-                    pos[:,0].astype('float32'),
-                    pos[:,1].astype('float32'),
-                    pos[:,2].astype('float32'))
-            pbar.finish()
-            
-            pbar = get_pbar("Filling grids ",init)
-            for gidx,(g,ilist) in enumerate(zip(grids,ilists)):
-                np = len(ilist)
-                grid_particle_count[gidx,0]=np
-                g.hierarchy.grid_particle_count = grid_particle_count
-                g.particle_indices = ilist
-                grids[gidx] = g
-                done += np
-                pbar.update(done)
-            pbar.finish()
-
-            #assert init-done== 0 #we have gridded every particle
-            
-        pbar = get_pbar("Finalizing grids ",len(grids))
-        for gi, g in enumerate(grids): 
-            self.grids[gi] = g
-        pbar.finish()
-            
-
+                    birthtimes= b2t(tbirth,n=n)
+                    birthtimes = birthtimes.astype('float64')
+                    assert birthtimes.shape == tbirth.shape    
+                    birthtimes*= 1.0e9 #from Gyr to yr
+                    birthtimes*= 365*24*3600 #to seconds
+                    ages = self.pf.current_time-birthtimes
+                    spread = self.pf.spread_age
+                    if type(spread)==type(5.5):
+                        ages = spread_ages(ages,spread=spread)
+                    elif spread:
+                        ages = spread_ages(ages)
+                    idx = self.pf.particle_type == self.pf.particle_star_index
+                    for psf in particle_star_fields:
+                        setattr(self.pf,psf,
+                                np.zeros(nparticles,dtype='float32'))
+                    self.pf.particle_age[sa:sb] = ages
+                    self.pf.particle_mass[sa:sb] = mass
+                    self.pf.particle_mass_initial[sa:sb] = imass
+                    self.pf.particle_creation_time[sa:sb] = birthtimes
+                    self.pf.particle_metallicity1[sa:sb] = metallicity1
+                    self.pf.particle_metallicity2[sa:sb] = metallicity2
+                    self.pf.particle_metallicity[sa:sb]  = metallicity1\
+                                                          + metallicity2
+        for gi,g in enumerate(grids):    
+            self.grids[gi]=g
+                    
     def _get_grid_parents(self, grid, LE, RE):
         mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(LE, RE)
         return self.grids[mask]
 
     def _populate_grid_objects(self):
+        mask = np.empty(self.grids.size, dtype='int32')
+        pb = get_pbar("Populating grids", len(self.grids))
         for gi,g in enumerate(self.grids):
-            parents = self._get_grid_parents(g,
-                            self.grid_left_edge[gi,:],
-                            self.grid_right_edge[gi,:])
+            pb.update(gi)
+            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level - 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            parents = self.grids[mask.astype("bool")]
             if len(parents) > 0:
-                g.Parent.extend(parents.tolist())
+                g.Parent.extend((p for p in parents.tolist()
+                        if p.locations[0,0] == g.locations[0,0]))
                 for p in parents: p.Children.append(g)
+            #Now we do overlapping siblings; note that one has to "win" with
+            #siblings, so we assume the lower ID one will "win"
+            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask, gi)
+            mask[gi] = False
+            siblings = self.grids[mask.astype("bool")]
+            if len(siblings) > 0:
+                g.OverlappingSiblings = siblings.tolist()
             g._prepare_grid()
             g._setup_dx()
+            #Instead of gridding particles, assign them all to the root grid
+            if gi==0:
+                for particle_field in particle_fields:
+                    source = getattr(self.pf,particle_field,None)
+                    if source is None:
+                        for i,ax in enumerate('xyz'):
+                            pf = particle_field.replace('_%s'%ax,'')
+                            source = getattr(self.pf,pf,None)
+                            if source is not None:
+                                source = source[:,i]
+                                break
+                    if source is not None:
+                        mylog.info("Attaching %s to the root grid",
+                                    particle_field)
+                        g.NumberOfParticles = source.shape[0]
+                        setattr(g,particle_field,source)