Commits

Matthew Turk committed 6cf2269 Merge

  • Parent commits f49fc87, 1ab51a4

Files changed (15)

File yt/analysis_modules/halo_finding/halo_objects.py

 
     _fields = ["particle_position_%s" % ax for ax in 'xyz']
 
-    def __init__(self, data_source, dm_only=True):
+    def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
         *dm_only* is set, only run it on the dark matter particles, otherwise

File yt/analysis_modules/halo_finding/setup.py

File contents unchanged.

File yt/data_objects/data_containers.py

         """
         Deletes a field
         """
+        if key not in self.field_data:
+            key = self._determine_fields(key)[0]
         del self.field_data[key]
 
     def _generate_field(self, field):
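
With this change, del accepts any field key that _determine_fields can
resolve, not only the literal key stored in field_data. A minimal usage
sketch, assuming a yt data object dd that has already read a field:

    dd["Density"]      # reads and caches the field
    del dd["Density"]  # the key is resolved before deletion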

File yt/data_objects/derived_quantities.py

     return [np.sum(totals[:,i]) for i in range(n_fields)]
 add_quantity("TotalQuantity", function=_TotalQuantity,
                 combine_function=_combTotalQuantity, n_ret=2)
+
+def _ParticleDensityCenter(data, nbins=3, particle_type="all"):
+    """
+    Find the center of the particle density
+    by histogramming the particles iteratively.
+    """
+    pos = [data[(particle_type, "particle_position_%s" % ax)]
+           for ax in "xyz"]
+    pos = np.array(pos).T
+    mas = data[(particle_type, "particle_mass")]
+    calc_radius = lambda x, y: np.sqrt(np.sum((x-y)**2.0, axis=1))
+    density = 0
+    if pos.shape[0] == 0:
+        return -1.0, [-1., -1., -1.]
+    while pos.shape[0] > 1:
+        table, bins = np.histogramdd(pos, bins=nbins, weights=mas)
+        bin_size = min((np.max(bins, axis=1)-np.min(bins, axis=1))/nbins)
+        centeridx = np.where(table == table.max())
+        le = np.array([bins[0][centeridx[0][0]],
+                       bins[1][centeridx[1][0]],
+                       bins[2][centeridx[2][0]]])
+        re = np.array([bins[0][centeridx[0][0]+1],
+                       bins[1][centeridx[1][0]+1],
+                       bins[2][centeridx[2][0]+1]])
+        center = 0.5*(le+re)
+        idx = calc_radius(pos, center) < bin_size
+        pos, mas = pos[idx], mas[idx]
+        density = max(density, mas.sum()/bin_size**3.0)
+    return density, center
+
+def _combParticleDensityCenter(data, densities, centers):
+    i = np.argmax(densities)
+    return densities[i], centers[i]
+
+add_quantity("ParticleDensityCenter", function=_ParticleDensityCenter,
+             combine_function=_combParticleDensityCenter, n_ret=2)
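
As a usage sketch (assuming a loaded parameter file pf and the yt 2.x
derived-quantity API), the new quantity returns the two n_ret values,
the peak density and its center:

    dd = pf.h.all_data()
    # iteratively histogram the particles down to the densest bin
    density, center = dd.quantities["ParticleDensityCenter"](
        nbins=3, particle_type="all")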

File yt/data_objects/tests/test_fields.py

     pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
     pf.current_redshift = 0.0001
     pf.hubble_constant = 0.7
+    pf.omega_matter = 0.27
     for unit in mpc_conversion:
         pf.units[unit+'h'] = pf.units[unit]
         pf.units[unit+'cm'] = pf.units[unit]

File yt/frontends/art/data_structures.py

 import stat
 import weakref
 import cStringIO
+import difflib
+import glob
 
 from yt.funcs import *
 from yt.geometry.oct_geometry_handler import \
 from yt.geometry.geometry_handler import \
     GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
-      StaticOutput
+    StaticOutput
 from yt.geometry.oct_container import \
-    RAMSESOctreeContainer
+    ARTOctreeContainer
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
 from .fields import \
     get_box_grids_level
 import yt.utilities.lib as amr_utils
 
-from .definitions import *
-from .io import _read_frecord
-from .io import _read_record
-from .io import _read_struct
+from yt.frontends.art.definitions import *
+from yt.utilities.fortran_utils import *
 from .io import _read_art_level_info
 from .io import _read_child_mask_level
 from .io import _read_child_level
 from .io import _read_root_level
-from .io import _read_record_size
-from .io import _skip_record
 from .io import _count_art_octs
 from .io import b2t
 
-
 import yt.frontends.ramses._ramses_reader as _ramses_reader
 
 from .fields import ARTFieldInfo, KnownARTFields
 from yt.utilities.physical_constants import \
     mass_hydrogen_cgs, sec_per_Gyr
 
+
 class ARTGeometryHandler(OctreeGeometryHandler):
-    def __init__(self,pf,data_style="art"):
-        """
-        Life is made simpler because we only have one AMR file
-        and one domain. However, we are matching to the RAMSES
-        multi-domain architecture.
-        """
+    def __init__(self, pf, data_style="art"):
         self.fluid_field_list = fluid_fields
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.max_level = pf.max_level
         self.float_type = np.float64
-        super(ARTGeometryHandler,self).__init__(pf,data_style)
+        super(ARTGeometryHandler, self).__init__(pf, data_style)
+
+    def get_smallest_dx(self):
+        """
+        Returns (in code units) the smallest cell size in the simulation.
+        """
+        # Overloaded
+        pf = self.parameter_file
+        return (1.0/pf.domain_dimensions.astype('f8') /
+                (2**self.max_level)).min()
 
     def _initialize_oct_handler(self):
         """
         allocate the requisite memory in the oct tree
         """
         nv = len(self.fluid_field_list)
-        self.domains = [ARTDomainFile(self.parameter_file,1,nv)]
+        self.domains = [ARTDomainFile(self.parameter_file, l+1, nv, l)
+                        for l in range(self.pf.max_level)]
         self.octs_per_domain = [dom.level_count.sum() for dom in self.domains]
         self.total_octs = sum(self.octs_per_domain)
-        self.oct_handler = RAMSESOctreeContainer(
-            self.parameter_file.domain_dimensions/2, #dd is # of root cells
+        self.oct_handler = ARTOctreeContainer(
+            self.parameter_file.domain_dimensions/2,  # dd is # of root cells
             self.parameter_file.domain_left_edge,
             self.parameter_file.domain_right_edge)
         mylog.debug("Allocating %s octs", self.total_octs)
         self.oct_handler.allocate_domains(self.octs_per_domain)
         for domain in self.domains:
-            domain._read_amr(self.oct_handler)
+            if domain.domain_level == 0:
+                domain._read_amr_root(self.oct_handler)
+            else:
+                domain._read_amr_level(self.oct_handler)
 
     def _detect_fields(self):
         self.particle_field_list = particle_fields
-        self.field_list = set(fluid_fields + particle_fields + particle_star_fields)
+        self.field_list = set(fluid_fields + particle_fields +
+                              particle_star_fields)
         self.field_list = list(self.field_list)
-    
+        # now generate all of the possible particle fields
+        if "wspecies" in self.parameter_file.parameters.keys():
+            wspecies = self.parameter_file.parameters['wspecies']
+            nspecies = len(wspecies)
+            self.parameter_file.particle_types = ["all", "darkmatter", "stars"]
+            for specie in range(nspecies):
+                self.parameter_file.particle_types.append("specie%i" % specie)
+        else:
+            self.parameter_file.particle_types = []
+
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         super(ARTGeometryHandler, self)._setup_classes(dd)
     def _identify_base_chunk(self, dobj):
         """
         Take the passed in data source dobj, and use its embedded selector
-        to calculate the domain mask, build the reduced domain 
+        to calculate the domain mask, build the reduced domain
         subsets and oct counts. Attach this information to dobj.
         """
         if getattr(dobj, "_chunk_info", None) is None:
-            #Get all octs within this oct handler
+            # Get all octs within this oct handler
             mask = dobj.selector.select_octs(self.oct_handler)
-            if mask.sum()==0:
+            if mask.sum() == 0:
                 mylog.debug("Warning: selected zero octs")
             counts = self.oct_handler.count_cells(dobj.selector, mask)
-            #For all domains, figure out how many counts we have 
-            #and build a subset=mask of domains 
-            subsets = [ARTDomainSubset(d, mask, c)
-                       for d, c in zip(self.domains, counts) if c > 0]
+            # For all domains, figure out how many counts we have
+            # and build a subset=mask of domains
+            subsets = []
+            for d, c in zip(self.domains, counts):
+                if c < 1:
+                    continue
+                subset = ARTDomainSubset(d, mask, c, d.domain_level)
+                subsets.append(subset)
             dobj._chunk_info = subsets
             dobj.size = sum(counts)
             dobj.shape = (dobj.size,)
 
     def _chunk_all(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        #We pass the chunk both the current chunk and list of chunks,
-        #as well as the referring data source
+        # We pass the chunk both the current chunk and list of chunks,
+        # as well as the referring data source
         yield YTDataChunk(dobj, "all", oobjs, dobj.size)
 
     def _chunk_spatial(self, dobj, ngz):
     def _chunk_io(self, dobj):
         """
         Since subsets are calculated per domain,
-        i.e. per file, yield each domain at a time to 
+        i.e. per file, yield one domain at a time to
         organize by IO. We will eventually chunk out NMSU ART
         to be level-by-level.
         """
         for subset in oobjs:
             yield YTDataChunk(dobj, "io", [subset], subset.cell_count)
 
+
 class ARTStaticOutput(StaticOutput):
     _hierarchy_class = ARTGeometryHandler
     _fieldinfo_fallback = ARTFieldInfo
     _fieldinfo_known = KnownARTFields
 
-    def __init__(self,filename,data_style='art',
-                 fields = None, storage_filename = None,
-                 skip_particles=False,skip_stars=False,
-                 limit_level=None,spread_age=True):
+    def __init__(self, filename, data_style='art',
+                 fields=None, storage_filename=None,
+                 skip_particles=False, skip_stars=False,
+                 limit_level=None, spread_age=True,
+                 force_max_level=None, file_particle_header=None,
+                 file_particle_data=None, file_particle_stars=None):
         if fields is None:
             fields = fluid_fields
         filename = os.path.abspath(filename)
         self._fields_in_file = fields
+        self._file_amr = filename
+        self._file_particle_header = file_particle_header
+        self._file_particle_data = file_particle_data
+        self._file_particle_stars = file_particle_stars
         self._find_files(filename)
-        self.file_amr = filename
         self.parameter_filename = filename
         self.skip_particles = skip_particles
         self.skip_stars = skip_stars
         self.limit_level = limit_level
         self.max_level = limit_level
+        self.force_max_level = force_max_level
         self.spread_age = spread_age
-        self.domain_left_edge = np.zeros(3,dtype='float')
-        self.domain_right_edge = np.zeros(3,dtype='float')+1.0
-        StaticOutput.__init__(self,filename,data_style)
+        self.domain_left_edge = np.zeros(3, dtype='float')
+        self.domain_right_edge = np.zeros(3, dtype='float')+1.0
+        StaticOutput.__init__(self, filename, data_style)
         self.storage_filename = storage_filename
 
-    def _find_files(self,file_amr):
+    def _find_files(self, file_amr):
         """
         Given the AMR base filename, attempt to find the
         particle header, star files, etc.
         """
-        prefix,suffix = filename_pattern['amr'].split('%s')
-        affix = os.path.basename(file_amr).replace(prefix,'')
-        affix = affix.replace(suffix,'')
-        affix = affix.replace('_','')
-        full_affix = affix
-        affix = affix[1:-1]
-        dirname = os.path.dirname(file_amr)
-        for fp in (filename_pattern_hf,filename_pattern):
-            for filetype, pattern in fp.items():
-                #if this attribute is already set skip it
-                if getattr(self,"file_"+filetype,None) is not None:
-                    continue
-                #sometimes the affix is surrounded by an extraneous _
-                #so check for an extra character on either side
-                check_filename = dirname+'/'+pattern%('?%s?'%affix)
-                filenames = glob.glob(check_filename)
-                if len(filenames)>1:
-                    check_filename_strict = \
-                            dirname+'/'+pattern%('?%s'%full_affix[1:])
-                    filenames = glob.glob(check_filename_strict)
-                
-                if len(filenames)==1:
-                    setattr(self,"file_"+filetype,filenames[0])
-                    mylog.info('discovered %s:%s',filetype,filenames[0])
-                elif len(filenames)>1:
-                    setattr(self,"file_"+filetype,None)
-                    mylog.info("Ambiguous number of files found for %s",
-                            check_filename)
-                    for fn in filenames:
-                        faffix = float(affix)
-                else:
-                    setattr(self,"file_"+filetype,None)
+        base_prefix, base_suffix = filename_pattern['amr']
+        possibles = glob.glob(os.path.dirname(file_amr)+"/*")
+        for filetype, (prefix, suffix) in filename_pattern.iteritems():
+            # if this attribute is already set skip it
+            if getattr(self, "_file_"+filetype, None) is not None:
+                continue
+            stripped = file_amr.replace(base_prefix, prefix)
+            stripped = stripped.replace(base_suffix, suffix)
+            matches = difflib.get_close_matches(stripped, possibles, 1, 0.6)
+            if len(matches) > 0:
+                mylog.info('discovered %s:%s', filetype, matches[0])
+                setattr(self, "_file_"+filetype, matches[0])
+            else:
+                setattr(self, "_file_"+filetype, None)
 
     def __repr__(self):
-        return self.file_amr.rsplit(".",1)[0]
+        return self._file_amr.split('/')[-1]
 
     def _set_units(self):
         """
-        Generates the conversion to various physical units based 
-		on the parameters from the header
+        Generates the conversion to various physical units based
+        on the parameters from the header
         """
         self.units = {}
         self.time_units = {}
         self.units['1'] = 1.0
         self.units['unitary'] = 1.0
 
-        #spatial units
-        z   = self.current_redshift
-        h   = self.hubble_constant
+        # spatial units
+        z = self.current_redshift
+        h = self.hubble_constant
         boxcm_cal = self.parameters["boxh"]
         boxcm_uncal = boxcm_cal / h
         box_proper = boxcm_uncal/(1+z)
             self.units[unit+'cm'] = mpc_conversion[unit] * boxcm_uncal
             self.units[unit+'hcm'] = mpc_conversion[unit] * boxcm_cal
 
-        #all other units
+        # all other units
         wmu = self.parameters["wmu"]
         Om0 = self.parameters['Om0']
-        ng  = self.parameters['ng']
+        ng = self.parameters['ng']
         wmu = self.parameters["wmu"]
-        boxh   = self.parameters['boxh'] 
-        aexpn  = self.parameters["aexpn"]
+        boxh = self.parameters['boxh']
+        aexpn = self.parameters["aexpn"]
         hubble = self.parameters['hubble']
 
         cf = defaultdict(lambda: 1.0)
         r0 = boxh/ng
-        P0= 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
-        T_0 = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
+        P0 = 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
+        T_0 = 3.03e5 * r0**2.0 * wmu * Om0  # [K]
         S_0 = 52.077 * wmu**(5.0/3.0)
         S_0 *= hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
-        #v0 =  r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
+        # v0 =  r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
         v0 = 50.0*r0*np.sqrt(Om0)
         t0 = r0/v0
         rho1 = 1.8791e-29 * hubble**2.0 * self.omega_matter
         rho0 = 2.776e11 * hubble**2.0 * Om0
-        tr = 2./3. *(3.03e5*r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
+        tr = 2./3. * (3.03e5*r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))
         aM0 = rho0 * (boxh/hubble)**3.0 / ng**3.0
-        cf['r0']=r0
-        cf['P0']=P0
-        cf['T_0']=T_0
-        cf['S_0']=S_0
-        cf['v0']=v0
-        cf['t0']=t0
-        cf['rho0']=rho0
-        cf['rho1']=rho1
-        cf['tr']=tr
-        cf['aM0']=aM0
+        cf['r0'] = r0
+        cf['P0'] = P0
+        cf['T_0'] = T_0
+        cf['S_0'] = S_0
+        cf['v0'] = v0
+        cf['t0'] = t0
+        cf['rho0'] = rho0
+        cf['rho1'] = rho1
+        cf['tr'] = tr
+        cf['aM0'] = aM0
 
-        #factors to multiply the native code units to CGS
-        cf['Pressure'] = P0 #already cgs
-        cf['Velocity'] = v0/aexpn*1.0e5 #proper cm/s
+        # factors to multiply the native code units to CGS
+        cf['Pressure'] = P0  # already cgs
+        cf['Velocity'] = v0/aexpn*1.0e5  # proper cm/s
         cf["Mass"] = aM0 * 1.98892e33
         cf["Density"] = rho1*(aexpn**-3.0)
         cf["GasEnergy"] = rho0*v0**2*(aexpn**-5.0)
         cf["Potential"] = 1.0
         cf["Entropy"] = S_0
         cf["Temperature"] = tr
+        cf["Time"] = 1.0
+        cf["particle_mass"] = cf['Mass']
+        cf["particle_mass_initial"] = cf['Mass']
         self.cosmological_simulation = True
         self.conversion_factors = cf
-        
-        for particle_field in particle_fields:
-            self.conversion_factors[particle_field] =  1.0
+
         for ax in 'xyz':
             self.conversion_factors["%s-velocity" % ax] = 1.0
+        for pt in particle_fields:
+            if pt not in self.conversion_factors.keys():
+                self.conversion_factors[pt] = 1.0
         for unit in sec_conversion.keys():
             self.time_units[unit] = 1.0 / sec_conversion[unit]
 
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
         self.parameters.update(constants)
-        #read the amr header
-        with open(self.file_amr,'rb') as f:
-            amr_header_vals = _read_struct(f,amr_header_struct)
-            for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
-                _skip_record(f)
-            (self.ncell,) = struct.unpack('>l', _read_record(f))
+        self.parameters['Time'] = 1.0
+        # read the amr header
+        with open(self._file_amr, 'rb') as f:
+            amr_header_vals = read_attrs(f, amr_header_struct, '>')
+            for to_skip in ['tl', 'dtl', 'tlold', 'dtlold', 'iSO']:
+                skipped = skip(f, endian='>')
+            self.ncell = read_vector(f, 'i', '>')[0]
             # Try to figure out the root grid dimensions
             est = int(np.rint(self.ncell**(1.0/3.0)))
             # Note here: this is the number of *cells* on the root grid.
             # This is not the same as the number of Octs.
-            #domain dimensions is the number of root *cells*
+            # domain dimensions is the number of root *cells*
             self.domain_dimensions = np.ones(3, dtype='int64')*est
             self.root_grid_mask_offset = f.tell()
             self.root_nocts = self.domain_dimensions.prod()/8
             self.root_ncells = self.root_nocts*8
-            mylog.debug("Estimating %i cells on a root grid side,"+ \
-                        "%i root octs",est,self.root_nocts)
-            self.root_iOctCh = _read_frecord(f,'>i')[:self.root_ncells]
+            mylog.debug("Estimating %i cells on a root grid side," +
+                        "%i root octs", est, self.root_nocts)
+            self.root_iOctCh = read_vector(f, 'i', '>')[:self.root_ncells]
             self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
-                 order='F')
+                                                        order='F')
             self.root_grid_offset = f.tell()
-            #_skip_record(f) # hvar
-            #_skip_record(f) # var
-            self.root_nhvar = _read_frecord(f,'>f',size_only=True)
-            self.root_nvar  = _read_frecord(f,'>f',size_only=True)
-            #make sure that the number of root variables is a multiple of rootcells
-            assert self.root_nhvar%self.root_ncells==0
-            assert self.root_nvar%self.root_ncells==0
-            self.nhydro_variables = ((self.root_nhvar+self.root_nvar)/ 
-                                    self.root_ncells)
-            self.iOctFree, self.nOct = struct.unpack('>ii', _read_record(f))
+            self.root_nhvar = skip(f, endian='>')
+            self.root_nvar = skip(f, endian='>')
+            # make sure that the number of root variables is a multiple of
+            # rootcells
+            assert self.root_nhvar % self.root_ncells == 0
+            assert self.root_nvar % self.root_ncells == 0
+            self.nhydro_variables = ((self.root_nhvar+self.root_nvar) /
+                                     self.root_ncells)
+            self.iOctFree, self.nOct = read_vector(f, 'i', '>')
             self.child_grid_offset = f.tell()
             self.parameters.update(amr_header_vals)
             self.parameters['ncell0'] = self.parameters['ng']**3
-        #read the particle header
-        if not self.skip_particles and self.file_particle_header:
-            with open(self.file_particle_header,"rb") as fh:
-                particle_header_vals = _read_struct(fh,particle_header_struct)
+            # estimate the root level
+            float_center, fl, iocts, nocts, root_level = _read_art_level_info(
+                f,
+                [0, self.child_grid_offset], 1,
+                coarse_grid=self.domain_dimensions[0])
+            del float_center, fl, iocts, nocts
+            self.root_level = root_level
+            mylog.info("Using root level of %02i", self.root_level)
+        # read the particle header
+        if not self.skip_particles and self._file_particle_header:
+            with open(self._file_particle_header, "rb") as fh:
+                particle_header_vals = read_attrs(
+                    fh, particle_header_struct, '>')
                 fh.seek(seek_extras)
                 n = particle_header_vals['Nspecies']
-                wspecies = np.fromfile(fh,dtype='>f',count=10)
-                lspecies = np.fromfile(fh,dtype='>i',count=10)
+                wspecies = np.fromfile(fh, dtype='>f', count=10)
+                lspecies = np.fromfile(fh, dtype='>i', count=10)
             self.parameters['wspecies'] = wspecies[:n]
             self.parameters['lspecies'] = lspecies[:n]
             ls_nonzero = np.diff(lspecies)[:n-1]
-            mylog.info("Discovered %i species of particles",len(ls_nonzero))
+            self.star_type = len(ls_nonzero)
+            mylog.info("Discovered %i species of particles", len(ls_nonzero))
             mylog.info("Particle populations: "+'%1.1e '*len(ls_nonzero),
-                *ls_nonzero)
-            for k,v in particle_header_vals.items():
+                       *ls_nonzero)
+            for k, v in particle_header_vals.items():
                 if k in self.parameters.keys():
                     if not self.parameters[k] == v:
-                        mylog.info("Inconsistent parameter %s %1.1e  %1.1e",k,v,
-                                   self.parameters[k])
+                        mylog.info(
+                            "Inconsistent parameter %s %1.1e  %1.1e", k, v,
+                            self.parameters[k])
                 else:
-                    self.parameters[k]=v
+                    self.parameters[k] = v
             self.parameters_particles = particle_header_vals
-    
-        #setup standard simulation params yt expects to see
+
+        # setup standard simulation params yt expects to see
         self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
         self.omega_lambda = amr_header_vals['Oml0']
         self.omega_matter = amr_header_vals['Om0']
         self.hubble_constant = amr_header_vals['hubble']
         self.min_level = amr_header_vals['min_level']
         self.max_level = amr_header_vals['max_level']
-        self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
+        if self.limit_level is not None:
+            self.max_level = min(
+                self.limit_level, amr_header_vals['max_level'])
+        if self.force_max_level is not None:
+            self.max_level = self.force_max_level
+        self.hubble_time = 1.0/(self.hubble_constant*100/3.08568025e19)
         self.current_time = b2t(self.parameters['t']) * sec_per_Gyr
+        mylog.info("Max level is %02i", self.max_level)
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
         Defined for the NMSU file naming scheme.
         This could differ for other formats.
         """
-        fn = ("%s" % (os.path.basename(args[0])))
         f = ("%s" % args[0])
-        prefix, suffix = filename_pattern['amr'].split('%s')
-        if fn.endswith(suffix) and fn.startswith(prefix) and\
-                os.path.exists(f): 
+        prefix, suffix = filename_pattern['amr']
+        with open(f, 'rb') as fh:
+            try:
+                amr_header_vals = read_attrs(fh, amr_header_struct, '>')
                 return True
+            except AssertionError:
+                return False
         return False
 
+
 class ARTDomainSubset(object):
-    def __init__(self, domain, mask, cell_count):
+    def __init__(self, domain, mask, cell_count, domain_level):
         self.mask = mask
         self.domain = domain
         self.oct_handler = domain.pf.h.oct_handler
         self.cell_count = cell_count
+        self.domain_level = domain_level
         level_counts = self.oct_handler.count_levels(
             self.domain.pf.max_level, self.domain.domain_id, mask)
         assert(level_counts.sum() == cell_count)
     def select_fwidth(self, dobj):
         base_dx = 1.0/self.domain.pf.domain_dimensions
         widths = np.empty((self.cell_count, 3), dtype="float64")
-        dds = (2**self.ires(dobj))
+        dds = (2**self.select_ires(dobj))
         for i in range(3):
-            widths[:,i] = base_dx[i] / dds
+            widths[:, i] = base_dx[i] / dds
         return widths
 
-    def fill(self, content, fields):
+    def fill_root(self, content, ftfields):
         """
         This is called from IOHandler. It takes content
         which is a binary stream, reads the requested field
         the order they are in the octhandler.
         """
         oct_handler = self.oct_handler
-        all_fields  = self.domain.pf.h.fluid_field_list
-        fields = [f for ft, f in fields]
-        dest= {}
-        filled = pos = level_offset = 0
+        all_fields = self.domain.pf.h.fluid_field_list
+        fields = [f for ft, f in ftfields]
+        level_offset = 0
         field_idxs = [all_fields.index(f) for f in fields]
+        dest = {}
         for field in fields:
-            dest[field] = np.zeros(self.cell_count, 'float64')
-        for level, offset in enumerate(self.domain.level_offsets):
-            no = self.domain.level_count[level]
-            if level==0:
-                data = _read_root_level(content,self.domain.level_child_offsets,
-                                       self.domain.level_count)
-                data = data[field_idxs,:]
-            else:
-                data = _read_child_level(content,self.domain.level_child_offsets,
-                                         self.domain.level_offsets,
-                                         self.domain.level_count,level,fields,
-                                         self.domain.pf.domain_dimensions,
-                                         self.domain.pf.parameters['ncell0'])
-            source= {}
-            for i,field in enumerate(fields):
-                source[field] = np.empty((no, 8), dtype="float64")
-                source[field][:,:] = np.reshape(data[i,:],(no,8))
-            level_offset += oct_handler.fill_level(self.domain.domain_id, 
-                                   level, dest, source, self.mask, level_offset)
+            dest[field] = np.zeros(self.cell_count, 'float64')-1.
+        level = self.domain_level
+        source = {}
+        data = _read_root_level(content, self.domain.level_child_offsets,
+                                self.domain.level_count)
+        for field, i in zip(fields, field_idxs):
+            temp = np.reshape(data[i, :], self.domain.pf.domain_dimensions,
+                              order='F').astype('float64').T
+            source[field] = temp
+        level_offset += oct_handler.fill_level_from_grid(
+            self.domain.domain_id,
+            level, dest, source, self.mask, level_offset)
         return dest
 
+    def fill_level(self, content, ftfields):
+        oct_handler = self.oct_handler
+        fields = [f for ft, f in ftfields]
+        level_offset = 0
+        dest = {}
+        for field in fields:
+            dest[field] = np.zeros(self.cell_count, 'float64')-1.
+        level = self.domain_level
+        no = self.domain.level_count[level]
+        noct_range = [0, no]
+        source = _read_child_level(
+            content, self.domain.level_child_offsets,
+            self.domain.level_offsets,
+            self.domain.level_count, level, fields,
+            self.domain.pf.domain_dimensions,
+            self.domain.pf.parameters['ncell0'],
+            noct_range=noct_range)
+        nocts_filling = noct_range[1]-noct_range[0]
+        level_offset += oct_handler.fill_level(self.domain.domain_id,
+                                               level, dest, source,
+                                               self.mask, level_offset,
+                                               noct_range[0],
+                                               nocts_filling)
+        return dest
+
+
 class ARTDomainFile(object):
     """
     Read in the AMR, left/right edges, fill out the octhandler
     """
-    #We already read in the header in static output,
-    #and since these headers are defined in only a single file it's
-    #best to leave them in the static output
+    # We already read in the header in static output,
+    # and since these headers are defined in only a single file it's
+    # best to leave them in the static output
     _last_mask = None
     _last_selector_id = None
 
-    def __init__(self,pf,domain_id,nvar):
+    def __init__(self, pf, domain_id, nvar, level):
         self.nvar = nvar
         self.pf = pf
         self.domain_id = domain_id
+        self.domain_level = level
         self._level_count = None
         self._level_oct_offsets = None
         self._level_child_offsets = None
 
     @property
     def level_count(self):
-        #this is number of *octs*
-        if self._level_count is not None: return self._level_count
+        # this is number of *octs*
+        if self._level_count is not None:
+            return self._level_count
         self.level_offsets
-        return self._level_count
+        return self._level_count[self.domain_level]
 
     @property
     def level_child_offsets(self):
-        if self._level_count is not None: return self._level_child_offsets
+        if self._level_count is not None:
+            return self._level_child_offsets
         self.level_offsets
         return self._level_child_offsets
 
     @property
-    def level_offsets(self): 
-        #this is used by the IO operations to find the file offset,
-        #and then start reading to fill values
-        #note that this is called hydro_offset in ramses
-        if self._level_oct_offsets is not None: 
+    def level_offsets(self):
+        # this is used by the IO operations to find the file offset,
+        # and then start reading to fill values
+        # note that this is called hydro_offset in ramses
+        if self._level_oct_offsets is not None:
             return self._level_oct_offsets
         # We now have to open the file and calculate it
-        f = open(self.pf.file_amr, "rb")
+        f = open(self.pf._file_amr, "rb")
         nhydrovars, inoll, _level_oct_offsets, _level_child_offsets = \
             _count_art_octs(f,  self.pf.child_grid_offset, self.pf.min_level,
                             self.pf.max_level)
-        #remember that the root grid is by itself; manually add it back in
+        # remember that the root grid is by itself; manually add it back in
         inoll[0] = self.pf.domain_dimensions.prod()/8
         _level_child_offsets[0] = self.pf.root_grid_offset
         self.nhydrovars = nhydrovars
-        self.inoll = inoll #number of octs
+        self.inoll = inoll  # number of octs
         self._level_oct_offsets = _level_oct_offsets
         self._level_child_offsets = _level_child_offsets
         self._level_count = inoll
         return self._level_oct_offsets
-    
-    def _read_amr(self, oct_handler):
+
+    def _read_amr_level(self, oct_handler):
         """Open the oct file, read in octs level-by-level.
-           For each oct, only the position, index, level and domain 
+           For each oct, only the position, index, level and domain
            are needed - its position in the octree is found automatically.
            The most important part is finding all the information to feed
            oct_handler.add
         """
-        #on the root level we typically have 64^3 octs
-        #giving rise to 128^3 cells
-        #but on level 1 instead of 128^3 octs, we have 256^3 octs
-        #leave this code here instead of static output - it's memory intensive
         self.level_offsets
-        f = open(self.pf.file_amr, "rb")
-        #add the root *cell* not *oct* mesh
+        f = open(self.pf._file_amr, "rb")
+        level = self.domain_level
+        unitary_center, fl, iocts, nocts, root_level = _read_art_level_info(
+            f,
+            self._level_oct_offsets, level,
+            coarse_grid=self.pf.domain_dimensions[0],
+            root_level=self.pf.root_level)
+        nocts_check = oct_handler.add(self.domain_id, level, nocts,
+                                      unitary_center, self.domain_id)
+        assert(nocts_check == nocts)
+        mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
+                    nocts, level, oct_handler.nocts)
+
+    def _read_amr_root(self, oct_handler):
+        self.level_offsets
+        f = open(self.pf._file_amr, "rb")
+        # add the root *cell* not *oct* mesh
+        level = self.domain_level
         root_octs_side = self.pf.domain_dimensions[0]/2
         NX = np.ones(3)*root_octs_side
+        octs_side = NX*2**level
         LE = np.array([0.0, 0.0, 0.0], dtype='float64')
         RE = np.array([1.0, 1.0, 1.0], dtype='float64')
         root_dx = (RE - LE) / NX
         LL = LE + root_dx/2.0
         RL = RE - root_dx/2.0
-        #compute floating point centers of root octs
-        root_fc= np.mgrid[LL[0]:RL[0]:NX[0]*1j,
-                          LL[1]:RL[1]:NX[1]*1j,
-                          LL[2]:RL[2]:NX[2]*1j ]
-        root_fc= np.vstack([p.ravel() for p in root_fc]).T
-        nocts_check = oct_handler.add(1, 0, root_octs_side**3,
+        # compute floating point centers of root octs
+        root_fc = np.mgrid[LL[0]:RL[0]:NX[0]*1j,
+                           LL[1]:RL[1]:NX[1]*1j,
+                           LL[2]:RL[2]:NX[2]*1j]
+        root_fc = np.vstack([p.ravel() for p in root_fc]).T
+        nocts_check = oct_handler.add(self.domain_id, level,
+                                      root_octs_side**3,
                                       root_fc, self.domain_id)
         assert(oct_handler.nocts == root_fc.shape[0])
-        nocts_added = root_fc.shape[0]
         mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
-                    root_octs_side**3, 0,nocts_added)
-        for level in xrange(1, self.pf.max_level+1):
-            left_index, fl, iocts, nocts,root_level = _read_art_level_info(f, 
-                self._level_oct_offsets,level,
-                coarse_grid=self.pf.domain_dimensions[0])
-            left_index/=2
-            #at least one of the indices should be odd
-            #assert np.sum(left_index[:,0]%2==1)>0
-            octs_side = NX*2**level
-            float_left_edge = left_index.astype("float64") / octs_side
-            float_center = float_left_edge + 0.5*1.0/octs_side
-            #all floatin unitary positions should fit inside the domain
-            assert np.all(float_center<1.0)
-            nocts_check = oct_handler.add(1,level, nocts, float_left_edge, self.domain_id)
-            nocts_added += nocts
-            assert(oct_handler.nocts == nocts_added)
-            mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
-                        nocts, level,nocts_added)
+                    root_octs_side**3, 0, oct_handler.nocts)
 
     def select(self, selector):
         if id(selector) == self._last_selector_id:
 
     def count(self, selector):
         if id(selector) == self._last_selector_id:
-            if self._last_mask is None: return 0
+            if self._last_mask is None:
+                return 0
             return self._last_mask.sum()
         self.select(selector)
         return self.count(selector)
-

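The rewritten _find_files above swaps the old glob patterns for fuzzy
matching. A standalone sketch of the idea with the standard-library
difflib.get_close_matches(word, possibilities, n, cutoff); the file
names here are hypothetical:

    import difflib

    possibles = ["10MpcBox_csf512_a0.300.d", "PMcrda0.300.DAT",
                 "PMcrs0a0.300.DAT", "stars_a0.300.dat"]
    # build the expected particle-header name from the AMR name, then
    # pick the closest existing file above a 0.6 similarity cutoff
    guess = "10MpcBox_csf512_a0.300.d".replace("10MpcBox_", "PMcrd")
    guess = guess.replace(".d", ".DAT")
    print difflib.get_close_matches(guess, possibles, 1, 0.6)
    # -> ['PMcrda0.300.DAT']
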
File yt/frontends/art/definitions.py

 
 """
 
-fluid_fields= [ 
+# If not otherwise specified, we are big endian
+endian = '>'
+
+fluid_fields = [
     'Density',
     'TotalEnergy',
     'XMomentumDensity',
     'PotentialOld'
 ]
 
-hydro_struct = [('pad1','>i'),('idc','>i'),('iOctCh','>i')]
+hydro_struct = [('pad1', '>i'), ('idc', '>i'), ('iOctCh', '>i')]
 for field in fluid_fields:
-    hydro_struct += (field,'>f'),
-hydro_struct += ('pad2','>i'),
+    hydro_struct += (field, '>f'),
+hydro_struct += ('pad2', '>i'),
 
-particle_fields= [
-    'particle_age',
+particle_fields = [
+    'particle_mass',  # stars have variable mass
     'particle_index',
-    'particle_mass',
-    'particle_mass_initial',
-    'particle_creation_time',
-    'particle_metallicity1',
-    'particle_metallicity2',
-    'particle_metallicity',
+    'particle_type',
     'particle_position_x',
     'particle_position_y',
     'particle_position_z',
     'particle_velocity_x',
     'particle_velocity_y',
     'particle_velocity_z',
-    'particle_type',
-    'particle_index'
+    'particle_mass_initial',
+    'particle_creation_time',
+    'particle_metallicity1',
+    'particle_metallicity2',
+    'particle_metallicity',
 ]
 
 particle_star_fields = [
-    'particle_age',
     'particle_mass',
     'particle_mass_initial',
     'particle_creation_time',
     'particle_metallicity',
 ]
 
-filename_pattern = {				
-	'amr':'10MpcBox_csf512_%s.d',
-	'particle_header':'PMcrd%s.DAT',
-	'particle_data':'PMcrs0%s.DAT',
-	'particle_stars':'stars_%s.dat'
-}
 
-filename_pattern_hf = {				
-	'particle_header':'PMcrd_%s.DAT',
-	'particle_data':'PMcrs0_%s.DAT',
+filename_pattern = {
+    'amr': ['10MpcBox_', '.d'],
+    'particle_header': ['PMcrd', '.DAT'],
+    'particle_data': ['PMcrs', '.DAT'],
+    'particle_stars': ['stars', '.dat']
 }
 
 amr_header_struct = [
-    ('>i','pad byte'),
-    ('>256s','jname'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>i','istep'),
-    ('>d','t'),
-    ('>d','dt'),
-    ('>f','aexpn'),
-    ('>f','ainit'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>f','boxh'),
-    ('>f','Om0'),
-    ('>f','Oml0'),
-    ('>f','Omb0'),
-    ('>f','hubble'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>i','nextras'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>f','extra1'),
-    ('>f','extra2'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>256s','lextra'),
-    ('>256s','lextra'),
-    ('>i','pad byte'),
-    ('>i', 'pad byte'),
-    ('>i', 'min_level'),
-    ('>i', 'max_level'),
-    ('>i', 'pad byte'),
+    ('jname', 1, '256s'),
+    (('istep', 't', 'dt', 'aexpn', 'ainit'), 1, 'iddff'),
+    (('boxh', 'Om0', 'Oml0', 'Omb0', 'hubble'), 5, 'f'),
+    ('nextras', 1, 'i'),
+    (('extra1', 'extra2'), 2, 'f'),
+    ('lextra', 1, '512s'),
+    (('min_level', 'max_level'), 2, 'i')
 ]
 
-particle_header_struct =[
-    ('>i','pad'),
-    ('45s','header'), 
-    ('>f','aexpn'),
-    ('>f','aexp0'),
-    ('>f','amplt'),
-    ('>f','astep'),
-    ('>i','istep'),
-    ('>f','partw'),
-    ('>f','tintg'),
-    ('>f','Ekin'),
-    ('>f','Ekin1'),
-    ('>f','Ekin2'),
-    ('>f','au0'),
-    ('>f','aeu0'),
-    ('>i','Nrow'),
-    ('>i','Ngridc'),
-    ('>i','Nspecies'),
-    ('>i','Nseed'),
-    ('>f','Om0'),
-    ('>f','Oml0'),
-    ('>f','hubble'),
-    ('>f','Wp5'),
-    ('>f','Ocurv'),
-    ('>f','Omb0'),
-    ('>%ds'%(396),'extras'),
-    ('>f','unknown'),
-    ('>i','pad')
+particle_header_struct = [
+    (('header',
+     'aexpn', 'aexp0', 'amplt', 'astep',
+     'istep',
+     'partw', 'tintg',
+     'Ekin', 'Ekin1', 'Ekin2',
+     'au0', 'aeu0',
+     'Nrow', 'Ngridc', 'Nspecies', 'Nseed',
+     'Om0', 'Oml0', 'hubble', 'Wp5', 'Ocurv', 'Omb0',
+     'extras', 'unknown'),
+     1,
+     '45sffffi'+'fffffff'+'iiii'+'ffffff'+'396s'+'f')
 ]
 
 star_struct = [
-        ('>d',('tdum','adum')),
-        ('>i','nstars'),
-        ('>d',('ws_old','ws_oldi')),
-        ('>f','mass'),
-        ('>f','imass'),
-        ('>f','tbirth'),
-        ('>f','metallicity1'),
-        ('>f','metallicity2')
-        ]
+    ('>d', ('tdum', 'adum')),
+    ('>i', 'nstars'),
+    ('>d', ('ws_old', 'ws_oldi')),
+    ('>f', 'particle_mass'),
+    ('>f', 'particle_mass_initial'),
+    ('>f', 'particle_creation_time'),
+    ('>f', 'particle_metallicity1'),
+    ('>f', 'particle_metallicity2')
+]
 
 star_name_map = {
-        'particle_mass':'mass',
-        'particle_mass_initial':'imass',
-        'particle_age':'tbirth',
-        'particle_metallicity1':'metallicity1',
-        'particle_metallicity2':'metallicity2',
-        'particle_metallicity':'metallicity',
-        }
+    'particle_mass': 'mass',
+    'particle_mass_initial': 'imass',
+    'particle_creation_time': 'tbirth',
+    'particle_metallicity1': 'metallicity1',
+    'particle_metallicity2': 'metallicity2',
+    'particle_metallicity': 'metallicity',
+}
 
 constants = {
-    "Y_p":0.245,
-    "gamma":5./3.,
-    "T_CMB0":2.726,
-    "T_min":300.,
-    "ng":128,
-    "wmu":4.0/(8.0-5.0*0.245)
+    "Y_p": 0.245,
+    "gamma": 5./3.,
+    "T_CMB0": 2.726,
+    "T_min": 300.,
+    "ng": 128,
+    "wmu": 4.0/(8.0-5.0*0.245)
 }
 
 seek_extras = 137
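
The header structs above now use (names, count, format) triples consumed
by read_attrs from yt.utilities.fortran_utils. The explicit pad-byte
entries of the old structs are gone, so the reader evidently consumes
the Fortran record markers itself; a simplified sketch that ignores
those markers:

    import struct

    def unpack_attrs(f, attrs, endian='>'):
        # for each triple, the format is repeated `count` times and the
        # unpacked values are zipped against the name or names
        vals = {}
        for names, count, fmt in attrs:
            full = endian + fmt * count
            data = struct.unpack(full, f.read(struct.calcsize(full)))
            if not isinstance(names, tuple):
                names = (names,)
            vals.update(zip(names, data))
        return vals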

File yt/frontends/art/fields.py

 
 Author: Matthew Turk <matthewturk@gmail.com>
 Affiliation: UCSD
+Author: Chris Moody <matthewturk@gmail.com>
+Affiliation: UCSC
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
+import numpy as np
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \
     ValidateGridType
 import yt.data_objects.universal_fields
 import yt.utilities.lib as amr_utils
+from yt.utilities.physical_constants import mass_sun_cgs
+from yt.frontends.art.definitions import *
 
 KnownARTFields = FieldInfoContainer()
 add_art_field = KnownARTFields.add_field
-
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = ARTFieldInfo.add_field
 
-import numpy as np
+for f in fluid_fields:
+    add_art_field(f, function=NullFunc, take_log=True,
+                  validators=[ValidateDataField(f)])
 
-#these are just the hydro fields
-known_art_fields = [ 'Density','TotalEnergy',
-                     'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
-                     'Pressure','Gamma','GasEnergy',
-                     'MetalDensitySNII', 'MetalDensitySNIa',
-                     'PotentialNew','PotentialOld']
-
-#Add the fields, then later we'll individually defined units and names
-for f in known_art_fields:
+for f in particle_fields:
     add_art_field(f, function=NullFunc, take_log=True,
-              validators = [ValidateDataField(f)])
-
-#Hydro Fields that are verified to be OK unit-wise:
-#Density
-#Temperature
-#metallicities
-#MetalDensity SNII + SNia
-
-#Hydro Fields that need to be tested:
-#TotalEnergy
-#XYZMomentum
-#Pressure
-#Gamma
-#GasEnergy
-#Potentials
-#xyzvelocity
-
-#Particle fields that are tested:
-#particle_position_xyz
-#particle_type
-#particle_index
-#particle_mass
-#particle_mass_initial
-#particle_age
-#particle_velocity
-#particle_metallicity12
-
-#Particle fields that are untested:
-#NONE
-
-#Other checks:
-#CellMassMsun == Density * CellVolume
+                  validators=[ValidateDataField(f)],
+                  particle_type=True)
+add_art_field("particle_mass", function=NullFunc, take_log=True,
+              validators=[ValidateDataField(f)],
+              particle_type=True,
+              convert_function=lambda x: x.convert("particle_mass"))
+add_art_field("particle_mass_initial", function=NullFunc, take_log=True,
+              validators=[ValidateDataField(f)],
+              particle_type=True,
+              convert_function=lambda x: x.convert("particle_mass"))
 
 def _convertDensity(data):
     return data.convert("Density")
 KnownARTFields["Density"]._units = r"\rm{g}/\rm{cm}^3"
 KnownARTFields["Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["Density"]._convert_function=_convertDensity
+KnownARTFields["Density"]._convert_function = _convertDensity
 
 def _convertTotalEnergy(data):
     return data.convert("GasEnergy")
-KnownARTFields["TotalEnergy"]._units = r"\rm{g}/\rm{cm}^3"
-KnownARTFields["TotalEnergy"]._projected_units = r"\rm{K}"
-KnownARTFields["TotalEnergy"]._convert_function=_convertTotalEnergy
+KnownARTFields["TotalEnergy"]._units = r"\rm{g}\rm{cm}^2/\rm{s}^2"
+KnownARTFields["TotalEnergy"]._projected_units = r"\rm{g}\rm{cm}^3/\rm{s}^2"
+KnownARTFields["TotalEnergy"]._convert_function = _convertTotalEnergy
 
 def _convertXMomentumDensity(data):
-    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr = data.convert("Mass")*data.convert("Velocity")
     tr *= (data.convert("Density")/data.convert("Mass"))
     return tr
-KnownARTFields["XMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
-KnownARTFields["XMomentumDensity"]._projected_units = r"\rm{K}"
-KnownARTFields["XMomentumDensity"]._convert_function=_convertXMomentumDensity
+KnownARTFields["XMomentumDensity"]._units = r"\rm{g}/\rm{s}/\rm{cm}^3"
+KnownARTFields["XMomentumDensity"]._projected_units = r"\rm{g}/\rm{s}/\rm{cm}^2"
+KnownARTFields["XMomentumDensity"]._convert_function = _convertXMomentumDensity
 
 def _convertYMomentumDensity(data):
-    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr = data.convert("Mass")*data.convert("Velocity")
     tr *= (data.convert("Density")/data.convert("Mass"))
     return tr
-KnownARTFields["YMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
-KnownARTFields["YMomentumDensity"]._projected_units = r"\rm{K}"
-KnownARTFields["YMomentumDensity"]._convert_function=_convertYMomentumDensity
+KnownARTFields["YMomentumDensity"]._units = r"\rm{g}/\rm{s}/\rm{cm}^3"
+KnownARTFields["YMomentumDensity"]._projected_units = r"\rm{g}/\rm{s}/\rm{cm}^2"
+KnownARTFields["YMomentumDensity"]._convert_function = _convertYMomentumDensity
 
 def _convertZMomentumDensity(data):
-    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr = data.convert("Mass")*data.convert("Velocity")
     tr *= (data.convert("Density")/data.convert("Mass"))
     return tr
-KnownARTFields["ZMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
-KnownARTFields["ZMomentumDensity"]._projected_units = r"\rm{K}"
-KnownARTFields["ZMomentumDensity"]._convert_function=_convertZMomentumDensity
+KnownARTFields["ZMomentumDensity"]._units = r"\rm{g}/\rm{s}/\rm{cm}^3"
+KnownARTFields["ZMomentumDensity"]._projected_units = r"\rm{g}/\rm{s}/\rm{cm}^2"
+KnownARTFields["ZMomentumDensity"]._convert_function = _convertZMomentumDensity
 
 def _convertPressure(data):
     return data.convert("Pressure")
-KnownARTFields["Pressure"]._units = r"\rm{g}/\rm{cm}/\rm{s}^2"
+KnownARTFields["Pressure"]._units = r"\rm{g}/\rm{s}^2/\rm{cm}^1"
 KnownARTFields["Pressure"]._projected_units = r"\rm{g}/\rm{s}^2"
-KnownARTFields["Pressure"]._convert_function=_convertPressure
+KnownARTFields["Pressure"]._convert_function = _convertPressure
 
 def _convertGamma(data):
     return 1.0
 KnownARTFields["Gamma"]._units = r""
 KnownARTFields["Gamma"]._projected_units = r""
-KnownARTFields["Gamma"]._convert_function=_convertGamma
+KnownARTFields["Gamma"]._convert_function = _convertGamma
 
 def _convertGasEnergy(data):
     return data.convert("GasEnergy")
-KnownARTFields["GasEnergy"]._units = r"\rm{ergs}/\rm{g}"
-KnownARTFields["GasEnergy"]._projected_units = r""
-KnownARTFields["GasEnergy"]._convert_function=_convertGasEnergy
+KnownARTFields["GasEnergy"]._units = r"\rm{g}\rm{cm}^2/\rm{s}^2"
+KnownARTFields["GasEnergy"]._projected_units = r"\rm{g}\rm{cm}^3/\rm{s}^2"
+KnownARTFields["GasEnergy"]._convert_function = _convertGasEnergy
 
 def _convertMetalDensitySNII(data):
     return data.convert('Density')
 KnownARTFields["MetalDensitySNII"]._units = r"\rm{g}/\rm{cm}^3"
 KnownARTFields["MetalDensitySNII"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["MetalDensitySNII"]._convert_function=_convertMetalDensitySNII
+KnownARTFields["MetalDensitySNII"]._convert_function = _convertMetalDensitySNII
 
 def _convertMetalDensitySNIa(data):
     return data.convert('Density')
 KnownARTFields["MetalDensitySNIa"]._units = r"\rm{g}/\rm{cm}^3"
 KnownARTFields["MetalDensitySNIa"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["MetalDensitySNIa"]._convert_function=_convertMetalDensitySNIa
+KnownARTFields["MetalDensitySNIa"]._convert_function = _convertMetalDensitySNIa
 
 def _convertPotentialNew(data):
     return data.convert("Potential")
-KnownARTFields["PotentialNew"]._units = r"\rm{g}/\rm{cm}^3"
-KnownARTFields["PotentialNew"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["PotentialNew"]._convert_function=_convertPotentialNew
+KnownARTFields["PotentialNew"]._units = r"\rm{g}\rm{cm}^2/\rm{s}^2"
+KnownARTFields["PotentialNew"]._projected_units = r"\rm{g}\rm{cm}^3/\rm{s}^2"
+KnownARTFields["PotentialNew"]._convert_function = _convertPotentialNew
 
 def _convertPotentialOld(data):
     return data.convert("Potential")
-KnownARTFields["PotentialOld"]._units = r"\rm{g}/\rm{cm}^3"
-KnownARTFields["PotentialOld"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["PotentialOld"]._convert_function=_convertPotentialOld
+KnownARTFields["PotentialOld"]._units = r"\rm{g}\rm{cm}^2/\rm{s}^2"
+KnownARTFields["PotentialOld"]._projected_units = r"\rm{g}\rm{cm}^3/\rm{s}^2"
+KnownARTFields["PotentialOld"]._convert_function = _convertPotentialOld
 
 ####### Derived fields
+def _temperature(field, data):
+    tr = data["GasEnergy"]/data["Density"]
+    tr /= data.pf.conversion_factors["GasEnergy"]
+    tr *= data.pf.conversion_factors["Density"]
+    tr *= data.pf.conversion_factors['tr']
+    return tr
 
-def _temperature(field, data):
-    dg = data["GasEnergy"] #.astype('float64')
-    dg /= data.pf.conversion_factors["GasEnergy"]
-    dd = data["Density"] #.astype('float64')
-    dd /= data.pf.conversion_factors["Density"]
-    tr = dg/dd*data.pf.conversion_factors['tr']
-    #ghost cells have zero density?
-    tr[np.isnan(tr)] = 0.0
-    #dd[di] = -1.0
-    #if data.id==460:
-    #tr[di] = -1.0 #replace the zero-density points with zero temp
-    #print tr.min()
-    #assert np.all(np.isfinite(tr))
-    return tr
 def _converttemperature(data):
-    #x = data.pf.conversion_factors["Temperature"]
-    x = 1.0
-    return x
-add_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
+    return 1.0
+add_field("Temperature", function=_temperature,
+          units=r"\mathrm{K}", take_log=True)
 ARTFieldInfo["Temperature"]._units = r"\mathrm{K}"
 ARTFieldInfo["Temperature"]._projected_units = r"\mathrm{K}"
-#ARTFieldInfo["Temperature"]._convert_function=_converttemperature
 
 def _metallicity_snII(field, data):
-    tr  = data["MetalDensitySNII"] / data["Density"]
+    tr = data["MetalDensitySNII"] / data["Density"]
     return tr
-add_field("Metallicity_SNII", function=_metallicity_snII, units = r"\mathrm{K}",take_log=True)
+add_field("Metallicity_SNII", function=_metallicity_snII,
+          units=r"\mathrm{K}", take_log=True)
 ARTFieldInfo["Metallicity_SNII"]._units = r""
 ARTFieldInfo["Metallicity_SNII"]._projected_units = r""
 
 def _metallicity_snIa(field, data):
-    tr  = data["MetalDensitySNIa"] / data["Density"]
+    tr = data["MetalDensitySNIa"] / data["Density"]
     return tr
-add_field("Metallicity_SNIa", function=_metallicity_snIa, units = r"\mathrm{K}",take_log=True)
+add_field("Metallicity_SNIa", function=_metallicity_snIa,
+          units=r"\mathrm{K}", take_log=True)
 ARTFieldInfo["Metallicity_SNIa"]._units = r""
 ARTFieldInfo["Metallicity_SNIa"]._projected_units = r""
 
 def _metallicity(field, data):
-    tr  = data["Metal_Density"] / data["Density"]
+    tr = data["Metal_Density"] / data["Density"]
     return tr
-add_field("Metallicity", function=_metallicity, units = r"\mathrm{K}",take_log=True)
+add_field("Metallicity", function=_metallicity,
+          units=r"\mathrm{K}", take_log=True)
 ARTFieldInfo["Metallicity"]._units = r""
 ARTFieldInfo["Metallicity"]._projected_units = r""
 
-def _x_velocity(field,data):
-    tr  = data["XMomentumDensity"]/data["Density"]
+def _x_velocity(field, data):
+    tr = data["XMomentumDensity"]/data["Density"]
     return tr
-add_field("x-velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
+add_field("x-velocity", function=_x_velocity,
+          units=r"\mathrm{cm/s}", take_log=False)
 ARTFieldInfo["x-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["x-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _y_velocity(field,data):
-    tr  = data["YMomentumDensity"]/data["Density"]
+def _y_velocity(field, data):
+    tr = data["YMomentumDensity"]/data["Density"]
     return tr
-add_field("y-velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
+add_field("y-velocity", function=_y_velocity,
+          units=r"\mathrm{cm/s}", take_log=False)
 ARTFieldInfo["y-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["y-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _z_velocity(field,data):
-    tr  = data["ZMomentumDensity"]/data["Density"]
+def _z_velocity(field, data):
+    tr = data["ZMomentumDensity"]/data["Density"]
     return tr
-add_field("z-velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
+add_field("z-velocity", function=_z_velocity,
+          units=r"\mathrm{cm/s}", take_log=False)
 ARTFieldInfo["z-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["z-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
 def _metal_density(field, data):
-    tr  = data["MetalDensitySNIa"]
+    tr = data["MetalDensitySNIa"]
     tr += data["MetalDensitySNII"]
     return tr
-add_field("Metal_Density", function=_metal_density, units = r"\mathrm{K}",take_log=True)
-ARTFieldInfo["Metal_Density"]._units = r""
-ARTFieldInfo["Metal_Density"]._projected_units = r""
+add_field("Metal_Density", function=_metal_density,
+          units=r"\mathrm{K}", take_log=True)
+ARTFieldInfo["Metal_Density"]._units = r"\rm{g}/\rm{cm}^3"
+ARTFieldInfo["Metal_Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
 
+# Particle fields
+def _particle_age(field, data):
+    tr = data["particle_creation_time"]
+    return data.pf.current_time - tr
+add_field("particle_age", function=_particle_age, units=r"\mathrm{s}",
+          take_log=True, particle_type=True)
 
-#Particle fields
+def spread_ages(ages, spread=1.0e7*365*24*3600):
+    # stars are formed in lumps; spread out the ages linearly
+    da = np.diff(ages)
+    assert np.all(da <= 0)
+    # ages should always be decreasing, and ordered so
+    agesd = np.zeros(ages.shape)
+    idx, = np.where(da < 0)
+    idx += 1  # mark the right edges
+    # spread this age evenly out to the next age
+    lidx = 0
+    lage = 0
+    for i in idx:
+        n = i-lidx  # n stars affected
+        rage = ages[i]
+        lage = max(rage-spread, 0.0)
+        agesd[lidx:i] = np.linspace(lage, rage, n)
+        lidx = i
+    # handle the final run of ages, which the loop above misses
+    n = agesd.shape[0]-lidx
+    rage = ages[-1]
+    lage = max(rage-spread, 0.0)
+    agesd[lidx:] = np.linspace(lage, rage, n)
+    return agesd
+
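A quick sketch of spread_ages on a toy input, for orientation (not part of the patch; the values are made up):

    import numpy as np

    # three runs of identical, decreasing creation ages (seconds)
    ages = np.array([5.0e15, 5.0e15, 3.0e15, 3.0e15, 1.0e15])
    smeared = spread_ages(ages, spread=1.0e14)
    # each run of identical ages is smeared linearly over a window of at
    # most `spread` seconds, removing the artificial spikes in age histograms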
+def _particle_age_spread(field, data):
+    tr = data["particle_creation_time"]
+    return spread_ages(data.pf.current_time - tr)
+
+add_field("particle_age_spread", function=_particle_age_spread,
+          particle_type=True, take_log=True, units=r"\rm{s}")
+
+def _ParticleMassMsun(field, data):
+    return data["particle_mass"]/mass_sun_cgs
+add_field("ParticleMassMsun", function=_ParticleMassMsun, particle_type=True,
+          take_log=True, units=r"\rm{Msun}")
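For context, a hedged usage sketch of the new particle fields with the 2.x-style API (the filename and the `pf` handle are illustrative, not from this patch):

    from yt.mods import load

    pf = load("10MpcBox_csf512_a0.300.d")    # hypothetical ART output
    dd = pf.h.all_data()
    ages_s = dd["particle_age_spread"]       # seconds; see spread_ages above
    masses = dd["ParticleMassMsun"]          # solar masses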

File yt/frontends/art/io.py

 
 Author: Matthew Turk <matthewturk@gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
+Author: Chris Moody <cemoody@ucsc.edu>
+Affiliation: UCSC
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
 from yt.utilities.io_handler import \
     BaseIOHandler
 import yt.utilities.lib as au
+from yt.utilities.fortran_utils import *
 from yt.utilities.logger import ytLogger as mylog
 from yt.frontends.art.definitions import *
+from yt.utilities.physical_constants import sec_per_year
+
 
 class IOHandlerART(BaseIOHandler):
     _data_style = "art"
+    tb, ages = None, None
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         # Chunks in this case will have affiliated domain subset objects
         for chunk in chunks:
             for subset in chunk.objs:
                 # Now we read the entire thing
-                f = open(subset.domain.pf.file_amr, "rb")
+                f = open(subset.domain.pf._file_amr, "rb")
                 # This contains the boundary information, so we skim through
                 # and pick off the right vectors
-                rv = subset.fill(f, fields)
+                if subset.domain_level == 0:
+                    rv = subset.fill_root(f, fields)
+                else:
+                    rv = subset.fill_level(f, fields)
                 for ft, f in fields:
-                    mylog.debug("Filling %s with %s (%0.3e %0.3e) (%s:%s)",
-                        f, subset.cell_count, rv[f].min(), rv[f].max(),
-                        cp, cp+subset.cell_count)
+                    mylog.debug("Filling L%i %s with %s (%0.3e %0.3e) (%s:%s)",
+                                subset.domain_level,
+                                f, subset.cell_count, rv[f].min(), rv[f].max(),
+                                cp, cp+subset.cell_count)
                     tr[(ft, f)][cp:cp+subset.cell_count] = rv.pop(f)
                 cp += subset.cell_count
         return tr
 
     def _read_particle_selection(self, chunks, selector, fields):
-        size = 0
-        masks = {}
+        tr = {}
+        fields_read = []
         for chunk in chunks:
-            for subset in chunk.objs:
-                # We read the whole thing, then feed it back to the selector
-                offsets = []
-                f = open(subset.domain.part_fn, "rb")
-                foffsets = subset.domain.particle_field_offsets
-                selection = {}
-                for ax in 'xyz':
-                    field = "particle_position_%s" % ax
-                    f.seek(foffsets[field])
-                    selection[ax] = fpu.read_vector(f, 'd')
-                mask = selector.select_points(selection['x'],
-                            selection['y'], selection['z'])
-                if mask is None: continue
-                size += mask.sum()
-                masks[id(subset)] = mask
-        # Now our second pass
-        tr = dict((f, np.empty(size, dtype="float64")) for f in fields)
-        for chunk in chunks:
-            for subset in chunk.objs:
-                f = open(subset.domain.part_fn, "rb")
-                mask = masks.pop(id(subset), None)
-                if mask is None: continue
-                for ftype, fname in fields:
-                    offsets.append((foffsets[fname], (ftype,fname)))
-                for offset, field in sorted(offsets):
-                    f.seek(offset)
-                    tr[field] = fpu.read_vector(f, 'd')[mask]
+            pf = chunk.objs[0].domain.pf
+            ws, ls = pf.parameters["wspecies"], pf.parameters["lspecies"]
+            sizes = np.diff(np.concatenate(([0], ls)))
+            ptmax = ws[-1]
+            npt = ls[-1]
+            nstars = ls[-1]-ls[-2]
+            file_particle = pf._file_particle_data
+            file_stars = pf._file_particle_stars
+            ftype_old = None
+            for field in fields:
+                if field in fields_read:
+                    continue
+                ftype, fname = field
+                pbool, idxa, idxb = _determine_field_size(pf, ftype, ls, ptmax)
+                npa = idxb-idxa
+                if ftype_old != ftype:
+                    Nrow = pf.parameters["Nrow"]
+                    rp = lambda ax: read_particles(
+                        file_particle, Nrow, idxa=idxa,
+                        idxb=idxb, field=ax)
+                    x, y, z = (rp(ax) for ax in 'xyz')
+                    dd = pf.domain_dimensions[0]
+                    off = 1.0/dd
+                    x, y, z = (t.astype('f8')/dd - off for t in (x, y, z))
+                    mask = selector.select_points(x, y, z)
+                    size = mask.sum()
+                for ax in 'xyz':
+                    if fname.startswith("particle_position_%s" % ax):
+                        # map the axis name to the array read above, rather
+                        # than the fragile vars() lookup
+                        tr[field] = {'x': x, 'y': y, 'z': z}[ax]
+                    if fname.startswith("particle_velocity_%s" % ax):
+                        tr[field] = rp('v'+ax)
+                if fname == "particle_mass":
+                    a = 0
+                    data = np.zeros(npa, dtype='f8')
+                    for ptb, size, m in zip(pbool, sizes, ws):
+                        if ptb:
+                            data[a:a+size] = m
+                            a += size
+                    tr[field] = data
+                elif fname == "particle_index":
+                    tr[field] = np.arange(idxa, idxb).astype('int64')
+                elif fname == "particle_type":
+                    a = 0
+                    data = np.zeros(npa, dtype='int')
+                    for i, (ptb, size) in enumerate(zip(pbool, sizes)):
+                        if ptb:
+                            data[a:a+size] = i
+                            a += size
+                    tr[field] = data
+                if pbool[-1] and fname in particle_star_fields:
+                    data = read_star_field(file_stars, field=fname)
+                    temp = tr.get(field, np.zeros(npa, 'f8'))
+                    if nstars > 0:
+                        temp[-nstars:] = data
+                    tr[field] = temp
+                if fname == "particle_creation_time":
+                    self.tb, self.ages, data = interpolate_ages(
+                        tr[field][-nstars:],
+                        file_stars,
+                        self.tb,
+                        self.ages,
+                        pf.current_time)
+                    temp = tr.get(field, np.zeros(npa, 'f8'))
+                    temp[-nstars:] = data
+                    tr[field] = temp
+                    del data
+                tr[field] = tr[field][mask]
+                ftype_old = ftype
+                fields_read.append(field)
+        if not tr:
+            tr = dict((f, np.array([])) for f in fields)
         return tr
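The off = 1.0/dd shift above converts ART's 1-based root-grid coordinates into yt's unitary [0, 1) domain; the same arithmetic in isolation (dd and the raw values are hypothetical):

    import numpy as np

    dd = 128                                  # hypothetical root-grid size
    raw = np.array([1.0, 64.5, 128.0], 'f4')  # ART grid coordinates in [1, dd+1)
    off = 1.0 / dd
    unitary = raw.astype('f8') / dd - off     # equivalently (raw - 1) / dd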
 
 
-def _count_art_octs(f, offset, 
-                   MinLev, MaxLevelNow):
-    level_oct_offsets= [0,]
-    level_child_offsets= [0,]
+def _determine_field_size(pf, field, lspecies, ptmax):
+    pbool = np.zeros(len(lspecies), dtype="bool")
+    idxas = np.concatenate(([0, ], lspecies[:-1]))
+    idxbs = lspecies
+    if "specie" in field:
+        index = int(field.replace("specie", ""))
+        pbool[index] = True
+    elif field == "stars":
+        pbool[-1] = True
+    elif field == "darkmatter":
+        pbool[0:-1] = True
+    else:
+        pbool[:] = True
+    idxa, idxb = idxas[pbool][0], idxbs[pbool][-1]
+    return pbool, idxa, idxb
+
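A worked example of the species bookkeeping in _determine_field_size (lspecies holds cumulative particle counts; pf and ptmax are accepted but unused, so placeholders suffice here):

    import numpy as np

    lspecies = np.array([100, 150, 160])  # cumulative counts per species
    pbool, idxa, idxb = _determine_field_size(None, "stars", lspecies, None)
    # pbool -> [False, False, True]; idxa, idxb -> 150, 160
    pbool, idxa, idxb = _determine_field_size(None, "darkmatter", lspecies, None)
    # pbool -> [True, True, False]; idxa, idxb -> 0, 150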
+
+def interpolate_ages(data, file_stars, interp_tb=None, interp_ages=None,
+                     current_time=None):
+    if interp_tb is None:
+        tdum, adum = read_star_field(file_stars,
+                                     field="tdum")
+        # timestamp of file should match amr timestamp
+        if current_time:
+            tdiff = b2t(tdum)-current_time/(sec_per_year*1e9)
+            if np.abs(tdiff) > 1e-4:
+                mylog.info("Timestamp mismatch in star " +
+                           "particle header")
+        mylog.info("Interpolating ages")
+        interp_tb, interp_ages = b2t(data)
+    temp = np.interp(data, interp_tb, interp_ages)
+    temp *= 1.0e9*sec_per_year
+    return interp_tb, interp_ages, temp
+
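The cached (interp_tb, interp_ages) pair is just an interpolation table built once from b2t; the conversion step, sketched with made-up values:

    import numpy as np

    sec_per_year = 3.1556926e7               # approx; see yt.utilities.physical_constants
    tb = np.linspace(0.0, 1.0, 16)           # code birth times (hypothetical)
    ages_gyr = np.linspace(13.7, 0.0, 16)    # matching ages in Gyr (hypothetical)
    t_birth = np.array([0.25, 0.5])
    age_s = np.interp(t_birth, tb, ages_gyr) * 1.0e9 * sec_per_year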
+
+def _count_art_octs(f, offset,
+                    MinLev, MaxLevelNow):
+    level_oct_offsets = [0, ]
+    level_child_offsets = [0, ]
     f.seek(offset)
-    nchild,ntot=8,0
+    nchild, ntot = 8, 0
     Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
     iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
     iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
     for Lev in xrange(MinLev + 1, MaxLevelNow+1):
         level_oct_offsets.append(f.tell())
 
-        #Get the info for this level, skip the rest
-        #print "Reading oct tree data for level", Lev
-        #print 'offset:',f.tell()
-        Level[Lev], iNOLL[Lev], iHOLL[Lev] = struct.unpack(
-           '>iii', _read_record(f))
-        #print 'Level %i : '%Lev, iNOLL
-        #print 'offset after level record:',f.tell()
+        # Get the info for this level, skip the rest
+        # print "Reading oct tree data for level", Lev
+        # print 'offset:',f.tell()
+        Level[Lev], iNOLL[Lev], iHOLL[Lev] = read_vector(f, 'i', '>')
+        # print 'Level %i : '%Lev, iNOLL
+        # print 'offset after level record:',f.tell()
         iOct = iHOLL[Lev] - 1
         nLevel = iNOLL[Lev]
         nLevCells = nLevel * nchild
         ntot = ntot + nLevel
 
-        #Skip all the oct hierarchy data
-        ns = _read_record_size(f)
+        # Skip all the oct hierarchy data
+        ns = peek_record_size(f, endian='>')
         size = struct.calcsize('>i') + ns + struct.calcsize('>i')
         f.seek(f.tell()+size * nLevel)
 
         level_child_offsets.append(f.tell())
-        #Skip the child vars data
-        ns = _read_record_size(f)
+        # Skip the child vars data
+        ns = peek_record_size(f, endian='>')
         size = struct.calcsize('>i') + ns + struct.calcsize('>i')
         f.seek(f.tell()+size * nLevel*nchild)
 
-        #find nhydrovars
+        # find nhydrovars
         nhydrovars = 8+2
     f.seek(offset)
     return nhydrovars, iNOLL, level_oct_offsets, level_child_offsets
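The record skipping above leans on the Fortran unformatted layout: a 4-byte payload length, the payload, then the length repeated. A generic sketch, independent of the fortran_utils helpers:

    import struct

    def skip_fortran_record(fh, endian='>'):
        # leading 4-byte record length, payload, trailing copy of the length
        n, = struct.unpack(endian + 'i', fh.read(4))
        fh.seek(n, 1)   # jump over the payload
        fh.read(4)      # consume the trailing length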
 
-def _read_art_level_info(f, level_oct_offsets,level,coarse_grid=128):
+
+def _read_art_level_info(f, level_oct_offsets, level, coarse_grid=128,
+                         ncell0=None, root_level=None):
     pos = f.tell()
     f.seek(level_oct_offsets[level])
-    #Get the info for this level, skip the rest
-    junk, nLevel, iOct = struct.unpack(
-       '>iii', _read_record(f))
-    
-    #fortran indices start at 1
-    
-    #Skip all the oct hierarchy data
-    le     = np.zeros((nLevel,3),dtype='int64')
-    fl     = np.ones((nLevel,6),dtype='int64')
-    iocts  = np.zeros(nLevel+1,dtype='int64')
-    idxa,idxb = 0,0
-    chunk = long(1e6) #this is ~111MB for 15 dimensional 64 bit arrays
+    # Get the info for this level, skip the rest
+    junk, nLevel, iOct = read_vector(f, 'i', '>')
+
+    # fortran indices start at 1
+
+    # Skip all the oct hierarchy data
+    le = np.zeros((nLevel, 3), dtype='int64')
+    fl = np.ones((nLevel, 6), dtype='int64')
+    iocts = np.zeros(nLevel+1, dtype='int64')
+    idxa, idxb = 0, 0
+    chunk = long(1e6)  # this is ~111MB for 15 dimensional 64 bit arrays
     left = nLevel
-    while left > 0 :
-        this_chunk = min(chunk,left)
-        idxb=idxa+this_chunk
-        data = np.fromfile(f,dtype='>i',count=this_chunk*15)
-        data=data.reshape(this_chunk,15)
-        left-=this_chunk
-        le[idxa:idxb,:] = data[:,1:4]
-        fl[idxa:idxb,1] = np.arange(idxa,idxb)
-        #pad byte is last, LL2, then ioct right before it
-        iocts[idxa:idxb] = data[:,-3] 
-        idxa=idxa+this_chunk
+    while left > 0:
+        this_chunk = min(chunk, left)
+        idxb = idxa+this_chunk
+        data = np.fromfile(f, dtype='>i', count=this_chunk*15)
+        data = data.reshape(this_chunk, 15)
+        left -= this_chunk
+        le[idxa:idxb, :] = data[:, 1:4]
+        fl[idxa:idxb, 1] = np.arange(idxa, idxb)
+        # pad byte is last, LL2, then ioct right before it
+        iocts[idxa:idxb] = data[:, -3]
+        idxa = idxa+this_chunk
     del data
 
-    #emulate fortran code
+    # emulate fortran code
     #     do ic1 = 1 , nLevel
     #       read(19) (iOctPs(i,iOct),i=1,3),(iOctNb(i,iOct),i=1,6),
-    #&                iOctPr(iOct), iOctLv(iOct), iOctLL1(iOct), 
+    #&                iOctPr(iOct), iOctLv(iOct), iOctLL1(iOct),
     #&                iOctLL2(iOct)
     #       iOct = iOctLL1(iOct)
-    
-    #ioct always represents the index of the next variable
-    #not the current, so shift forward one index
-    #the last index isn't used
+
+    # ioct always represents the index of the next variable
+    # not the current, so shift forward one index
+    # the last index isn't used
     ioctso = iocts.copy()
-    iocts[1:]=iocts[:-1] #shift
-    iocts = iocts[:nLevel] #chop off the last, unused, index
-    iocts[0]=iOct #starting value
+    iocts[1:] = iocts[:-1]  # shift
+    iocts = iocts[:nLevel]  # chop off the last, unused, index
+    iocts[0] = iOct  # starting value
 
-    #now correct iocts for fortran indices start @ 1
+    # now correct iocts for fortran indices start @ 1
     iocts = iocts-1
 
     assert np.unique(iocts).shape[0] == nLevel
-    
-    #left edges are expressed as if they were on 
-    #level 15, so no matter what level max(le)=2**15 
-    #correct to the yt convention
-    #le = le/2**(root_level-1-level)-1
 
-    #try to find the root_level first
-    root_level=np.floor(np.log2(le.max()*1.0/coarse_grid))
-    root_level = root_level.astype('int64')
+    # left edges are expressed as if they were on
+    # level 15, so no matter what level max(le)=2**15
+    # correct to the yt convention
+    # le = le/2**(root_level-1-level)-1
 
-    #try without the -1
-    le = le/2**(root_level+1-level)-1
+    # try to find the root_level first
+    def cfc(root_level, level, le):
+        d_x = 1.0/(2.0**(root_level-level+1))
+        fc = (d_x * le) - 2**(level-1)
+        return fc
+    if root_level is None:
+        root_level = np.floor(np.log2(le.max()*1.0/coarse_grid))
+        root_level = root_level.astype('int64')
+        for i in range(10):
+            fc = cfc(root_level, level, le)
+            go = np.diff(np.unique(fc)).min() < 1.1
+            if go:
+                break
+            root_level += 1
+    else:
+        fc = cfc(root_level, level, le)
+    unitary_center = fc/(coarse_grid*2.0**(level-1))
+    assert np.all(unitary_center < 1.0)
 
-    #now read the hvars and vars arrays
-    #we are looking for iOctCh
-    #we record if iOctCh is >0, in which it is subdivided
-    #iOctCh  = np.zeros((nLevel+1,8),dtype='bool')
-    
+    # again emulate the fortran code
+    # This is all for calculating child oct locations
+    # iC_ = iC + nbshift
+    # iO = ishft ( iC_ , - ndim )
+    # id = ishft ( 1, MaxLevel - iOctLv(iO) )
+    # j  = iC_ + 1 - ishft( iO , ndim )
+    # Posx   = d_x * (iOctPs(1,iO) + sign ( id , idelta(j,1) ))
+    # Posy   = d_x * (iOctPs(2,iO) + sign ( id , idelta(j,2) ))
+    # Posz   = d_x * (iOctPs(3,iO) + sign ( id , idelta(j,3) ))
+    # idelta = [[-1,  1, -1,  1, -1,  1, -1,  1],
+    #           [-1, -1,  1,  1, -1, -1,  1,  1],
+    #           [-1, -1, -1, -1,  1,  1,  1,  1]]
+    # idelta = np.array(idelta)
+    # if ncell0 is None:
+        # ncell0 = coarse_grid**3
+    # nchild = 8
+    # ndim = 3
+    # nshift = nchild -1
+    # nbshift = nshift - ncell0
+    # iC = iocts #+ nbshift
+    # iO = iC >> ndim #possibly >>
+    # id = 1 << (root_level - level)
+    # j = iC + 1 - ( iO << 3)
+    # delta = np.abs(id)*idelta[:,j-1]
+
+    # now read the hvars and vars arrays
+    # we are looking for iOctCh
+    # we record if iOctCh is >0, in which case it is subdivided
+    # iOctCh  = np.zeros((nLevel+1,8),dtype='bool')
     f.seek(pos)
-    return le,fl,iocts,nLevel,root_level
+    return unitary_center, fl, iocts, nLevel, root_level
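A toy check of the left-edge rescaling above, assuming a 128^3 root grid (so root_level = 7) and level = 1; the edge values are made up:

    import numpy as np

    root_level, level, coarse_grid = 7, 1, 128
    le = np.array([128, 4096], dtype='int64')
    d_x = 1.0 / (2.0 ** (root_level - level + 1))
    fc = d_x * le - 2 ** (level - 1)                   # cfc(root_level, level, le)
    unitary = fc / (coarse_grid * 2.0 ** (level - 1))  # 0.0 and ~0.242, both < 1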
 
 
-def read_particles(file,Nrow):
-    words = 6 # words (reals) per particle: x,y,z,vx,vy,vz
-    real_size = 4 # for file_particle_data; not always true?
-    np_per_page = Nrow**2 # defined in ART a_setup.h
+def read_particles(file, Nrow, idxa=None, idxb=None, field=None):
+    words = 6  # words (reals) per particle: x,y,z,vx,vy,vz
+    real_size = 4  # for file_particle_data; not always true?
+    np_per_page = Nrow**2  # defined in ART a_setup.h
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
+    data = np.array([], 'f4')
+    fh = open(file, 'rb')
+    totalp = idxb-idxa
+    left = totalp
+    for page in range(num_pages):
+        for i, fname in enumerate(['x', 'y', 'z', 'vx', 'vy', 'vz']):
+            if i == field or fname == field:
+                if idxa is not None:
+                    fh.seek(real_size*idxa, 1)
+                    count = min(np_per_page, left)
+                    temp = np.fromfile(fh, count=count, dtype='>f4')
+                    pageleft = np_per_page-count-idxa
+                    fh.seek(real_size*pageleft, 1)
+                    left -= count
+                else:
+                    count = np_per_page
+                    temp = np.fromfile(fh, count=count, dtype='>f4')
+                data = np.concatenate((data, temp))
+            else:
+                fh.seek(real_size*np_per_page, 1)
+    return data
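The paging arithmetic assumes the fixed ART particle-file layout; the sizes involved, with a hypothetical Nrow:

    Nrow = 256                        # hypothetical; read from the header
    np_per_page = Nrow ** 2           # particles per page, per field
    page_bytes = 4 * 6 * np_per_page  # real_size * words (x, y, z, vx, vy, vz)
    # a file of size S therefore holds S // page_bytes pages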
 
-    f = np.fromfile(file, dtype='>f4').astype('float32') # direct access
-    pages = np.vsplit(np.reshape(f, (num_pages, words, np_per_page)), num_pages)
-    data = np.squeeze(np.dstack(pages)).T # x,y,z,vx,vy,vz
-    return data[:,0:3],data[:,3:]
 
-def read_star_field(file,field=None):
+def read_star_field(file, field=None):
     data = {}
-    with open(file,'rb') as fh:
+    with open(file, 'rb') as fh:
         for dtype, variables in star_struct:
-            if field in variables or dtype=='>d' or dtype=='>d':
-                data[field] = _read_frecord(fh,'>f')
+            found = field in variables or field == variables
+            if found:
+                data[field] = read_vector(fh, dtype[1], dtype[0])
             else:
-                _skip_record(fh)
-    return data.pop(field),data
+                skip(fh, endian='>')
+    return data.pop(field)
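For reference, star_struct pairs an endian-qualified dtype with the variable name or names stored in each Fortran record, which is what lets the lookup above read only the requested field; a hypothetical shape:

    # hypothetical entries; the real list lives in yt/frontends/art/definitions.py
    star_struct = [
        ('>d', ('tdum', 'adum')),  # two doubles packed in one record
        ('>i', 'nstars'),
        ('>f', 'mass'),
    ]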
 
-def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
+
+def _read_child_mask_level(f, level_child_offsets, level, nLevel, nhydro_vars):
     f.seek(level_child_offsets[level])
-    nvals = nLevel * (nhydro_vars + 6) # 2 vars, 2 pads
-    ioctch = np.zeros(nLevel,dtype='uint8')
-    idc = np.zeros(nLevel,dtype='int32')
-    
+    nvals = nLevel * (nhydro_vars + 6)  # 2 vars, 2 pads
+    ioctch = np.zeros(nLevel, dtype='uint8')
+    idc = np.zeros(nLevel, dtype='int32')
+
     chunk = long(1e6)
     left = nLevel
     width = nhydro_vars+6
-    a,b=0,0
+    a, b = 0, 0
     while left > 0:
-        chunk = min(chunk,left)
+        chunk = min(chunk, left)
         b += chunk
         arr = np.fromfile(f, dtype='>i', count=chunk*width)
         arr = arr.reshape((width, chunk), order="F")
-        assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
-        idc[a:b]    = arr[1,:]-1 #fix fortran indexing
-        ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined available
-        #zero in the mask means there is refinement available
-        a=b
+        assert np.all(arr[0, :] == arr[-1, :])  # pads must be equal
+        idc[a:b] = arr[1, :]-1  # fix fortran indexing
+        # iOctCh > 0 means the cell is subdivided; store a mask of leaves
+        ioctch[a:b] = arr[2, :] == 0
+        a = b
         left -= chunk
-    assert left==0
-    return idc,ioctch
-    
-nchem=8+2
-dtyp = np.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
-                ",>%sf4"%(2)+",>i4")
-def _read_child_level(f,level_child_offsets,level_oct_offsets,level_info,level,
-                      fields,domain_dimensions,ncell0,nhydro_vars=10,nchild=8):
-    #emulate the fortran code for reading cell data
-    #read ( 19 ) idc, iOctCh(idc), (hvar(i,idc),i=1,nhvar), 
+    assert left == 0
+    return idc, ioctch
+
+nchem = 8+2
+dtyp = np.dtype(">i4,>i8,>i8"+",>%sf4" % (nchem) +
+                ",>%sf4" % (2)+",>i4")
+
+
+def _read_child_level(
+        f, level_child_offsets, level_oct_offsets, level_info, level,
+        fields, domain_dimensions, ncell0, nhydro_vars=10, nchild=8,
+        noct_range=None):
+    # emulate the fortran code for reading cell data
+    # read ( 19 ) idc, iOctCh(idc), (hvar(i,idc),i=1,nhvar),
     #    &                 (var(i,idc), i=2,3)
-    #contiguous 8-cell sections are for the same oct;
-    #ie, we don't write out just the 0 cells, then the 1 cells
-    left_index, fl, octs, nocts,root_level = _read_art_level_info(f, 
-        level_oct_offsets,level, coarse_grid=domain_dimensions[0])
-    nocts = level_info[level]
-    ncells = nocts*8
-    f.seek(level_child_offsets[level])
-    arr = np.fromfile(f,dtype=hydro_struct,count=ncells)
-    assert np.all(arr['pad1']==arr['pad2']) #pads must be equal
-    #idc = np.argsort(arr['idc']) #correct fortran indices
-    #translate idc into icell, and then to iOct
-    icell = (arr['idc'] >> 3) << 3
-    iocts = (icell-ncell0)/nchild #without a F correction, theres a +1
-    #assert that the children are read in the same order as the octs
-    assert np.all(octs==iocts[::nchild]) 
-    if len(fields)>1:
-        vars = np.concatenate((arr[field] for field in fields))
+    # contiguous 8-cell sections are for the same oct;
+    # ie, we don't write out just the 0 cells, then the 1 cells
+    # optionally, we only read noct_range to save memory
+    left_index, fl, octs, nocts, root_level = _read_art_level_info(
+        f, level_oct_offsets, level, coarse_grid=domain_dimensions[0])
+    if noct_range is None:
+        nocts = level_info[level]
+        ncells = nocts*8
+        f.seek(level_child_offsets[level])
+        arr = np.fromfile(f, dtype=hydro_struct, count=ncells)
+        assert np.all(arr['pad1'] == arr['pad2'])  # pads must be equal
+        # idc = np.argsort(arr['idc']) #correct fortran indices
+        # translate idc into icell, and then to iOct
+        icell = (arr['idc'] >> 3) << 3
+        iocts = (icell-ncell0)/nchild  # without a Fortran correction, there's a +1
+        # assert that the children are read in the same order as the octs
+        assert np.all(octs == iocts[::nchild])
     else: