Christopher Moody avatar Christopher Moody committed b1cb776

adding log info. starting on memory optimization; loading left edges by level.

Comments (0)

Files changed (3)

yt/frontends/art/data_structures.py

         LEVEL_OF_EDGE = 7
         MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
         
-        min_eff = 0.2
+        min_eff = 0.40
         
         f = open(self.pf.parameter_filename,'rb')
         self.pf.nhydro_vars, self.pf.level_info, self.pf.level_offsetsa = \
                 self.proto_grids.append([])
                 continue
             ggi = (ogrid_levels == level).ravel()
-            mylog.info("Re-gridding level %s: %s octree grids", level, ggi.sum())
             nd = self.pf.domain_dimensions * 2**level
             dims = na.ones((ggi.sum(), 3), dtype='int64') * 2
             fl = ogrid_file_locations[ggi,:]
                         hilbert_indices, unique_indices, left_index, fl)
             
             #iterate over the domains    
-            pbar = get_pbar("Re-gridding ", len(locs))
-            import pdb; pdb.set_trace()
+            step=0
+            pbar = get_pbar("Re-gridding  Level %i"%level, len(locs))
+            psg_eff = []
+            psg_dep = []
             for ddleft_index, ddfl in zip(lefts, locs):
                 #iterate over just the unique octs
                 #why would we ever have non-unique octs?
                 #that only partially fill the grid,it  may be more efficient
                 #to split large patches into smaller patches. We split
                 #if less than 10% the volume of a patch is covered with octs
-                psgs.extend(_ramses_reader.recursive_patch_splitting(
+                psg_split = _ramses_reader.recursive_patch_splitting(
                     psg, idims, initial_left, 
-                    dleft_index, dfl,min_eff=min_eff))
+                    dleft_index, dfl,min_eff=min_eff)
+                    
+                psgs.extend(psg_split)
+                
+                tol = 1.00001
+                psg_eff  += [x.efficiency for x in psg_split] 
+                psg_dep  += [x.num_deep for x in psg_split] 
+                
                 step+=1
                 pbar.update(step)
+            eff_mean = na.mean(psg_eff)
+            eff_nmin = na.sum([e<=min_eff*tol for e in psg_eff])
+            eff_nall = len(psg_eff)
+            dep_mean = na.rint(na.mean(psg_dep))
+            mylog.info("Average subgrid efficiency %02.1f %% and average depth %i",
+                        eff_mean*100.0, dep_mean)
+            mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
+                        eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
+            mylog.info("Re-gridding level %i: %s octree grids", level, ggi.sum())
+            
+        
             mylog.debug("Done with level % 2i", level)
             pbar.finish()
             self.proto_grids.append(psgs)
-            print sum(len(psg.grid_file_locations) for psg in psgs)
-            mylog.info("Final grid count: %s", len(self.proto_grids[level]))
+            #print sum(len(psg.grid_file_locations) for psg in psgs)
+            #mylog.info("Final grid count: %s", len(self.proto_grids[level]))
             if len(self.proto_grids[level]) == 1: continue
         self.num_grids = sum(len(l) for l in self.proto_grids)
                     
         #print 'offset:',f.tell()
         Level[Lev], iNOLL[Lev], iHOLL[Lev] = struct.unpack(
            '>iii', _read_record(f))
-        print 'Level %i : '%Lev, iNOLL
+        #print 'Level %i : '%Lev, iNOLL
         #print 'offset after level record:',f.tell()
         iOct = iHOLL[Lev] - 1
         nLevel = iNOLL[Lev]
     f.seek(offset)
     return nhydrovars, iNOLL, level_offsets
 
def _read_art_level(f, level_offsets, level):
    """Read the oct left edges and file locations for a single octree level.

    Seeks to the recorded byte offset of *level*, reads the level header
    record, then streams the oct hierarchy data in bounded chunks so peak
    memory stays small (this is the start of the by-level loading
    optimization).  The file position is restored before returning.

    Parameters
    ----------
    f : open binary file
        The ART amr file, positioned anywhere (position is saved/restored).
    level_offsets : sequence of int
        Byte offset of each level's header record within *f*.
    level : int
        Which level to read.

    Returns
    -------
    le : int64 ndarray of shape (nLevel, 3)
        Integer left-edge indices of each oct on this level.
    fl : int64 ndarray of shape (nLevel, 6)
        File-location info per oct; column 1 holds the sequential oct
        index on this level (other columns left zero for now).
    """
    pos = f.tell()
    f.seek(level_offsets[level])  # fixed: original sought level_offsets[leve]
    # Level header record: (level id, number of octs on level, first oct index).
    Lev, nLevel, iHOLL = struct.unpack('>iii', _read_record(f))
    iOct = iHOLL - 1  # zero-based index of the first oct on this level

    le = na.zeros((nLevel, 3), dtype='int64')
    fl = na.zeros((nLevel, 6), dtype='int64')

    # Each oct record is assumed to be 15 big-endian ints, the first three
    # of which are the left-edge indices -- TODO confirm against the writer.
    ints_per_oct = 15
    # Octs per read; bounds the temporary buffer to ~126 MB of >i4 data.
    chunk = 2 ** 20
    idxa = 0
    left = nLevel
    while left > 0:
        this = min(chunk, left)
        data = na.fromfile(f, dtype='>i', count=this * ints_per_oct)
        data = data.reshape(this, ints_per_oct)
        idxb = idxa + this
        le[idxa:idxb, :] = data[:, 0:3]
        # Column 1 records the sequential position of each oct on the level.
        fl[idxa:idxb, 1] = na.arange(idxa, idxb)
        idxa = idxb
        left -= this
        del data  # release the chunk before the next allocation
    f.seek(pos)
    return le, fl
+

yt/frontends/ramses/_ramses_reader.pyx

     cdef np.ndarray[np.int64_t, ndim=1] dims_r, li_r
     cdef int tt, ax, fp, i, j, k, gi
     cdef int tr[3]
-    if num_deep > 40:
+    if num_deep > 60:
         psg.efficiency = min_eff
         return [psg]
     if psg.efficiency > min_eff or psg.efficiency < 0.0:
     if L.efficiency <= 0.0: rv_l = []
     elif L.efficiency < min_eff:
         rv_l = recursive_patch_splitting(L, dims_l, li_l,
-                left_index, fl, num_deep + 1)
+                left_index, fl, num_deep + 1, min_eff)
     else:
         rv_l = [L]
     R = ProtoSubgrid(li_r, dims_r, left_index, fl)
     if R.efficiency <= 0.0: rv_r = []
     elif R.efficiency < min_eff:
         rv_r = recursive_patch_splitting(R, dims_r, li_r,
-                left_index, fl, num_deep + 1)
+                left_index, fl, num_deep + 1, min_eff)
     else:
         rv_r = [R]
     return rv_r + rv_l

yt/utilities/_amr_utils/fortran_reader.pyx

     # points to the start of the record *following* the reading of iOctFree and
     # nOct.  For those following along at home, we only need to read:
     #   iOctPr, iOctLv
-    print min_level, max_level 
-    
     cdef int nchild = 8
     cdef int i, Lev, cell_ind, iOct, nLevel, nLevCells, ic1
     cdef np.int64_t next_record
         fread(&readin, sizeof(int), 1, f); FIX_LONG(readin)
         iOct = iHOLL[Level] - 1
         nLevel = iNOLL[Level]
-        print "Reading Hierarchy for Level", Lev, Level, nLevel, iOct
+        #print "Reading Hierarchy for Level", Lev, Level, nLevel, iOct
         #print ftell(f)
         for ic1 in range(nLevel):
             iOctMax = max(iOctMax, iOct)
         
         #find the length of all of the children section
         child_record = ftell(f) +  (next_record+2*sizeof(int))*nLevel*nchild
-        print 'Skipping over hydro vars', ftell(f), child_record
+        #print 'Skipping over hydro vars', ftell(f), child_record
         fseek(f, child_record, SEEK_SET)
         
         # for ic1 in range(nLevel * nchild):
Tip: Filter by directory path e.g. /media app.js to search for public/media/app.js.
Tip: Use camelCasing e.g. ProjME to search for ProjectModifiedEvent.java.
Tip: Filter by extension type e.g. /repo .js to search for all .js files in the /repo directory.
Tip: Separate your search with spaces e.g. /ssh pom.xml to search for src/ssh/pom.xml.
Tip: Use ↑ and ↓ arrow keys to navigate and return to view the file.
Tip: You can also navigate files with Ctrl+j (next) and Ctrl+k (previous) and view the file with Ctrl+o.
Tip: You can also navigate files with Alt+j (next) and Alt+k (previous) and view the file with Alt+o.