Commits

Cameron Hummels committed 9c01dfb Merge

Merging.

  • Parent commits 7eb9991, a9fd65a

Files changed (56)

File .hgignore

File contents unchanged.

File .hgtags

 0000000000000000000000000000000000000000 svn.993
 fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
 0000000000000000000000000000000000000000 svn.371
+6528c562fed6f994b8d1ecabaf375ddc4707dade mpi-opaque
+0000000000000000000000000000000000000000 mpi-opaque
 f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
 0000000000000000000000000000000000000000 hop callback
-0000000000000000000000000000000000000000 hop callback

File doc/install_script.sh

 # There are a few options, but you only need to set *one* of them.  And
 # that's the next one, DEST_DIR.  But, if you want to use an existing HDF5
 # installation you can set HDF5_DIR, or if you want to use some other
-# subversion checkout of YT, you can set YT_DIR, too.  (It'll already
+# subversion checkout of yt, you can set YT_DIR, too.  (It'll already
 # check the current directory and one up.)
 #
 # And, feel free to drop me a line: matthewturk@gmail.com
 # and install it on its own
 #HDF5_DIR=
 
-# If you need to supply arguments to the NumPy build, supply them here
+# If you need to supply arguments to the NumPy or SciPy build, supply them here
 # This one turns on gfortran manually:
 #NUMPY_ARGS="--fcompiler=gnu95"
 # If you absolutely can't get the fortran to work, try this:
 INST_ROCKSTAR=0 # Install the Rockstar halo finder?
 INST_SCIPY=0    # Install scipy?
 
-# If you've got YT some other place, set this to point to it.
+# If you've got yt some other place, set this to point to it.
 YT_DIR=""
 
 # If you need to pass anything to matplotlib, do so here.
         echo "   $ module swap PE-pgi PE-gnu"
         echo
     fi
-    if [ "${MYHOSTLONG%%ranger}" != "${MYHOSTLONG}" ]
-    then
-        echo "Looks like you're on Ranger."
-        echo
-        echo "NOTE: YOU MUST BE IN THE GNU PROGRAMMING ENVIRONMENT"
-        echo "These commands should take care of that for you:"
-        echo
-        echo "   $ module unload mvapich2"
-        echo "   $ module swap pgi gcc"
-        echo "   $ module load mvapich2"
-        echo
-    fi
     if [ "${MYHOST##steele}" != "${MYHOST}" ]
     then
         echo "Looks like you're on Steele."
         echo
         echo "NOTE: you must have the Xcode command line tools installed."
         echo
-        echo "OS X 10.5: download Xcode 3.0 from the mac developer tools"
-        echo "website"
+	echo "The instructions for obtaining these tools vary according"
+	echo "to your exact OS version.  On older versions of OS X, you"
+	echo "must register for an account on the apple developer tools"
+	echo "website: https://developer.apple.com/downloads to obtain the"
+	echo "download link."
+	echo 
+	echo "We have gathered some additional instructions for each"
+	echo "version of OS X below. If you have trouble installing yt"
+	echo "after following these instructions, don't hesitate to contact"
+	echo "the yt user's e-mail list."
+	echo
+	echo "You can see which version of OSX you are running by clicking"
+	echo "'About This Mac' in the apple menu on the left hand side of"
+	echo "menu bar.  We're assuming that you've installed all operating"
+	echo "system updates; if you have an older version, we suggest"
+	echo "running software update and installing all available updates."
+	echo 
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the" 
+	echo "Apple developer tools website."
         echo
-        echo "OS X 10.6: download Xcode 3.2 from the mac developer tools"
-        echo "website"
+        echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
+	echo "developer tools website.  You can either download the"
+	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
+	echo "Software Update to update to XCode 3.2.6 or" 
+	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK" 
+	echo "bundle (4.1 GB)."
         echo
-        echo "OS X 10.7: download Xcode 4.0 from the mac app store or"
-        echo "alternatively download the Xcode command line tools from"
-        echo "the mac developer tools website"
+        echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
+	echo "(search for Xcode)."
+        echo "Alternatively, download the Xcode command line tools from"
+        echo "the Apple developer tools website."
         echo
-        echo "NOTE: You may have problems if you are running OSX 10.6 (Snow"
-        echo "Leopard) or newer.  If you do, please set the following"
-        echo "environment variables, remove any broken installation tree, and"
-        echo "re-run this script verbatim."
+	echo "OS X 10.8.2: download Xcode 4.6 from the mac app store."
+	echo "(search for Xcode)."
+	echo "Additionally, you will have to manually install the Xcode"
+	echo "command line tools, see:" 
+	echo "http://stackoverflow.com/questions/9353444"
+	echo "Alternatively, download the Xcode command line tools from"
+	echo "the Apple developer tools website."
+	echo
+        echo "NOTE: It's possible that the installation will fail.  If so,"
+	echo "please set the following environment variables, remove any" 
+	echo "broken installation tree, and re-run this script verbatim."
         echo
         echo "$ export CC=gcc-4.2"
         echo "$ export CXX=g++-4.2"
-        echo
+	echo
         OSX_VERSION=`sw_vers -productVersion`
         if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
         then
             MPL_SUPP_CXXFLAGS="${MPL_SUPP_CXXFLAGS} -mmacosx-version-min=10.7"
         fi
     fi
+    if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
+    then
+        echo "Looks like you're on an OpenSUSE-compatible machine."
+        echo
+        echo "You need to have these packages installed:"
+        echo
+        echo "  * devel_C_C++"
+        echo "  * libopenssl-devel"
+        echo "  * libuuid-devel"
+        echo "  * zip"
+        echo "  * gcc-c++"
+        echo
+        echo "You can accomplish this by executing:"
+        echo
+        echo "$ sudo zypper install -t pattern devel_C_C++"
+        echo "$ sudo zypper install gcc-c++ libopenssl-devel libuuid-devel zip"
+        echo
+        echo "I am also setting special configure arguments to Python to"
+        echo "control lib/lib64 issues."
+        PYCONF_ARGS="--libdir=${DEST_DIR}/lib"
+    fi
     if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
     then
         echo "Looks like you're on an Ubuntu-compatible machine."
         echo " to avoid conflicts with other command-line programs "
         echo " (like eog and evince, for example)."
     fi
+    if [ $INST_SCIPY -eq 1 ]
+    then
+	echo
+	echo "Looks like you've requested that the install script build SciPy."
+	echo
+	echo "If the SciPy build fails, please uncomment one of the lines"
+	echo "at the top of the install script that sets NUMPY_ARGS, delete"
+	echo "any broken installation tree, and re-run the install script"
+	echo "verbatim."
+	echo
+	echo "If that doesn't work, don't hesitate to ask for help on the yt"
+	echo "user's mailing list."
+	echo
+    fi
     if [ ! -z "${CFLAGS}" ]
     then
         echo "******************************************"
 echo
 echo "========================================================================"
 echo
-echo "Hi there!  This is the YT installation script.  We're going to download"
+echo "Hi there!  This is the yt installation script.  We're going to download"
 echo "some stuff and install it to create a self-contained, isolated"
-echo "environment for YT to run within."
+echo "environment for yt to run within."
 echo
 echo "Inside the installation script you can set a few variables.  Here's what"
 echo "they're currently set to -- you can hit Ctrl-C and edit the values in "
 echo "be installing PyX"
 
 printf "%-15s = %s so I " "INST_SCIPY" "${INST_SCIPY}"
-get_willwont ${INST_PYX}
+get_willwont ${INST_SCIPY}
 echo "be installing scipy"
 
 printf "%-15s = %s so I " "INST_0MQ" "${INST_0MQ}"
 echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
 echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
 echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
-echo '52d1127de2208aaae693d16fef10ffc9b8663081bece83b7597d65706e9568af3b9e56bd211878774e1ebed92e21365ee9c49602a0ff5e48f89f12244d79c161  mercurial-2.4.tar.gz' > mercurial-2.4.tar.gz.sha512
+echo '91693ca5f34934956a7c2c98bb69a5648b2a5660afd2ecf4a05035c5420450d42c194eeef0606d7683e267e4eaaaab414df23f30b34c88219bdd5c1a0f1f66ed  mercurial-2.5.1.tar.gz' > mercurial-2.5.1.tar.gz.sha512
 echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
 echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
 echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
 get_ytproject Python-2.7.3.tgz
 get_ytproject numpy-1.6.1.tar.gz
 get_ytproject matplotlib-1.2.0.tar.gz
-get_ytproject mercurial-2.4.tar.gz
+get_ytproject mercurial-2.5.1.tar.gz
 get_ytproject ipython-0.13.1.tar.gz
 get_ytproject h5py-2.1.0.tar.gz
 get_ytproject Cython-0.17.1.tar.gz
 
 if [ ! -e Python-2.7.3/done ]
 then
-    echo "Installing Python.  This may take a while, but don't worry.  YT loves you."
+    echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
     [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
     cd Python-2.7.3
-    ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
+    ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
 if [ $INST_HG -eq 1 ]
 then
     echo "Installing Mercurial."
-    do_setup_py mercurial-2.4
+    do_setup_py mercurial-2.5.1
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
 	    echo "Building LAPACK"
 	    cd lapack-3.4.2/
 	    cp INSTALL/make.inc.gfortran make.inc
-	    make lapacklib CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
+	    make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
 	    touch done
 	    cd ..
 	fi

File yt/analysis_modules/halo_finding/halo_objects.py

         if self.CoM is not None:
             return self.CoM
         pm = self["ParticleMassMsun"]
-        cx = self["particle_position_x"]
-        cy = self["particle_position_y"]
-        cz = self["particle_position_z"]
-        if isinstance(self, FOFHalo):
-            c_vec = np.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
-        else:
-            c_vec = self.maximum_density_location() - self.pf.domain_center
-        cx = (cx - c_vec[0])
-        cy = (cy - c_vec[1])
-        cz = (cz - c_vec[2])
-        com = np.array([v - np.floor(v) for v in [cx, cy, cz]])
-        return (com * pm).sum(axis=1) / pm.sum() + c_vec
+        c = {}
+        c[0] = self["particle_position_x"]
+        c[1] = self["particle_position_y"]
+        c[2] = self["particle_position_z"]
+        c_vec = np.zeros(3)
+        com = []
+        for i in range(3):
+            # A halo is likely periodic around a boundary if the distance
+            # between the max and min particle positions is larger than half
+            # the box, so skip the rest if the converse is true.
+            # Note we might make a change here when periodicity-handling is
+            # fully implemented.
+            if (c[i].max() - c[i].min()) < (self.pf.domain_width[i] / 2.):
+                com.append(c[i])
+                continue
+            # Now we want to flip around only those close to the left boundary.
+            d_left = c[i] - self.pf.domain_left_edge[i]
+            sel = (d_left <= (self.pf.domain_width[i]/2))
+            c[i][sel] += self.pf.domain_width[i]
+            com.append(c[i])
+        com = np.array(com)
+        c = (com * pm).sum(axis=1) / pm.sum()
+        return c%self.pf.domain_width
 
     def maximum_density(self):
         r"""Return the HOP-identified maximum density. Not applicable to
     _radjust = 1.05
 
     def __init__(self, pf, id, size=None, CoM=None,
-
         max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
         rms_vel=None, fnames=None, mag_A=None, mag_B=None, mag_C=None,
         e1_vec=None, tilt=None, supp=None):
             self.supp = {}
         else:
             self.supp = supp
+        self._saved_fields = {}
+        self._ds_sort = None
+        self._particle_mask = None
+
 
     def __getitem__(self, key):
         # This function will try to get particle data in one of three ways,
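
Note on the periodic center_of_mass logic above: when a halo straddles a
periodic boundary, particles within half a box width of the left edge are
shifted up by one domain width so the group becomes contiguous, the
mass-weighted mean is taken, and the result is wrapped back into the box with
a modulo.  A minimal standalone sketch of that idea follows (plain NumPy, not
yt code; the function name and the zero-left-edge unit box are assumptions
made for illustration):

    import numpy as np

    def periodic_center_of_mass(pos, mass, domain_width):
        # pos: (N, 3) particle positions in a periodic box with left edge at 0
        pos = np.asarray(pos, dtype="float64")
        mass = np.asarray(mass, dtype="float64")
        com = np.empty(3)
        for i in range(3):
            c = pos[:, i].copy()
            # If the spread is under half the box, the group does not wrap.
            if (c.max() - c.min()) < (domain_width[i] / 2.0):
                com[i] = np.average(c, weights=mass)
                continue
            # Shift particles near the left edge up by one box length so the
            # group is contiguous, then average and wrap back into the box.
            c[c <= domain_width[i] / 2.0] += domain_width[i]
            com[i] = np.average(c, weights=mass) % domain_width[i]
        return com

    # Two particles hugging opposite faces of a unit box average to ~0, not 0.5:
    com = periodic_center_of_mass([[0.02, 0.5, 0.5], [0.98, 0.5, 0.5]],
                                  [1.0, 1.0], np.ones(3))
    print com  # ~ [ 0.   0.5  0.5]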

File yt/analysis_modules/halo_mass_function/halo_mass_function.py

         not stored in enzo datasets, so must be entered by hand.
         sigma8input=%f primordial_index=%f omega_baryon0=%f
         """ % (self.sigma8input, self.primordial_index, self.omega_baryon0))
-        time.sleep(1)
         
         # Do the calculations.
         self.sigmaM()

File yt/analysis_modules/radial_column_density/radial_column_density.py

File contents unchanged.

File yt/config.py

     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold005',
+    gold_standard_filename = 'gold006',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None'
     )

File yt/data_objects/data_containers.py

         else:
             self.fields = ensure_list(fields)
         from yt.visualization.plot_window import \
-            GetOffAxisBoundsAndCenter, PWViewerMPL
+            GetObliqueWindowParameters, PWViewerMPL
         from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
-        (bounds, center_rot) = GetOffAxisBoundsAndCenter(normal, center, width, self.pf)
+        (bounds, center_rot, units) = GetObliqueWindowParameters(normal, center, width, self.pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
         pw = PWViewerMPL(self, bounds, origin='center-window', periodic=False, oblique=True,
                          frb_generator=ObliqueFixedResolutionBuffer, plot_type='OffAxisSlice')
         pw.set_axes_unit(axes_unit)

File yt/data_objects/hierarchy.py

         mylog.debug("Detecting fields.")
         self._detect_fields()
 
+        mylog.debug("Detecting fields in backup file")
+        self._detect_fields_backup()
+
         mylog.debug("Adding unknown detected fields")
         self._setup_unknown_fields()
 
         if self._data_file is not None:
             self._data_file.close()
 
+    def _detect_fields_backup(self):
+        # grab fields from backup file as well, if present
+        try:
+            backup_filename = self.parameter_file.backup_filename
+            f = h5py.File(backup_filename, 'r')
+            g = f["data"]
+            grid = self.grids[0] # simply check one of the grids
+            grid_group = g["grid_%010i" % (grid.id - grid._id_offset)]
+            for field_name in grid_group:
+                if field_name != 'particles':
+                    self.field_list.append(field_name)
+        except KeyError:
+            return
+        except IOError:
+            return
+
     def _get_parameters(self):
         return self.parameter_file.parameters
     parameters=property(_get_parameters)
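
For reference, _detect_fields_backup above expects the backup file (named
"<parameter file>_backup.gdf"; see the yt/data_objects/static_output.py change
below) to be an HDF5 file whose top-level "data" group contains one
"grid_%010i" group per grid, with each member dataset named after a field; a
member called "particles" is skipped.  A minimal sketch of writing such a file
with h5py (the file name and the "Entropy" field are made up for
illustration):

    import h5py
    import numpy as np

    f = h5py.File("my_run_backup.gdf", "w")
    g = f.create_group("data")
    # One group per grid; here, grid 0 with its id offset removed.
    grid = g.create_group("grid_%010i" % 0)
    grid.create_dataset("Entropy", data=np.zeros((8, 8, 8)))
    f.close()

    # With this file next to the parameter file "my_run", the hierarchy will
    # append "Entropy" to its field_list when _detect_fields_backup runs.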

File yt/data_objects/static_output.py

         self.basename = os.path.basename(filename)
         self.directory = os.path.expanduser(os.path.dirname(filename))
         self.fullpath = os.path.abspath(self.directory)
+        self.backup_filename = self.parameter_filename + '_backup.gdf'
+        self.read_from_backup = False
+        if os.path.exists(self.backup_filename):
+            self.read_from_backup = True
         if len(self.directory) == 0:
             self.directory = "."
 

File yt/data_objects/tests/test_cutting_plane.py

+from yt.testing import *
+import os
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def teardown_func(fns):
+    for fn in fns:
+        os.remove(fn)
+
+def test_cutting_plane():
+    for nprocs in [8, 1]:
+        # We want to test both 1 proc and 8 procs, to make sure that
+        # parallelism isn't broken
+        pf = fake_random_pf(64, nprocs = nprocs)
+        dims = pf.domain_dimensions
+        center = [0.5,0.5,0.5]
+        normal = [1,1,1]
+        fns = []
+        cut = pf.h.cutting(normal, center, ["Ones", "Density"])
+        yield assert_equal, cut["Ones"].sum(), cut["Ones"].size
+        yield assert_equal, cut["Ones"].min(), 1.0
+        yield assert_equal, cut["Ones"].max(), 1.0
+        pw = cut.to_pw()
+        fns += pw.save()
+        frb = cut.to_frb((1.0,'unitary'), 64)
+        for cut_field in ['Ones', 'Density']:
+            yield assert_equal, frb[cut_field].info['data_source'], \
+                cut.__str__()
+            yield assert_equal, frb[cut_field].info['axis'], \
+                4
+            yield assert_equal, frb[cut_field].info['field'], \
+                cut_field
+            yield assert_equal, frb[cut_field].info['units'], \
+                pf.field_info[cut_field].get_units()
+            yield assert_equal, frb[cut_field].info['xlim'], \
+                frb.bounds[:2]
+            yield assert_equal, frb[cut_field].info['ylim'], \
+                frb.bounds[2:]
+            yield assert_equal, frb[cut_field].info['length_to_cm'], \
+                pf['cm']
+            yield assert_equal, frb[cut_field].info['center'], \
+                cut.center
+        teardown_func(fns)

File yt/data_objects/tests/test_streamlines.py

 
 _fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
 
-def test_covering_grid():
+def test_streamlines():
     # We decompose in different ways
     cs = np.mgrid[0.47:0.53:2j,0.47:0.53:2j,0.47:0.53:2j]
     cs = np.array([a.ravel() for a in cs]).T

File yt/frontends/_skeleton/io.py

     _particle_reader = False
     _data_style = "skeleton"
 
-    def _read_data_set(self, grid, field):
+    def _read_data(self, grid, field):
         # This must return the array, of size/shape grid.ActiveDimensions, that
         # corresponds to 'field'.
         pass
 
     def _read_data_slice(self, grid, field, axis, coord):
         # If this is not implemented, the IO handler will just slice a
-        # _read_data_set item.
+        # _read_data item.
         pass

File yt/frontends/art/io.py

             return dat[idx]
         raise KeyError
         
-    def _read_data_set(self, grid, field):
+    def _read_data(self, grid, field):
         if field in particle_fields:
             return self._read_particle_field(grid, field)
         pf = grid.pf
             l_delta += 1
         return tr.astype("float64")
 
-    def _read_data_slice(self, grid, field, axis, coord):
-        sl = [slice(None), slice(None), slice(None)]
-        sl[axis] = slice(coord, coord + 1)
-        return self._read_data_set(grid, field)[sl]
-
 def _count_art_octs(f, offset, 
                    MinLev, MaxLevelNow):
     level_oct_offsets= [0,]

File yt/frontends/athena/io.py

     def _read_field_names(self,grid):
         pass
 
-    def _read_data_set(self,grid,field):
+    def _read_data(self,grid,field):
         f = file(grid.filename, 'rb')
         dtype, offsetr = grid.hierarchy._field_map[field]
         grid_ncells = np.prod(grid.ActiveDimensions)
         sl[axis] = slice(coord, coord + 1)
         if grid.pf.field_ordering == 1:
             sl.reverse()
+        return self._read_data_set(grid, field)[sl]
 
-        f = file(grid.filename, 'rb')
-        dtype, offsetr = grid.hierarchy._field_map[field]
-        grid_ncells = np.prod(grid.ActiveDimensions)
-        grid_dims = grid.ActiveDimensions
-        grid0_ncells = np.prod(grid.hierarchy.grid_dimensions[0,:])
-        read_table_offset = get_read_table_offset(f)
-        if grid_ncells != grid0_ncells:
-            offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
-        if grid_ncells == grid0_ncells:
-            offset = offsetr
-        f.seek(read_table_offset+offset)
-        if dtype == 'scalar':
-            data = np.fromfile(f, dtype='>f4', 
-                    count=grid_ncells).reshape(grid.ActiveDimensions,order='F')[sl].copy()
-        if dtype == 'vector':
-            data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
-            if '_x' in field:
-                data = data[0::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
-            elif '_y' in field:
-                data = data[1::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
-            elif '_z' in field:
-                data = data[2::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
-        f.close()
-        return data
 
 def get_read_table_offset(f):
     line = f.readline()

File yt/frontends/castro/io.py

             tr)
         return tr
 
-    def _read_data_set(self, grid, field):
+    def _read_data(self, grid, field):
         """
         reads packed multiFABs output by BoxLib in "NATIVE" format.
 
 
         inFile.close()
         return field
-
-    def _read_data_slice(self, grid, field, axis, coord):
-        """wishful thinking?
-        """
-        sl = [slice(None), slice(None), slice(None)]
-        sl[axis] = slice(coord, coord + 1)
-        #sl = tuple(reversed(sl))
-        return self._read_data_set(grid, field)[sl]
-
-

File yt/frontends/chombo/data_structures.py

     def _detect_fields(self):
         ncomp = int(self._fhandle['/'].attrs['num_components'])
         self.field_list = [c[1] for c in self._fhandle['/'].attrs.items()[-ncomp:]]
-
+          
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)
         self.fullplotdir = os.path.abspath(filename)
         StaticOutput.__init__(self,filename,data_style)
         self.storage_filename = storage_filename
+        self.cosmological_simulation = False
         fileh.close()
 
     def _set_units(self):

File yt/frontends/chombo/io.py

   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import h5py
+import os
 import re
 import numpy as np
 
         fns = [c[1] for c in f['/'].attrs.items()[-ncomp-1:-1]]
         fhandle.close()
     
-    def _read_data_set(self,grid,field):
+    def _read_data(self,grid,field):
+
         fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
 
         field_dict = self._field_dict(fhandle)
         start = grid_offset+field_dict[field]*boxsize
         stop = start + boxsize
         data = lev[self._data_string][start:stop]
-
+        
         fhandle.close()
         return data.reshape(dims, order='F')
 
-    def _read_data_slice(self, grid, field, axis, coord):
-        sl = [slice(None), slice(None), slice(None)]
-        sl[axis] = slice(coord, coord + 1)
-        return self._read_data_set(grid,field)[sl]
-
     def _read_particles(self, grid, field):
         """
         parses the Orion Star Particle text files

File yt/frontends/enzo/api.py

 
 from .io import \
       IOHandlerEnzoHDF4, \
-      IOHandlerEnzoHDF4_2D, \
       IOHandlerEnzoHDF5, \
       IOHandlerPackedHDF5, \
       IOHandlerInMemory, \

File yt/frontends/enzo/data_structures.py

         for p, v in self._conversion_override.items():
             self.conversion_factors[p] = v
         self.refine_by = self.parameters["RefineBy"]
+        self.periodicity = ensure_tuple(self.parameters["LeftFaceBoundaryCondition"] == 3)
         self.dimensionality = self.parameters["TopGridRank"]
         self.domain_dimensions = self.parameters["TopGridDimensions"]
         self.current_time = self.parameters["InitialTime"]

File yt/frontends/enzo/io.py

         """
         return SD.SD(grid.filename).datasets().keys()
 
-    def _read_data_set(self, grid, field):
+    def _read_data(self, grid, field):
         """
         Returns after having obtained or generated a field.  Should throw an
         exception.  Should only be called as EnzoGridInstance.readData()
         """
         return SD.SD(grid.filename).select(field).get().swapaxes(0,2)
 
-    def _read_data_slice(self, grid, field, axis, coord):
-        """
-        Reads a slice through the HDF4 data
-
-        @param grid: Grid to slice
-        @type grid: L{EnzoGrid<EnzoGrid>}
-        @param field: field to get
-        @type field: string
-        @param sl: region to get
-        @type sl: SliceType
-        """
-        sl = [slice(None), slice(None), slice(None)]
-        sl[axis] = slice(coord, coord + 1)
-        sl = tuple(reversed(sl))
-        return SD.SD(grid.filename).select(field)[sl].swapaxes(0,2)
-
     @property
     def _read_exception(self):
         return SD.HDF4Error
 
-class IOHandlerEnzoHDF4_2D(IOHandlerEnzoHDF4):
-
-    _data_style = "enzo_hdf4_2d"
-
-    def _read_data_set(self, grid, field):
-        t = SD.SD(grid.filename).select(field).get()[:,:,None]
-        return t.swapaxes(0,1)
-
-    def _read_data_slice(self, grid, field, axis, coord):
-        t = SD.SD(grid.filename).select(field).get()
-        return t.transpose()
-
-    def modify(self, field):
-        return field
-
 class IOHandlerEnzoHDF5(BaseIOHandler):
 
     _data_style = "enzo_hdf5"
         """
         return hdf5_light_reader.ReadListOfDatasets(grid.filename, "/")
 
-    def _read_data_set(self, grid, field):
+    def _read_data(self, grid, field):
         return hdf5_light_reader.ReadData(grid.filename, "/%s" % field).swapaxes(0,2)
 
-    def _read_data_slice(self, grid, field, axis, coord):
-        """
-        Reads a slice through the HDF5 data
-
-        @param grid: Grid to slice
-        @type grid: L{EnzoGrid<EnzoGrid>}
-        @param field: field to get
-        @type field: string
-        @param axis: axis to slice along
-        @param coord: coord to slice at
-        """
-        axis = {0:2,1:1,2:0}[axis]
-        t = hdf5_light_reader.ReadDataSlice(grid.filename, "/%s" %
-                        (field), axis, coord).transpose()
-        return t
-
     def modify(self, field):
         return field.swapaxes(0,2)
 
             for gid in data: self.queue[gid].update(data[gid])
         mylog.debug("Finished read of %s", sets)
 
-    def _read_data_set(self, grid, field):
+    def _read_data(self, grid, field):
         tr = hdf5_light_reader.ReadData(grid.filename,
                 "/Grid%08i/%s" % (grid.id, field))
         if tr.dtype == "float32": tr = tr.astype("float64")
         return self.modify(tr)
 
-    def _read_data_slice(self, grid, field, axis, coord):
-        axis = _axis_ids[axis]
-        return hdf5_light_reader.ReadDataSlice(grid.filename, "/Grid%08i/%s" %
-                        (grid.id, field), axis, coord).transpose()
-
     def _read_field_names(self, grid):
         return hdf5_light_reader.ReadListOfDatasets(
                     grid.filename, "/Grid%08i" % grid.id)
         tr = field[3:-3,3:-3,3:-3].swapaxes(0,2)
         return tr.copy() # To ensure contiguous
 
-    def _read_data_slice(self, grid, field, axis, coord):
-        axis = _axis_ids[axis]
-        return hdf5_light_reader.ReadDataSlice(grid.filename, "/Grid%08i/%s" %
-                        (grid.id, field), axis, coord)[3:-3,3:-3].transpose()
-
     def _read_raw_data_set(self, grid, field):
         return hdf5_light_reader.ReadData(grid.filename,
                 "/Grid%08i/%s" % (grid.id, field))
                       slice(ghost_zones,-ghost_zones))
         BaseIOHandler.__init__(self)
 
-    def _read_data_set(self, grid, field):
+    def _read_data(self, grid, field):
         if grid.id not in self.grids_in_memory:
             mylog.error("Was asked for %s but I have %s", grid.id, self.grids_in_memory.keys())
             raise KeyError
         return self.grids_in_memory[grid.id].keys()
 
     def _read_data_slice(self, grid, field, axis, coord):
+        # This data style cannot have a sidecar file
         sl = [slice(3,-3), slice(3,-3), slice(3,-3)]
         sl[axis] = slice(coord + 3, coord + 4)
         sl = tuple(reversed(sl))
     _data_style = "enzo_packed_2d"
     _particle_reader = False
 
-    def _read_data_set(self, grid, field):
+    def _read_data(self, grid, field):
         return hdf5_light_reader.ReadData(grid.filename,
             "/Grid%08i/%s" % (grid.id, field)).transpose()[:,:,None]
 
     _data_style = "enzo_packed_1d"
     _particle_reader = False
 
-    def _read_data_set(self, grid, field):
+    def _read_data(self, grid, field):
         return hdf5_light_reader.ReadData(grid.filename,
             "/Grid%08i/%s" % (grid.id, field)).transpose()[:,None,None]
 

File yt/frontends/flash/data_structures.py

         self._handle = h5py.File(filename, "r")
         if conversion_override is None: conversion_override = {}
         self._conversion_override = conversion_override
-
+        
         self.particle_filename = particle_filename
 
         if self.particle_filename is None :
         self.current_time = self.parameters["time"]
 
         # Determine if this is a periodic box
-        p = [self.parameters.get("%sl_boundary_type" % ax, None) == Periodic for ax in 'xyz']
+        p = [self.parameters.get("%sl_boundary_type" % ax, None) == "periodic" for ax in 'xyz']
         self.periodicity = tuple(p)
 
         # Determine cosmological parameters.

File yt/frontends/flash/io.py

             count_list, conv_factors):
         pass
 
-    def _read_data_set(self, grid, field):
+    def _read_data(self, grid, field):
         f = self._handle
         f_part = self._particle_handle
         if field in self._particle_fields:
         else:
             tr = f["/%s" % field][grid.id - grid._id_offset,:,:,:].transpose()
         return tr.astype("float64")
-
-    def _read_data_slice(self, grid, field, axis, coord):
-        sl = [slice(None), slice(None), slice(None)]
-        sl[axis] = slice(coord, coord + 1)
-        f = self._handle
-        tr = f["/%s" % field][grid.id - grid._id_offset].transpose()[sl]
-        return tr.astype("float64")
-

File yt/frontends/flash/tests/test_outputs.py

+"""
+FLASH frontend tests
+
+Author: John ZuHone <jzuhone@gmail.com>
+Affiliation: NASA/Goddard Space Flight Center
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 John ZuHone.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load
+from yt.frontends.flash.api import FLASHStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV")
+
+sloshing = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
+@requires_pf(sloshing)
+def test_sloshing():
+    pf = data_dir_load(sloshing)
+    yield assert_equal, str(pf), "sloshing_low_res_hdf5_plt_cnt_0300"
+    for test in small_patch_amr(sloshing, _fields):
+        yield test
+
+_fields_2d = ("Temperature", "Density")
+
+wt = "WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030"
+@requires_pf(wt)
+def test_wind_tunnel():
+    pf = data_dir_load(wt)
+    yield assert_equal, str(pf), "windtunnel_4lev_hdf5_plt_cnt_0030"
+    for test in small_patch_amr(wt, _fields_2d):
+        yield test
+
+gcm = "GalaxyClusterMerger/fiducial_1to10_b0.273d_hdf5_plt_cnt_0245.gz"
+@requires_pf(gcm, big_data=True)
+def test_galaxy_cluster_merger():
+    pf = data_dir_load(gcm)
+    for test in big_patch_amr(gcm, _fields):
+        yield test
+

File yt/frontends/gadget/io.py

 
 class IOHandlerGadget(BaseIOHandler):
     _data_style = 'gadget_infrastructure'
-    def _read_data_set(self, grid, field):
+    def _read_data(self, grid, field):
         data = []
         fh = h5py.File(grid.filename,mode='r')
         for ptype in grid.particle_types:
 
     def _read_data_slice(self,grid, field, axis, coord):
         #how would we implement axis here?
-        dat = self._read_data_set(grid,field)
+        dat = self._read_data(grid,field)
         return dat[coord]
 

File yt/frontends/gdf/io.py

         fhandle.close()
         return names
     
-    def _read_data_set(self,grid,field):
+    def _read_data(self,grid,field):
         fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
         data = (fhandle['/data/grid_%010i/'%grid.id+field][:]).copy()
         fhandle.close()
         sl[axis] = slice(coord, coord + 1)
         if grid.pf.field_ordering == 1:
             sl.reverse()
-        fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
-        data = (fhandle['/data/grid_%010i/'%grid.id+field][:][sl]).copy()
-        fhandle.close()
+        data = self._read_data_set(grid, field)
         if grid.pf.field_ordering == 1:
             return data.T
         else:

File yt/frontends/maestro/io.py

     def modify(self, field):
         return field.swapaxes(0,2)
 
-    def _read_data_set(self,grid,field):
+    def _read_data(self,grid,field):
         """
         reads packed multiFABs output by BoxLib in "NATIVE" format.
 
         inFile.close()
         return field
 
-    def _read_data_slice(self, grid, field, axis, coord):
-        """wishful thinking?
-        """
-        sl = [slice(None), slice(None), slice(None)]
-        sl[axis] = slice(coord, coord + 1)
-        #sl = tuple(reversed(sl))
-        return self._read_data_set(grid,field)[sl]
-
-

File yt/frontends/nyx/io.py

                               len(nyx_particle_field_names), tr)
         return tr
 
-    def _read_data_set(self, grid, field):
+    def _read_data(self, grid, field):
         """ reads packed multiFABs output by BoxLib in "NATIVE" format. """
         if field in nyx_particle_field_names:
             return self._read_particle_field(grid, field)
 
         return field
 
-    def _read_data_slice(self, grid, field, axis, coord):
-        # wishful thinking?
-        sl = [slice(None), slice(None), slice(None)]
-        sl[axis] = slice(coord, coord + 1)
-        #sl = tuple(reversed(sl))
-        return self._read_data_set(grid, field)[sl]

File yt/frontends/orion/data_structures.py

File contents unchanged.

File yt/frontends/orion/io.py

                         particles.append(read(line, field))
         return np.array(particles)
 
-    def _read_data_set(self,grid,field):
+    def _read_data(self,grid,field):
         """
         reads packed multiFABs output by BoxLib in "NATIVE" format.
 
         inFile.close()
         return field
 
-    def _read_data_slice(self, grid, field, axis, coord):
-        """wishful thinking?
-        """
-        sl = [slice(None), slice(None), slice(None)]
-        sl[axis] = slice(coord, coord + 1)
-        #sl = tuple(reversed(sl))
-        return self._read_data_set(grid,field)[sl]
-
-

File yt/frontends/ramses/io.py

         self.ramses_tree = ramses_tree
         BaseIOHandler.__init__(self, *args, **kwargs)
 
-    def _read_data_set(self, grid, field):
+    def _read_data(self, grid, field):
         tr = np.zeros(grid.ActiveDimensions, dtype='float64')
         filled = np.zeros(grid.ActiveDimensions, dtype='int32')
         to_fill = grid.ActiveDimensions.prod()
             l_delta += 1
         return tr
 
-    def _read_data_slice(self, grid, field, axis, coord):
-        sl = [slice(None), slice(None), slice(None)]
-        sl[axis] = slice(coord, coord + 1)
-        return self._read_data_set(grid, field)[sl]
-
     def preload(self, grids, sets):
         if len(grids) == 0: return
         domain_keys = defaultdict(list)
             mylog.debug("Starting read of domain %s (%s)", domain, sets)
             for field in sets:
                 for g in grids:
-                    self.queue[g.id][field] = self._read_data_set(g, field)
+                    self.queue[g.id][field] = self._read_data(g, field)
                 print "Clearing", field, domain
                 self.ramses_tree.clear_tree(field, domain - 1)
         mylog.debug("Finished read of %s", sets)

File yt/frontends/stream/io.py

         self.fields = stream_handler.fields
         BaseIOHandler.__init__(self)
 
-    def _read_data_set(self, grid, field):
+    def _read_data(self, grid, field):
         # This is where we implement processor-locking
         #if grid.id not in self.grids_in_memory:
         #    mylog.error("Was asked for %s but I have %s", grid.id, self.grids_in_memory.keys())
         #    raise KeyError
-        tr = self.fields[grid.id][field]
+        tr = self.fields[grid.id][field].copy()
         # If it's particles, we copy.
-        if len(tr.shape) == 1: return tr.copy()
-        # New in-place unit conversion breaks if we don't copy first
         return tr
 
     def modify(self, field):
     def _read_field_names(self, grid):
         return self.fields[grid.id].keys()
 
-    def _read_data_slice(self, grid, field, axis, coord):
-        sl = [slice(None), slice(None), slice(None)]
-        sl[axis] = slice(coord, coord + 1)
-        sl = tuple(sl)
-        tr = self.fields[grid.id][field][sl]
-        # In-place unit conversion requires we return a copy
-        return tr.copy()
-
     @property
     def _read_exception(self):
         return KeyError

File yt/frontends/tiger/io.py

         BaseIOHandler.__init__(self, *args, **kwargs)
         self._memmaps = {}
 
-    def _read_data_set(self, grid, field):
+    def _read_data(self, grid, field):
         fn = grid.pf.basename + grid.hierarchy.file_mapping[field]
         LD = np.array(grid.left_dims, dtype='int64')
         SS = np.array(grid.ActiveDimensions, dtype='int64')
         RS = np.array(grid.pf.root_size, dtype='int64')
         data = au.read_tiger_section(fn, LD, SS, RS).astype("float64")
         return data
-
-    def _read_data_slice(self, grid, field, axis, coord):
-        fn = grid.pf.basename + grid.hierarchy.file_mapping[field]
-        LD = np.array(grid.left_dims, dtype='int64').copy()
-        SS = np.array(grid.ActiveDimensions, dtype='int64').copy()
-        RS = np.array(grid.pf.root_size, dtype='int64').copy()
-        LD[axis] += coord
-        SS[axis] = 1
-        data = au.read_tiger_section(fn, LD, SS, RS).astype("float64")
-        return data

File yt/testing.py

   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import itertools as it
 import numpy as np
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
                  for field,offset in zip(fields,offsets))
     ug = load_uniform_grid(data, ndims, 1.0, nprocs = nprocs)
     return ug
+
+def expand_keywords(keywords, full=False):
+    """
+    expand_keywords is a means for testing all possible keyword
+    arguments in the nosetests.  Simply pass it a dictionary of all the
+    keyword arguments and all of the values for these arguments that you
+    want to test.
+
+    It will return a list of **kwargs dicts containing combinations of
+    the various kwarg values you passed it.  These can then be passed
+    to the appropriate function in nosetests. 
+
+    If full=True, then every possible combination of keywords is produced,
+    otherwise, every keyword option is included at least once in the output
+    list.  Be careful, by using full=True, you may be in for an exponentially
+    larger number of tests! 
+
+    keywords : dict
+        a dictionary where the keys are the keywords for the function,
+        and the values of each key are the possible values that this key
+        can take in the function
+
+    full : bool
+        if set to True, every possible combination of given keywords is 
+        returned
+
+    Returns
+    -------
+    array of dicts
+        An array of **kwargs dictionaries to be individually passed to
+        the appropriate function matching these kwargs.
+
+    Examples
+    --------
+    >>> keywords = {}
+    >>> keywords['dpi'] = (50, 100, 200)
+    >>> keywords['cmap'] = ('algae', 'jet')
+    >>> list_of_kwargs = expand_keywords(keywords)
+    >>> print list_of_kwargs
+
+    array([{'cmap': 'algae', 'dpi': 50}, 
+           {'cmap': 'jet', 'dpi': 100},
+           {'cmap': 'algae', 'dpi': 200}], dtype=object)
+
+    >>> list_of_kwargs = expand_keywords(keywords, full=True)
+    >>> print list_of_kwargs
+
+    array([{'cmap': 'algae', 'dpi': 50}, 
+           {'cmap': 'algae', 'dpi': 100},
+           {'cmap': 'algae', 'dpi': 200}, 
+           {'cmap': 'jet', 'dpi': 50},
+           {'cmap': 'jet', 'dpi': 100}, 
+           {'cmap': 'jet', 'dpi': 200}], dtype=object)
+
+    >>> for kwargs in list_of_kwargs:
+    ...     write_projection(*args, **kwargs)
+    """
+
+    # if we want every possible combination of keywords, use iter magic
+    if full:
+        keys = sorted(keywords)
+        list_of_kwarg_dicts = np.array([dict(zip(keys, prod)) for prod in \
+                              it.product(*(keywords[key] for key in keys))])
+            
+    # if we just want to probe each keyword, but not necessarily every 
+    # combination
+    else:
+        # Determine the maximum number of values any of the keywords has
+        num_lists = 0
+        for val in keywords.values():
+            if isinstance(val, str):
+                num_lists = max(1, num_lists)
+            else:
+                num_lists = max(len(val), num_lists)
+    
+        # Construct array of kwargs dicts, each element of the list is a different
+        # **kwargs dict.  each kwargs dict gives a different combination of
+        # the possible values of the kwargs
+    
+        # initialize array
+        list_of_kwarg_dicts = np.array([dict() for x in range(num_lists)])
+    
+        # fill in array
+        for i in np.arange(num_lists):
+            list_of_kwarg_dicts[i] = {}
+            for key in keywords.keys():
+                # if it's a string, use it (there's only one)
+                if isinstance(keywords[key], str):
+                    list_of_kwarg_dicts[i][key] = keywords[key]
+                # if there are more options, use the i'th val
+                elif i < len(keywords[key]):
+                    list_of_kwarg_dicts[i][key] = keywords[key][i]
+                # if there are not more options, use the 0'th val
+                else:
+                    list_of_kwarg_dicts[i][key] = keywords[key][0]
+
+    return list_of_kwarg_dicts

File yt/utilities/amr_kdtree/amr_kdtools.py

+"""
+AMR kD-Tree Tools 
+
+Authors: Samuel Skillman <samskillman@gmail.com>
+Affiliation: University of Colorado at Boulder
+
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2010-2011 Samuel Skillman.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+import numpy as np
+from yt.funcs import *
+from yt.utilities.lib import kdtree_get_choices
+
+def _lchild_id(node_id): return (node_id<<1)
+def _rchild_id(node_id): return (node_id<<1) + 1
+def _parent_id(node_id): return (node_id-1) >> 1
+
+class Node(object):
+    def __init__(self, parent, left, right,
+            left_edge, right_edge, grid_id, node_id):
+        self.left = left
+        self.right = right
+        self.left_edge = left_edge
+        self.right_edge = right_edge
+        self.grid = grid_id
+        self.parent = parent
+        self.id = node_id
+        self.data = None
+        self.split = None
+
+class Split(object):
+    def __init__(self, dim, pos):
+        self.dim = dim
+        self.pos = pos
+
+def should_i_build(node, rank, size):
+    if (node.id < size) or (node.id >= 2*size):
+        return True
+    elif node.id - size == rank:
+        return True
+    else:
+        return False
+
+def add_grids(node, gles, gres, gids, rank, size):
+    if not should_i_build(node, rank, size):
+        return
+
+    if kd_is_leaf(node):
+        insert_grids(node, gles, gres, gids, rank, size)
+    else:
+        less_ids = gles[:,node.split.dim] < node.split.pos
+        if len(less_ids) > 0:
+            add_grids(node.left, gles[less_ids], gres[less_ids],
+                      gids[less_ids], rank, size)
+
+        greater_ids = gres[:,node.split.dim] > node.split.pos
+        if len(greater_ids) > 0:
+            add_grids(node.right, gles[greater_ids], gres[greater_ids],
+                      gids[greater_ids], rank, size)
+
+def should_i_split(node, rank, size):
+    return node.id < size
+
+def geo_split(node, gles, gres, grid_ids, rank, size):
+    big_dim = np.argmax(gres[0]-gles[0])
+    new_pos = (gres[0][big_dim] + gles[0][big_dim])/2.
+    old_gre = gres[0].copy()
+    new_gle = gles[0].copy()
+    new_gle[big_dim] = new_pos
+    gres[0][big_dim] = new_pos
+    gles = np.append(gles, np.array([new_gle]), axis=0)
+    gres = np.append(gres, np.array([old_gre]), axis=0)
+    grid_ids = np.append(grid_ids, grid_ids, axis=0)
+
+    split = Split(big_dim, new_pos)
+
+    # Create a Split
+    divide(node, split)
+
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    insert_grids(node.left, gles[:1], gres[:1],
+            grid_ids[:1], rank, size)
+
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    insert_grids(node.right, gles[1:], gres[1:],
+            grid_ids[1:], rank, size)
+    return
+
+def insert_grids(node, gles, gres, grid_ids, rank, size):
+    if not should_i_build(node, rank, size) or grid_ids.size == 0:
+        return
+
+    if len(grid_ids) == 1:
+        # If we should continue to split based on parallelism, do so!
+        if should_i_split(node, rank, size):
+            geo_split(node, gles, gres, grid_ids, rank, size)
+            return
+
+        if np.all(gles[0] <= node.left_edge) and \
+                np.all(gres[0] >= node.right_edge):
+            node.grid = grid_ids[0]
+            assert(node.grid is not None)
+            return
+
+    # Split the grids
+    check = split_grids(node, gles, gres, grid_ids, rank, size)
+    # If check is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if check == -1:
+        node.grid = None
+    return
+
+def split_grids(node, gles, gres, grid_ids, rank, size):
+    # Find a Split
+    data = np.array([(gles[i,:], gres[i,:]) for i in
+        xrange(grid_ids.shape[0])], copy=False)
+    best_dim, split_pos, less_ids, greater_ids = \
+        kdtree_get_choices(data, node.left_edge, node.right_edge)
+
+    # If best_dim is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if best_dim == -1:
+        return -1
+
+    split = Split(best_dim, split_pos)
+
+    del data, best_dim, split_pos
+
+    # Create a Split
+    divide(node, split)
+
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    insert_grids(node.left, gles[less_ids], gres[less_ids],
+                 grid_ids[less_ids], rank, size)
+
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    insert_grids(node.right, gles[greater_ids], gres[greater_ids],
+                 grid_ids[greater_ids], rank, size)
+
+    return
+
+def new_right(Node, split):
+    new_right = Node.right_edge.copy()
+    new_right[split.dim] = split.pos
+    return new_right
+
+def new_left(Node, split):
+    new_left = Node.left_edge.copy()
+    new_left[split.dim] = split.pos
+    return new_left
+
+def divide(node, split):
+    # Create a Split
+    node.split = split
+    node.left = Node(node, None, None,
+            node.left_edge, new_right(node, split), node.grid,
+                     _lchild_id(node.id))
+    node.right = Node(node, None, None,
+            new_left(node, split), node.right_edge, node.grid,
+                      _rchild_id(node.id))
+    return
+
+def kd_sum_volume(node):
+    if (node.left is None) and (node.right is None):
+        if node.grid is None:
+            return 0.0
+        return np.prod(node.right_edge - node.left_edge)
+    else:
+        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
+
+def kd_sum_cells(node):
+    if (node.left is None) and (node.right is None):
+        if node.grid is None:
+            return 0.0
+        return np.prod(node.right_edge - node.left_edge)
+    else:
+        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
+
+
+def kd_node_check(node):
+    assert (node.left is None) == (node.right is None)
+    if (node.left is None) and (node.right is None):
+        if node.grid is not None:
+            return np.prod(node.right_edge - node.left_edge)
+        else: return 0.0
+    else:
+        return kd_node_check(node.left)+kd_node_check(node.right)
+
+def kd_is_leaf(node):
+    no_l_child = node.left is None
+    no_r_child = node.right is None
+    assert no_l_child == no_r_child
+    return no_l_child
+
+def step_depth(current, previous):
+    '''
+    Takes a single step in the depth-first traversal
+    '''
+    if kd_is_leaf(current): # At a leaf, move back up
+        previous = current
+        current = current.parent
+
+    elif current.parent is previous: # Moving down, go left first
+        previous = current
+        if current.left is not None:
+            current = current.left
+        elif current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
+
+    elif current.left is previous: # Moving up from left, go right 
+        previous = current
+        if current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
+
+    elif current.right is previous: # Moving up from right child, move up
+        previous = current
+        current = current.parent
+
+    return current, previous
+
+def depth_traverse(tree, max_node=None):
+    '''
+    Yields a depth-first traversal of the kd tree always going to
+    the left child before the right.
+    '''
+    current = tree.trunk
+    previous = None
+    if max_node is None:
+        max_node = np.inf
+    while current is not None:
+        yield current
+        current, previous = step_depth(current, previous)
+        if current is None: break
+        if current.id >= max_node:
+            current = current.parent
+            previous = current.right
+
+def depth_first_touch(tree, max_node=None):
+    '''
+    Yields a depth-first traversal of the kd tree always going to
+    the left child before the right.
+    '''
+    current = tree.trunk
+    previous = None
+    if max_node is None:
+        max_node = np.inf
+    while current is not None:
+        if previous is None or previous.parent != current:
+            yield current
+        current, previous = step_depth(current, previous)
+        if current is None: break
+        if current.id >= max_node:
+            current = current.parent
+            previous = current.right
+
+def breadth_traverse(tree):
+    '''
+    Yields a breadth-first traversal of the kd tree always going to
+    the left child before the right.
+    '''
+    current = tree.trunk
+    previous = None
+    while current is not None:
+        yield current
+        current, previous = step_depth(current, previous)
+
+
+def viewpoint_traverse(tree, viewpoint):
+    '''
+    Yields a viewpoint dependent traversal of the kd-tree.  Starts
+    with nodes furthest away from viewpoint.
+    '''
+
+    current = tree.trunk
+    previous = None
+    while current is not None:
+        yield current
+        current, previous = step_viewpoint(current, previous, viewpoint)
+
+def step_viewpoint(current, previous, viewpoint):
+    '''
+    Takes a single step in the viewpoint based traversal.  Always
+    goes to the node furthest away from viewpoint first.
+    '''
+    if kd_is_leaf(current): # At a leaf, move back up
+        previous = current
+        current = current.parent
+    elif current.split.dim is None: # This is a dead node
+        previous = current
+        current = current.parent
+
+    elif current.parent is previous: # Moving down
+        previous = current
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                previous = current.right
+        else:
+            if current.left is not None:
+                current = current.left
+            else:
+                previous = current.left
+
+    elif current.right is previous: # Moving up from right 
+        previous = current
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.left is not None:
+                current = current.left
+            else:
+                current = current.parent
+        else:
+            current = current.parent
+
+    elif current.left is previous: # Moving up from left child
+        previous = current
+        if viewpoint[current.split.dim] > current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                current = current.parent
+        else:
+            current = current.parent
+
+    return current, previous
+
+
+def receive_and_reduce(comm, incoming_rank, image, add_to_front):
+    mylog.debug( 'Receiving image from %04i' % incoming_rank)
+    #mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
+    arr2 = comm.recv_array(incoming_rank, incoming_rank).reshape(
+        (image.shape[0], image.shape[1], image.shape[2]))
+
+    if add_to_front:
+        front = arr2
+        back = image
+    else:
+        front = image
+        back = arr2
+
+    if image.shape[2] == 3:
+        # Assume Projection Camera, Add
+        np.add(image, front, image)
+        return image
+
+    ta = 1.0 - front[:,:,3]
+    np.maximum(ta, 0.0, ta)
+    # This now does the following calculation, but in a memory
+    # conservative fashion
+    # image[:,:,i  ] = front[:,:,i] + ta*back[:,:,i]
+    image = back.copy()
+    for i in range(4):
+        np.multiply(image[:,:,i], ta, image[:,:,i])
+    np.add(image, front, image)
+    return image
+
+def send_to_parent(comm, outgoing_rank, image):
+    mylog.debug( 'Sending image to %04i' % outgoing_rank)
+    comm.send_array(image, outgoing_rank, tag=comm.rank)
+
+def scatter_image(comm, root, image):
+    mylog.debug( 'Scattering from %04i' % root)
+    image = comm.mpi_bcast(image, root=root)
+    return image
+
+def find_node(node, pos):
+    """
+    Find the AMRKDTree node enclosing a position
+    """
+    assert(np.all(node.left_edge <= pos))
+    assert(np.all(node.right_edge > pos))
+    while not kd_is_leaf(node):
+        if pos[node.split.dim] < node.split.pos:
+            node = node.left
+        else:
+            node = node.right
+    return node
+
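find_node descends from any enclosing node straight to the leaf containing a point, which pairs naturally with the grid lookup done elsewhere in this changeset. A sketch under assumed names (tree and pf here are placeholders for an existing Tree instance and parameter file):

    import numpy as np

    pos = np.array([0.5, 0.5, 0.5])
    leaf = find_node(tree.trunk, pos)          # leaf enclosing pos
    offset = pf.h.grids[0]._id_offset
    grid = pf.h.grids[leaf.grid - offset]      # grid covering that leaf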

File yt/utilities/amr_kdtree/amr_kdtree.py

 
 Authors: Samuel Skillman <samskillman@gmail.com>
 Affiliation: University of Colorado at Boulder
-Wil St. Charles <fallen751@gmail.com>
-Affiliation: University of Colorado at Boulder
 
 Homepage: http://yt-project.org/
 License:
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
+from yt.funcs import *
 import numpy as np
-from yt.funcs import *
-from yt.visualization.volume_rendering.grid_partitioner import HomogenizedVolume
-from yt.visualization.image_writer import write_image, write_bitmap
-from yt.utilities.lib import kdtree_get_choices
-from yt.utilities.lib.grid_traversal import PartitionedGrid
-from yt.utilities.performance_counters import yt_counters, time_function
+import h5py
+from amr_kdtools import Node, Split, kd_is_leaf, kd_sum_volume, kd_node_check, \
+        depth_traverse, viewpoint_traverse, add_grids, \
+        receive_and_reduce, send_to_parent, scatter_image, find_node, \
+        depth_first_touch
 from yt.utilities.parallel_tools.parallel_analysis_interface \
     import ParallelAnalysisInterface 
-from copy import deepcopy
-from yt.config import ytcfg
-from time import time
-import h5py
+from yt.utilities.lib.grid_traversal import PartitionedGrid
+from yt.utilities.math_utils import periodic_position
 
-def corner_bounds(split_dim, split, current_left = None, current_right = None):
-    r"""
-    Given a kd-Tree split dimension and position and bound to be
-    modified, returns the new bound.
+import pdb
 
-    A simple function that replaces the `split_dim` dimension of the
-    current left or right bound with `split`. Left or Right bound is
-    chosen by specifying the `current_left` or `current_right`.
-    """
-    if(current_left is not None):
-        new_left = current_left.copy()
-        new_left[split_dim] = split
-        return new_left
-    elif(current_right is not None):
-        new_right = current_right.copy()
-        new_right[split_dim] = split
-        return new_right
+def my_break():
+    my_debug = False 
+    if my_debug: pdb.set_trace()
 
-def _lchild_id(id): return (id<<1) + 1
-def _rchild_id(id): return (id<<1) + 2
-def _parent_id(id): return (id-1)>>1
-
-steps = np.array([[-1, -1, -1],
-                  [-1, -1,  0],
-                  [-1, -1,  1],
-                  [-1,  0, -1],
-                  [-1,  0,  0],
-                  [-1,  0,  1],
-                  [-1,  1, -1],
-                  [-1,  1,  0],
-                  [-1,  1,  1],
+steps = np.array([[-1, -1, -1], [-1, -1,  0], [-1, -1,  1],
+                  [-1,  0, -1], [-1,  0,  0], [-1,  0,  1],
+                  [-1,  1, -1], [-1,  1,  0], [-1,  1,  1],
                   
-                  [ 0, -1, -1],
-                  [ 0, -1,  0],
-                  [ 0, -1,  1],
+                  [ 0, -1, -1], [ 0, -1,  0], [ 0, -1,  1],
                   [ 0,  0, -1],
                   # [ 0,  0,  0],
                   [ 0,  0,  1],
-                  [ 0,  1, -1],
-                  [ 0,  1,  0],
-                  [ 0,  1,  1],
+                  [ 0,  1, -1], [ 0,  1,  0], [ 0,  1,  1],
                   
-                  [ 1, -1, -1],
-                  [ 1, -1,  0],
-                  [ 1, -1,  1],
-                  [ 1,  0, -1],
-                  [ 1,  0,  0],
-                  [ 1,  0,  1],
-                  [ 1,  1, -1],
-                  [ 1,  1,  0],
-                  [ 1,  1,  1]
-                  ])
+                  [ 1, -1, -1], [ 1, -1,  0], [ 1, -1,  1],
+                  [ 1,  0, -1], [ 1,  0,  0], [ 1,  0,  1],
+                  [ 1,  1, -1], [ 1,  1,  0], [ 1,  1,  1] ])
 
+class Tree(object):
+    def __init__(self, pf, comm_rank=0, comm_size=1, left=None, right=None,
+            min_level=None, max_level=None, grids=None):
+        
+        self.pf = pf
+        self._id_offset = self.pf.h.grids[0]._id_offset
+        if left is None:
+            left = np.array([-np.inf]*3)
+        if right is None:
+            right = np.array([np.inf]*3)
 
-class MasterNode(object):
-    r"""
-    A MasterNode object is the building block of the AMR kd-Tree.
-    Used during the construction to act as both dividing nodes and
-    leaf nodes.
-    """
-    def __init__(self,my_id=None,
-                 parent=None,
-                 parent_grid=None,
-                 grids=None,
-                 l_corner=None,
-                 r_corner=None):
-        
-        self.grids = grids
-        self.parent = parent
-        self.parent_grid = parent_grid
-        self.l_corner = l_corner
-        self.r_corner = r_corner
+        if min_level is None: min_level = 0
+        if max_level is None: max_level = pf.h.max_level
+        self.min_level = min_level
+        self.max_level = max_level
+        self.comm_rank = comm_rank
+        self.comm_size = comm_size
+        self.trunk = Node(None, None, None,
+                left, right, None, 1)
+        if grids is None:
+            self.grids = pf.h.region((left+right)/2., left, right)._grids
+        else:
+            self.grids = grids
+        self.build(grids)
 
-        self.split_ax = None
-        self.split_pos = None
+    def add_grids(self, grids):
+        my_break() 
+        lvl_range = range(self.min_level, self.max_level+1)
+        if grids is None:
+            level_iter = self.pf.hierarchy.get_levels()
+            while True:
+                try:
+                    grids = level_iter.next()
+                except StopIteration:
+                    break
+                if grids[0].Level not in lvl_range: continue
+                gmask = np.array([g in self.grids for g in grids])
+                gles =  np.array([g.LeftEdge for g in grids])[gmask]
+                gres =  np.array([g.RightEdge for g in grids])[gmask]
+                gids = np.array([g.id for g in grids])[gmask]
+                my_break()
+                add_grids(self.trunk, gles, gres, gids, self.comm_rank, self.comm_size)
+                del gles, gres, gids, grids
+        else:
+            gles = np.array([g.LeftEdge for g in grids])
+            gres = np.array([g.RightEdge for g in grids])
+            gids = np.array([g.id for g in grids])
 
-        self.left_child = None
-        self.right_child = None
-        self.cost = 0
-        # self.owner = -1
+            add_grids(self.trunk, gles, gres, gids, self.comm_rank, self.comm_size)
+            del gles, gres, gids, grids
 
-        self.id = my_id
-        
-        self.grid = None
-        self.brick = None
-        self.li = None
-        self.ri = None
-        self.dims = None
 
-        # self.done = 0
-        # self.cast_done = 0
+    def build(self, grids = None):
+        self.add_grids(grids)
 
-def set_leaf(thisnode, grid_id, leaf_l_corner, leaf_r_corner):
-    r"""
-    Sets leaf properties.
+    def check_tree(self):
+        for node in depth_traverse(self):
+            if node.grid is None:
+                continue
+            grid = self.pf.h.grids[node.grid - self._id_offset]
+            dds = grid.dds
+            gle = grid.LeftEdge
+            gre = grid.RightEdge
+            li = np.rint((node.left_edge-gle)/dds).astype('int32')
+            ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+            dims = (ri - li).astype('int32')
+            assert(np.all(grid.LeftEdge <= node.left_edge))
+            assert(np.all(grid.RightEdge >= node.right_edge))
+            assert(np.all(dims > 0))
+            # print grid, dims, li, ri
 
-    Parameters
-    ----------
-    thisnode : `MasterNode`
-        AMR kd-Tree node to be modified.
-    grid_id : `~yt.data_objects.grid_patch`
-        A grid patch that contains the data spanned by this 
-        kd-Tree leaf.
-    leaf_l_corner: array_like, dimension 3
-        The left corner of the volume spanned by this leaf.
-    leaf_r_corner: array_like, dimension 3
-        The right corner of the volume spanned by this leaf.
-        
-    Returns
-    -------
-    None
-    """
-    thisnode.grid = grid_id.id
-    thisnode.l_corner = leaf_l_corner
-    thisnode.r_corner = leaf_r_corner
-    del thisnode.grids, thisnode.parent_grid, thisnode.split_ax, thisnode.split_pos
+        # Calculate the Volume
+        vol = kd_sum_volume(self.trunk)
+        mylog.debug('AMRKDTree volume = %e' % vol)
+        kd_node_check(self.trunk)
 
-class AMRKDTree(HomogenizedVolume):
+    def sum_cells(self):
+        cells = 0
+        for node in depth_traverse(self):
+            if node.grid is None:
+                continue
+            grid = self.pf.h.grids[node.grid - self._id_offset]
+            dds = grid.dds
+            gle = grid.LeftEdge
+            gre = grid.RightEdge
+            li = np.rint((node.left_edge-gle)/dds).astype('int32')
+            ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+            dims = (ri - li).astype('int32')
+            cells += np.prod(dims)
+
+        return cells
+
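Both check_tree and sum_cells map a node's floating-point edges onto integer cell indices inside its covering grid. A worked example of that arithmetic with assumed values (not drawn from any particular dataset):

    import numpy as np

    # A grid spanning [0, 1]^3 with 16 cells per side, so dds = 1/16,
    # and a leaf covering [0.25, 0.5] along x and the full domain in y, z.
    gle = np.zeros(3)                  # grid LeftEdge
    dds = np.ones(3) / 16.0            # cell width
    node_left = np.array([0.25, 0.0, 0.0])
    node_right = np.array([0.50, 1.0, 1.0])

    li = np.rint((node_left - gle) / dds).astype('int32')   # [ 4,  0,  0]
    ri = np.rint((node_right - gle) / dds).astype('int32')  # [ 8, 16, 16]
    dims = ri - li                                           # [ 4, 16, 16]
    cells = np.prod(dims)                                    # 1024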
+class AMRKDTree(ParallelAnalysisInterface):
     def __init__(self, pf,  l_max=None, le=None, re=None,
-                 fields=None, no_ghost=False,
-                 tree_type='domain',log_fields=None, merge_trees=False):
-        r"""
-        AMR kd-Tree object, a homogenized volume.
+                 fields=None, no_ghost=False, min_level=None, max_level=None,
+                 log_fields=None,
+                 grids=None):
 
-        Definition of the AMR kd-Tree object.  This is a method of
-        volume homogenization that uses a modified kd-Tree structure
-        to partition the AMR hierarchy.  The dividing nodes of the
-        tree subdivide the volume into left and right children along a
-        particular dimension.  The leaf nodes of the tree contain
-        subvolumes that are covered by a single single grid at a
-        single resolution, usually the maximum level for that volume
-        unless `l_max` is otherwise specified.  The volume can then be
-        traversed along an arbitrary direction based on comparisions
-        with the dividing node position and split dimenstion.  
-
-        Parameters
-        ----------
-        pf : `~yt.data_objects.StaticOutput`
-            The parameter file to be kd-Tree partitioned.
-        l_max : int, optional
-            Maximum level to use in construction of kd-Tree. Default:
-            None (all levels)
-        le: array_like, optional
-            Left edge to be be partitioned. Default: None (Domain Left
-            Edge)
-        re: array_like. optional
-            Right edge to be partitioned.  Default: None (Domain Right
-            Edge)
-        fields: list of strings, optional
-            Fields to be obtained when collecting leaf data.  Defualt:
-            None (['Density']).
-        log_fields: list of bool, optional
-            Specifies which fields are to be taken the logarithm of
-            before rendering.
-        no_ghost: bool, optional
-            Optimization option.  If True, homogenized bricks will
-            extrapolate out from grid instead of interpolating from
-            ghost zones that have to first be calculated.  This can
-            lead to large speed improvements, but at a loss of
-            accuracy/smoothness in resulting image.  The effects are
-            less notable when the transfer function is smooth and
-            broad. Default: False
-        tree_type: string, optional
-            Specifies the type of kd-Tree to be constructed/cast.
-            There are three options, the default being 'domain'. Only
-            affects parallel rendering.  'domain' is suggested.
-
-            'domain' - Tree construction/casting is load balanced by
-            splitting up the domain into the first N subtrees among N
-            processors (N must be a power of 2).  Casting then
-            proceeds with each processor rendering their subvolume,