Matthew Turk / yt-3.0

Commits

Sam Leitner committed d01a068

moved ramses files over to use as a template for artio; committed artio from src/tools/ with an additional cpdef-able header reader; added the wrapper file "artio_caller.pyx", along with a compile step ("make") and an execution example (call.py).

  • Parent commits aeff683
  • Branches yt-3.0
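
The commit message mentions an execution example, call.py, alongside the new wrapper. Its contents are not reproduced on this page, so what follows is only a rough sketch of how such a driver might invoke the cpdef read_header() defined in artio_caller.pyx below; the module name artio_caller and the fileset prefix are assumptions, while the flag value 2 matches ARTIO_OPEN_GRID in artio.h.

    # hypothetical call.py: drive the cpdef wrapper from plain Python
    from artio_caller import read_header   # assumes the .pyx builds to a module of this name

    ARTIO_OPEN_GRID = 2   # same value as the #define in artio_headers/artio.h

    # "output" is a placeholder prefix; artio_fileset_open() will look for "output.art"
    read_header("output", ARTIO_OPEN_GRID)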

Comments (0)

Files changed (25)

File yt/frontends/artio/__init__.py

+"""
+API for yt.frontends.ramses
+
+Author: Matthew Turk <matthewturk@gmail.com>
+Affiliation: UCSD
+Author: J.S. Oishi <jsoishi@gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Author: Britton Smith <brittonsmith@gmail.com>
+Affiliation: MSU
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""

File yt/frontends/artio/_artio_reader.pyx

+"""
+Wrapping code for Oliver Hahn's RamsesRead++
+
+Author: Matthew Turk <matthewturk@gmail.com>
+Affiliation: UCSD
+Author: Oliver Hahn <ohahn@stanford.edu>
+Affiliation: KIPAC / Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+# Cython wrapping code for Oliver Hahn's RAMSES reader
+from cython.operator cimport dereference as deref, preincrement as inc
+from libc.stdlib cimport malloc, free, abs, calloc, labs
+
+import numpy as np
+cimport numpy as np
+cimport cython
+
+cdef extern from "math.h":
+    double ceil(double x)
+    double log2(double x)
+
+cdef inline np.int64_t i64max(np.int64_t i0, np.int64_t i1):
+    if i0 > i1: return i0
+    return i1
+
+cdef inline np.int64_t i64min(np.int64_t i0, np.int64_t i1):
+    if i0 < i1: return i0
+    return i1
+
+cdef extern from "<vector>" namespace "std":
+    cdef cppclass vector[T]:
+        cppclass iterator:
+            T operator*()
+            iterator operator++()
+            bint operator==(iterator)
+            bint operator!=(iterator)
+        vector()
+        void push_back(T&)
+        T& operator[](int)
+        T& at(int)
+        iterator begin()
+        iterator end()
+
+cdef extern from "<map>" namespace "std":
+    cdef cppclass map[A,B]:
+        pass
+
+cdef extern from "string" namespace "std":
+    cdef cppclass string:
+        string(char *cstr)
+        char *c_str()
+        string operator*()
+
+cdef extern from "RAMSES_typedefs.h":
+    pass
+
+cdef extern from "RAMSES_info.hh" namespace "RAMSES":
+    enum codeversion:
+        version1
+        version2
+        version3
+
+    cdef cppclass snapshot:
+        string m_filename
+        codeversion m_version
+        cppclass info_data:
+            unsigned ncpu
+            unsigned ndim
+            unsigned levelmin
+            unsigned levelmax
+            unsigned ngridmax
+            unsigned nstep_coarse
+            double boxlen
+            double time
+            double aexp
+            double H0
+            double omega_m
+            double omega_l
+            double omega_k
+            double omega_b
+            double unit_l
+            double unit_d
+            double unit_t
+
+        info_data m_header
+        vector[double] ind_min
+        vector[double] ind_max
+
+        snapshot(string info_filename, codeversion ver)
+
+        unsigned get_snapshot_num()
+        unsigned getdomain_bykey( double key )
+
+    #void hilbert3d(vector[double] &x, vector[double] &y, vector[double] &z,
+    #               vector[double] &order, unsigned bit_length)
+
+cdef extern from "RAMSES_amr_data.hh" namespace "RAMSES::AMR":
+    cdef cppclass vec[real_t]:
+        real_t x, y, z
+        vec( real_t x_, real_t y_, real_t z_)
+        vec ( vec& v)
+        vec ( )
+
+    # This class definition is out of date.  I have unrolled the template
+    # below.
+    cdef cppclass cell_locally_essential[id_t, real_t]:
+        id_t m_neighbor[6]
+        id_t m_father
+        id_t m_son[8]
+        real_t m_xg[3]
+        id_t m_cpu
+
+        char m_pos
+
+        cell_locally_essential()
+
+        bint is_refined(int ison)
+
+    cdef cppclass RAMSES_cell:
+        unsigned m_neighbour[6]
+        unsigned m_father
+        unsigned m_son[8]
+        float m_xg[3]
+        unsigned m_cpu
+
+        char m_pos
+
+        RAMSES_cell()
+
+        bint is_refined(int ison)
+
+    cdef cppclass cell_simple[id_t, real_t]:
+        id_t m_son[1]
+        real_t m_xg[3]
+        id_t m_cpu
+
+        char m_pos
+        cell_simple()
+        bint is_refined(int ison)
+
+    # AMR level implementation
+
+    # This class definition is out of date.  I have unrolled the template
+    # below.
+    cdef cppclass level[Cell_]:
+        unsigned m_ilevel
+        vector[Cell_] m_level_cells
+        double m_xc[8]
+        double m_yc[8]
+        double m_zc[8]
+
+        # I skipped the typedefs here
+
+        double m_dx
+        unsigned m_nx
+        level (unsigned ilevel)
+
+        void register_cell( Cell_ cell )
+        vector[Cell_].iterator begin()
+        vector[Cell_].iterator end()
+
+        Cell_& operator[]( unsigned i)
+        unsigned size()
+
+    cdef cppclass RAMSES_level:
+        unsigned m_ilevel
+        vector[RAMSES_cell] m_level_cells
+        double m_xc[8]
+        double m_yc[8]
+        double m_zc[8]
+
+        # I skipped the typedefs here
+
+        double m_dx
+        unsigned m_nx
+        RAMSES_level (unsigned ilevel)
+
+        void register_cell( RAMSES_cell cell )
+        vector[RAMSES_cell].iterator begin()
+        vector[RAMSES_cell].iterator end()
+
+        RAMSES_cell& operator[]( unsigned i)
+        unsigned size()
+
+    # This class definition is out of date.  I have unrolled the template
+    # below.
+    cdef cppclass tree[Cell_, Level_]:
+        cppclass header:
+            vector[int] nx
+            vector[int] nout
+            vector[int] nsteps
+            int ncpu
+            int ndim
+            int nlevelmax
+            int ngridmax
+            int nboundary
+            int ngrid_current
+            double boxlen
+            vector[double] tout
+            vector[double] aout
+            vector[double] dtold
+            vector[double] dtnew
+            vector[double] cosm
+            vector[double] timing
+            double t
+            double mass_sph
+
+        vector[Level_] m_AMR_levels
+        vector[unsigned] mheadl, m_numbl, m_taill
+
+        int m_cpu
+        int m_minlevel
+        int m_maxlevel
+        string m_fname
+        unsigned m_ncoarse
+        header m_header
+
+        # This is from later on in the .hh file ... I don't think we need them
+        # typedef proto_iterator<const tree*> const_iterator;
+        # typedef proto_iterator<tree *> iterator;
+
+        tree (snapshot &snap, int cpu, int maxlevel, int minlevel = 1)
+
+    cppclass tree_iterator "RAMSES::AMR::RAMSES_tree::iterator":
+        tree_iterator operator*()
+        RAMSES_cell operator*()
+        tree_iterator begin()
+        tree_iterator end()
+        tree_iterator to_parent()
+        tree_iterator get_parent()
+        void next()
+        bint operator!=(tree_iterator other)
+        unsigned get_cell_father()
+        bint is_finest(int ison)
+        int get_absolute_position()
+        int get_domain()
+
+    cdef cppclass RAMSES_tree:
+        # This is, strictly speaking, not a header.  But, I believe it is
+        # going to work alright.
+        cppclass header:
+            vector[int] nx
+            vector[int] nout
+            vector[int] nsteps
+            int ncpu
+            int ndim
+            int nlevelmax
+            int ngridmax
+            int nboundary
+            int ngrid_current
+            double boxlen
+            vector[double] tout
+            vector[double] aout
+            vector[double] dtold
+            vector[double] dtnew
+            vector[double] cosm
+            vector[double] timing
+            double t
+            double mass_sph
+
+        vector[RAMSES_level] m_AMR_levels
+        vector[unsigned] mheadl, m_numbl, m_taill
+
+        int m_cpu
+        int m_minlevel
+        int m_maxlevel
+        string m_fname
+        unsigned m_ncoarse
+        header m_header
+
+        unsigned size()
+
+        # This is from later on in the .hh file ... I don't think we need them
+        # typedef proto_iterator<const tree*> const_iterator;
+        # typedef proto_iterator<tree *> iterator;
+
+        RAMSES_tree(snapshot &snap, int cpu, int maxlevel, int minlevel)
+        void read()
+
+        tree_iterator begin(int ilevel)
+        tree_iterator end(int ilevel)
+
+        tree_iterator begin()
+        tree_iterator end()
+
+        vec[double] cell_pos_double "cell_pos<double>" (tree_iterator it, unsigned ind) 
+        vec[double] grid_pos_double "grid_pos<double>" (tree_iterator it)
+        vec[float] cell_pos_float "cell_pos<float>" (tree_iterator it, unsigned ind) 
+        vec[float] grid_pos_float "grid_pos<float>" (tree_iterator it)
+
+cdef extern from "RAMSES_amr_data.hh" namespace "RAMSES::HYDRO":
+    enum hydro_var:
+        density
+        velocity_x
+        velocity_y
+        velocity_z
+        pressure
+        metallicit
+
+    char ramses_hydro_variables[][64]
+
+    # There are a number of classes here that we could wrap and utilize.
+    # However, I will only wrap the methods I know we need.
+
+    # I have no idea if this will work.
+    cdef cppclass TreeTypeIterator[TreeType_]:
+        pass
+
+    # This class definition is out of date.  I have unrolled the template
+    # below.
+    cdef cppclass data[TreeType_, Real_]:
+        cppclass header:
+            unsigned ncpu
+            unsigned nvar
+            unsigned ndim
+            unsigned nlevelmax
+            unsigned nboundary
+            double gamma
+        string m_fname
+        header m_header
+
+        # I don't want to implement proto_data, so we put this here
+        Real_& cell_value(TreeTypeIterator[TreeType_] &it, int ind)
+
+        unsigned m_nvars
+        vector[string] m_varnames
+        map[string, unsigned] m_var_name_map
+
+        data(TreeType_ &AMRtree)
+        #_OutputIterator get_var_names[_OutputIterator](_OutputIterator names)
+        void read(string varname)
+
+    cdef cppclass RAMSES_hydro_data:
+        cppclass header:
+            unsigned ncpu
+            unsigned nvar
+            unsigned ndim
+            unsigned nlevelmax
+            unsigned nboundary
+            double gamma
+        string m_fname
+        header m_header
+
+        # I don't want to implement proto_data, so we put this here
+        double cell_value (tree_iterator &it, int ind)
+
+        unsigned m_nvars
+        vector[string] m_varnames
+        map[string, unsigned] m_var_name_map
+
+        RAMSES_hydro_data(RAMSES_tree &AMRtree)
+        #_OutputIterator get_var_names[_OutputIterator](_OutputIterator names)
+        void read(string varname)
+
+        vector[vector[double]] m_var_array
+
+cdef class RAMSES_tree_proxy:
+    cdef string *snapshot_name
+    cdef snapshot *rsnap
+
+    cdef RAMSES_tree** trees
+    cdef RAMSES_hydro_data*** hydro_datas
+
+    cdef int **loaded
+
+    cdef public object field_ind
+    cdef public object field_names
+
+    # We will store this here so that we have a record, independent of the
+    # header, of how many things we have allocated
+    cdef int ndomains, nfields
+    
+    def __cinit__(self, char *fn):
+        cdef int idomain, ifield, ii
+        cdef RAMSES_tree *local_tree
+        cdef RAMSES_hydro_data *local_hydro_data
+        self.snapshot_name = new string(fn)
+        self.rsnap = new snapshot(deref(self.snapshot_name), version3)
+        # We now have to get our field names to fill our array
+        self.ndomains = self.rsnap.m_header.ncpu
+        self.trees = <RAMSES_tree**>\
+            malloc(sizeof(RAMSES_tree*) * self.ndomains)
+        # ndomains is set before this loop so that every tree slot starts out NULL
+        for ii in range(self.ndomains): self.trees[ii] = NULL
+        self.hydro_datas = <RAMSES_hydro_data ***>\
+                       malloc(sizeof(RAMSES_hydro_data**) * self.ndomains)
+        
+        # Note we don't do ncpu + 1
+        for idomain in range(self.rsnap.m_header.ncpu):
+            # we don't delete local_tree
+            local_tree = new RAMSES_tree(deref(self.rsnap), idomain + 1,
+                                         self.rsnap.m_header.levelmax, 0)
+            local_tree.read()
+            local_hydro_data = new RAMSES_hydro_data(deref(local_tree))
+            self.hydro_datas[idomain] = <RAMSES_hydro_data **>\
+                malloc(sizeof(RAMSES_hydro_data*) * local_hydro_data.m_nvars)
+            for ii in range(local_hydro_data.m_nvars):
+                self.hydro_datas[idomain][ii] = \
+                    new RAMSES_hydro_data(deref(local_tree))
+            self.trees[idomain] = local_tree
+            # We do not delete the final snapshot, which we'll use later
+            #if idomain + 1 < self.rsnap.m_header.ncpu:
+            #    del local_hydro_data
+        # Only once, we read all the field names
+        self.nfields = local_hydro_data.m_nvars
+        cdef string *field_name
+        self.field_names = []
+        self.field_ind = {}
+        self.loaded = <int **> malloc(sizeof(int*) * self.ndomains)
+        for idomain in range(self.ndomains):
+            self.loaded[idomain] = <int *> malloc(
+                sizeof(int) * local_hydro_data.m_nvars)
+            for ifield in range(local_hydro_data.m_nvars):
+                self.loaded[idomain][ifield] = 0
+        for ifield in range(local_hydro_data.m_nvars):
+            field_name = &(local_hydro_data.m_varnames[ifield])
+            # Does this leak?
+            self.field_names.append(field_name.c_str())
+            self.field_ind[self.field_names[-1]] = ifield
+        # This all needs to be cleaned up in the deallocator
+
+    def get_domain_boundaries(self):
+        bounds = []
+        for i in range(self.rsnap.m_header.ncpu):
+            bounds.append((self.rsnap.ind_min[i],
+                           self.rsnap.ind_max[i]))
+        return bounds
+
+    def __dealloc__(self):
+        cdef int idomain, ifield
+        # To ensure that 'delete' is used, not 'free',
+        # we allocate temporary variables.
+        cdef RAMSES_tree *temp_tree
+        cdef RAMSES_hydro_data *temp_hdata
+        for idomain in range(self.ndomains):
+            for ifield in range(self.nfields):
+                temp_hdata = self.hydro_datas[idomain][ifield]
+                del temp_hdata
+            temp_tree = self.trees[idomain]
+            del temp_tree
+            free(self.loaded[idomain])
+            free(self.hydro_datas[idomain])
+        free(self.trees)
+        free(self.hydro_datas)
+        free(self.loaded)
+        if self.snapshot_name != NULL: del self.snapshot_name
+        if self.rsnap != NULL: del self.rsnap
+        
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def count_zones(self):
+        # We need to do simulation domains here
+
+        cdef unsigned idomain, ilevel
+        cdef RAMSES_tree *local_tree
+        cdef RAMSES_hydro_data *local_hydro_data
+        cdef RAMSES_level *local_level
+        cdef tree_iterator grid_it, grid_end
+
+        # All the loop-local pointers must be declared up here
+
+        cdef np.ndarray[np.int64_t, ndim=1] cell_count
+        cell_count = np.zeros(self.rsnap.m_header.levelmax + 1, 'int64')
+        cdef int local_count = 0
+        cdef int tree_count
+        for idomain in range(1, self.rsnap.m_header.ncpu + 1):
+            local_tree = self.trees[idomain - 1]
+            for ilevel in range(local_tree.m_maxlevel + 1):
+                local_count = 0
+                tree_count = 0
+                local_level = &local_tree.m_AMR_levels[ilevel]
+                grid_it = local_tree.begin(ilevel)
+                grid_end = local_tree.end(ilevel)
+                while grid_it != grid_end:
+                    local_count += (grid_it.get_domain() == idomain)
+                    tree_count += 1
+                    grid_it.next()
+                cell_count[ilevel] += local_count
+
+        return cell_count
+
+    cdef ensure_loaded(self, char *varname, int domain_index, int varindex = -1):
+        # this domain_index must be zero-indexed
+        if varindex == -1: varindex = self.field_ind[varname]
+        if self.loaded[domain_index][varindex] == 1:
+            return
+        cdef string *field_name = new string(varname)
+        print "READING FROM DISK", varname, domain_index, varindex
+        self.hydro_datas[domain_index][varindex].read(deref(field_name))
+        self.loaded[domain_index][varindex] = 1
+        del field_name
+
+    def clear_tree(self, char *varname, int domain_index):
+        # this domain_index must be zero-indexed
+        # We delete and re-create
+        cdef int varindex = self.field_ind[varname]
+        cdef string *field_name = new string(varname)
+        cdef RAMSES_hydro_data *temp_hdata
+        if self.loaded[domain_index][varindex] == 1:
+            temp_hdata = self.hydro_datas[domain_index][varindex]
+            del temp_hdata
+            self.hydro_datas[domain_index][varindex] = \
+                new RAMSES_hydro_data(deref(self.trees[domain_index]))
+            self.loaded[domain_index][varindex] = 0
+        del field_name
+
+    def get_file_info(self):
+        header_info = {}
+        header_info["ncpu"] = self.rsnap.m_header.ncpu
+        header_info["ndim"] = self.rsnap.m_header.ndim
+        header_info["levelmin"] = self.rsnap.m_header.levelmin
+        header_info["levelmax"] = self.rsnap.m_header.levelmax
+        header_info["ngridmax"] = self.rsnap.m_header.ngridmax
+        header_info["nstep_coarse"] = self.rsnap.m_header.nstep_coarse
+        header_info["boxlen"] = self.rsnap.m_header.boxlen
+        header_info["time"] = self.rsnap.m_header.time
+        header_info["aexp"] = self.rsnap.m_header.aexp
+        header_info["H0"] = self.rsnap.m_header.H0
+        header_info["omega_m"] = self.rsnap.m_header.omega_m
+        header_info["omega_l"] = self.rsnap.m_header.omega_l
+        header_info["omega_k"] = self.rsnap.m_header.omega_k
+        header_info["omega_b"] = self.rsnap.m_header.omega_b
+        header_info["unit_l"] = self.rsnap.m_header.unit_l
+        header_info["unit_d"] = self.rsnap.m_header.unit_d
+        header_info["unit_t"] = self.rsnap.m_header.unit_t
+
+        # Now we grab some from the trees
+        cdef np.ndarray[np.int32_t, ndim=1] top_grid_dims = np.zeros(3, "int32")
+        cdef int i
+        for i in range(3):
+            # Note that nx is really boundary conditions.  We always have 2.
+            top_grid_dims[i] = self.trees[0].m_header.nx[i]
+            top_grid_dims[i] = 2
+        header_info["nx"] = top_grid_dims
+
+        return header_info
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def fill_hierarchy_arrays(self, 
+                              np.ndarray[np.int32_t, ndim=1] top_grid_dims,
+                              np.ndarray[np.float64_t, ndim=2] left_edges,
+                              np.ndarray[np.float64_t, ndim=2] right_edges,
+                              np.ndarray[np.int32_t, ndim=2] grid_levels,
+                              np.ndarray[np.int64_t, ndim=2] grid_file_locations,
+                              np.ndarray[np.uint64_t, ndim=1] grid_hilbert_indices,
+                              np.ndarray[np.int32_t, ndim=2] child_mask,
+                              np.ndarray[np.float64_t, ndim=1] domain_left,
+                              np.ndarray[np.float64_t, ndim=1] domain_right):
+        # We need to do simulation domains here
+
+        cdef unsigned idomain, ilevel
+
+        # All the loop-local pointers must be declared up here
+        cdef RAMSES_tree *local_tree
+        cdef RAMSES_hydro_data *local_hydro_data
+        cdef unsigned father
+
+        cdef tree_iterator grid_it, grid_end, father_it
+        cdef vec[double] gvec
+        cdef int grid_ind = 0, grid_aind = 0
+        cdef unsigned parent_ind
+        cdef bint ci
+        cdef double pos[3]
+        cdef double grid_half_width 
+        cdef unsigned long rv
+
+        cdef np.int32_t rr
+        cdef int i
+        cdef np.ndarray[np.int64_t, ndim=1] level_cell_counts
+        level_cell_counts = np.zeros(self.rsnap.m_header.levelmax + 1, 'int64')
+        for idomain in range(1, self.rsnap.m_header.ncpu + 1):
+            local_tree = self.trees[idomain - 1]
+            for ilevel in range(local_tree.m_maxlevel + 1):
+                # this gets overwritten for every domain, which is okay
+                level_cell_counts[ilevel] = grid_ind 
+                #grid_half_width = self.rsnap.m_header.boxlen / \
+                grid_half_width = 1.0 / \
+                    (2**(ilevel) * top_grid_dims[0])
+                grid_it = local_tree.begin(ilevel)
+                grid_end = local_tree.end(ilevel)
+                while grid_it != grid_end:
+                    if grid_it.get_domain() != idomain:
+                        grid_ind += 1
+                        grid_it.next()
+                        continue
+                    gvec = local_tree.grid_pos_double(grid_it)
+                    left_edges[grid_aind, 0] = pos[0] = gvec.x - grid_half_width
+                    left_edges[grid_aind, 1] = pos[1] = gvec.y - grid_half_width
+                    left_edges[grid_aind, 2] = pos[2] = gvec.z - grid_half_width
+                    for i in range(3):
+                        pos[i] = (pos[i] - domain_left[i]) / (domain_right[i] - domain_left[i])
+                    right_edges[grid_aind, 0] = gvec.x + grid_half_width
+                    right_edges[grid_aind, 1] = gvec.y + grid_half_width
+                    right_edges[grid_aind, 2] = gvec.z + grid_half_width
+                    grid_levels[grid_aind, 0] = ilevel
+                    # Now the harder part
+                    father_it = grid_it.get_parent()
+                    grid_file_locations[grid_aind, 0] = <np.int64_t> idomain
+                    grid_file_locations[grid_aind, 1] = grid_ind - level_cell_counts[ilevel]
+                    parent_ind = father_it.get_absolute_position()
+                    if ilevel > 0:
+                        # We calculate the REAL parent index
+                        grid_file_locations[grid_aind, 2] = \
+                            level_cell_counts[ilevel - 1] + parent_ind
+                    else:
+                        grid_file_locations[grid_aind, 2] = -1
+                    for ci in range(8):
+                        rr = <np.int32_t> grid_it.is_finest(ci)
+                        child_mask[grid_aind, ci] = rr
+                    grid_ind += 1
+                    grid_aind += 1
+                    grid_it.next()
+
+    def read_oct_grid(self, char *field, int level, int domain, int grid_id):
+
+        self.ensure_loaded(field, domain - 1)
+        cdef int varindex = self.field_ind[field]
+        cdef int i
+
+        cdef np.ndarray[np.float64_t, ndim=3] tr = np.empty((2,2,2), dtype='float64',
+                                                   order='F')
+        cdef tree_iterator grid_it, grid_end
+        cdef double* data = <double*> tr.data
+
+        cdef RAMSES_tree *local_tree = self.trees[domain - 1]
+        cdef RAMSES_hydro_data *local_hydro_data = self.hydro_datas[domain - 1][varindex]
+
+        #inline ValueType_& cell_value( const typename TreeType_::iterator& it,
+        #                               int ind )
+        #{
+        #   unsigned ipos   = it.get_absolute_position();
+        #   unsigned ilevel = it.get_level();//-m_minlevel;
+        #   return (m_var_array[ilevel])[m_twotondim*ipos+ind];
+        #}
+        
+        for i in range(8): 
+            data[i] = local_hydro_data.m_var_array[level][8*grid_id+i]
+        return tr
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def read_grid(self, int varindex, char *field,
+                  np.ndarray[np.int64_t, ndim=1] start_index,
+                  np.ndarray[np.int32_t, ndim=1] grid_dims,
+                  np.ndarray[np.float64_t, ndim=3] data,
+                  np.ndarray[np.int32_t, ndim=3] filled,
+                  int level, int ref_factor,
+                  np.ndarray[np.int64_t, ndim=2] component_grid_info):
+        cdef RAMSES_tree *local_tree = NULL
+        cdef RAMSES_hydro_data *local_hydro_data = NULL
+
+        cdef int gi, i, j, k, domain, offset
+        cdef int ir, jr, kr
+        cdef int n
+        cdef int offi, offj, offk, odind
+        cdef np.int64_t di, dj, dk
+        cdef np.int32_t og_start_index[3]
+        cdef np.float64_t temp_data
+        cdef np.int64_t end_index[3]
+        cdef int to_fill = 0
+        # Note that indexing into a cell is:
+        #   (k*2 + j)*2 + i
+        for i in range(3):
+            end_index[i] = start_index[i] + grid_dims[i]
+        for gi in range(component_grid_info.shape[0]):
+            domain = component_grid_info[gi,0]
+            if domain == 0: continue
+            self.ensure_loaded(field, domain - 1, varindex)
+            local_tree = self.trees[domain - 1]
+            local_hydro_data = self.hydro_datas[domain - 1][varindex]
+            offset = component_grid_info[gi,1]
+            for n in range(3): og_start_index[n] = component_grid_info[gi,3+n]
+            for i in range(2*ref_factor):
+                di = i + og_start_index[0] * ref_factor
+                if di < start_index[0] or di >= end_index[0]: continue
+                ir = <int> (i / ref_factor)
+                for j in range(2 * ref_factor):
+                    dj = j + og_start_index[1] * ref_factor
+                    if dj < start_index[1] or dj >= end_index[1]: continue
+                    jr = <int> (j / ref_factor)
+                    for k in range(2 * ref_factor):
+                        dk = k + og_start_index[2] * ref_factor
+                        if dk < start_index[2] or dk >= end_index[2]: continue
+                        kr = <int> (k / ref_factor)
+                        offi = di - start_index[0]
+                        offj = dj - start_index[1]
+                        offk = dk - start_index[2]
+                        #print offi, filled.shape[0],
+                        #print offj, filled.shape[1],
+                        #print offk, filled.shape[2]
+                        if filled[offi, offj, offk] == 1: continue
+
+                        odind = (kr*2 + jr)*2 + ir
+                        temp_data = local_hydro_data.m_var_array[
+                                level][8*offset + odind]
+                        data[offi, offj, offk] = temp_data
+                        filled[offi, offj, offk] = 1
+                        to_fill += 1
+        return to_fill
+
+cdef class ProtoSubgrid:
+    cdef np.int64_t *signature[3]
+    cdef np.int64_t left_edge[3]
+    cdef np.int64_t right_edge[3]
+    cdef np.int64_t dimensions[3]
+    cdef public np.float64_t efficiency
+    cdef np.int64_t *sigs[3]
+    cdef public object grid_file_locations
+    cdef public object dd
+        
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def __cinit__(self,
+                   np.ndarray[np.int64_t, ndim=1] left_index,
+                   np.ndarray[np.int64_t, ndim=1] dimensions, 
+                   np.ndarray[np.int64_t, ndim=2] left_edges,
+                   np.ndarray[np.int64_t, ndim=2] grid_file_locations):
+        # This also includes the shrinking step.
+        cdef int i, ci, ng = left_edges.shape[0]
+        cdef np.ndarray temp_arr
+        cdef int l0, r0, l1, r1, l2, r2, i0, i1, i2
+        cdef np.int64_t temp_l[3], temp_r[3], ncells
+        cdef np.float64_t efficiency
+        for i in range(3):
+            temp_l[i] = left_index[i] + dimensions[i]
+            temp_r[i] = left_index[i]
+            self.signature[i] = NULL
+        for gi in range(ng):
+            if left_edges[gi,0] > left_index[0]+dimensions[0] or \
+               left_edges[gi,0] + 2 < left_index[0] or \
+               left_edges[gi,1] > left_index[1]+dimensions[1] or \
+               left_edges[gi,1] + 2 < left_index[1] or \
+               left_edges[gi,2] > left_index[2]+dimensions[2] or \
+               left_edges[gi,2] + 2 < left_index[2]:
+               #print "Skipping grid", gi, "which lies outside our box"
+               continue
+            for i in range(3):
+                temp_l[i] = i64min(left_edges[gi,i], temp_l[i])
+                temp_r[i] = i64max(left_edges[gi,i] + 2, temp_r[i])
+        for i in range(3):
+            self.left_edge[i] = i64max(temp_l[i], left_index[i])
+            self.right_edge[i] = i64min(temp_r[i], left_index[i] + dimensions[i])
+            self.dimensions[i] = self.right_edge[i] - self.left_edge[i]
+            if self.dimensions[i] <= 0:
+                self.efficiency = -1.0
+                return
+            self.sigs[i] = <np.int64_t *> malloc(
+                                sizeof(np.int64_t) * self.dimensions[i])
+            for gi in range(self.dimensions[i]): self.sigs[i][gi] = 0
+        
+        # My guess is that this whole loop could be done more efficiently.
+        # However, this is clear and straightforward, so it is a good first
+        # pass.
+        cdef np.int64_t *sig0, *sig1, *sig2
+        sig0 = self.sigs[0]
+        sig1 = self.sigs[1]
+        sig2 = self.sigs[2]
+        efficiency = 0.0
+        cdef int used
+        cdef np.ndarray[np.int32_t, ndim=1] mask
+        mask = np.zeros(ng, 'int32')
+        used = 0
+        for gi in range(ng):
+            for l0 in range(2):
+                i0 = left_edges[gi, 0] + l0
+                if i0 < self.left_edge[0]: continue
+                if i0 >= self.right_edge[0]: break
+                for l1 in range(2):
+                    i1 = left_edges[gi, 1] + l1
+                    if i1 < self.left_edge[1]: continue
+                    if i1 >= self.right_edge[1]: break
+                    for l2 in range(2):
+                        i2 = left_edges[gi, 2] + l2
+                        if i2 < self.left_edge[2]: continue
+                        if i2 >= self.right_edge[2]: break
+                        i = i0 - self.left_edge[0]
+                        sig0[i] += 1
+                        i = i1 - self.left_edge[1]
+                        sig1[i] += 1
+                        i = i2 - self.left_edge[2]
+                        sig2[i] += 1
+                        efficiency += 1
+                        mask[gi] = 1
+            used += mask[gi]
+        cdef np.ndarray[np.int64_t, ndim=2] gfl
+        gfl = np.zeros((used, 6), 'int64')
+        used = 0
+        self.grid_file_locations = gfl
+        for gi in range(ng):
+            if mask[gi] == 1:
+                grid_file_locations[gi,3] = left_edges[gi,0]
+                grid_file_locations[gi,4] = left_edges[gi,1]
+                grid_file_locations[gi,5] = left_edges[gi,2]
+                for i in range(6):
+                    gfl[used, i] = grid_file_locations[gi,i]
+                used += 1
+         
+        self.dd = np.ones(3, dtype='int64')
+        for i in range(3):
+            efficiency /= self.dimensions[i]
+            self.dd[i] = self.dimensions[i]
+        #print "Efficiency is %0.3e" % (efficiency)
+        self.efficiency = efficiency
+
+    def __dealloc__(self):
+        free(self.sigs[0])
+        free(self.sigs[1])
+        free(self.sigs[2])
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef void find_split(self, int *tr,):
+        # First look for zeros
+        cdef int i, center, ax
+        cdef np.ndarray[ndim=1, dtype=np.int64_t] axes
+        cdef np.int64_t strength, zcstrength, zcp
+        axes = np.argsort(self.dd)[::-1]
+        cdef np.int64_t *sig
+        for axi in range(3):
+            ax = axes[axi] #iterate over domain dimensions
+            center = self.dimensions[ax] / 2 
+            sig = self.sigs[ax] #an array for the dimension, number of cells along that dim
+            for i in range(self.dimensions[ax]):
+                if sig[i] == 0 and i > 0 and i < self.dimensions[ax] - 1:
+                    #print "zero: %s (%s)" % (i, self.dimensions[ax])
+                    tr[0] = 0; tr[1] = ax; tr[2] = i
+                    return
+        zcstrength = 0
+        zcp = 0
+        zca = -1
+        cdef int temp
+        cdef np.int64_t *sig2d
+        for axi in range(3):
+            ax = axes[axi]
+            sig = self.sigs[ax]
+            sig2d = <np.int64_t *> malloc(sizeof(np.int64_t) * self.dimensions[ax])
+            sig2d[0] = sig2d[self.dimensions[ax]-1] = 0
+            for i in range(1, self.dimensions[ax] - 1):
+                sig2d[i] = sig[i-1] - 2*sig[i] + sig[i+1]
+            for i in range(1, self.dimensions[ax] - 1):
+                if sig2d[i] * sig2d[i+1] <= 0:
+                    strength = labs(sig2d[i] - sig2d[i+1])
+                    if (strength > zcstrength) or \
+                       (strength == zcstrength and (abs(center - i) <
+                                                    abs(center - zcp))):
+                        zcstrength = strength
+                        zcp = i
+                        zca = ax
+            free(sig2d)
+        #print "zcp: %s (%s)" % (zcp, self.dimensions[ax])
+        tr[0] = 1; tr[1] = ax; tr[2] = zcp
+        return
+
+    @cython.wraparound(False)
+    cdef void find_split_center(self, int *tr,):
+        # First look for zeros
+        cdef int i, center, ax
+        cdef int flip
+        cdef np.ndarray[ndim=1, dtype=np.int64_t] axes
+        cdef np.int64_t strength, zcstrength, zcp
+        axes = np.argsort(self.dd)[::-1]
+        cdef np.int64_t *sig
+        for axi in range(3):
+            ax = axes[axi] #iterate over domain dimensions
+            center = self.dimensions[ax] / 2 
+            sig = self.sigs[ax] #an array for the dimension, number of cells along that dim
+            #frequently get stuck with many zeroes near the edge of the grid
+            #let's start from the middle, working out
+            for j in range(self.dimensions[ax]/2):
+                flip = 1
+                i = self.dimensions[ax]/2+j
+                if sig[i] == 0 and i > 0 and i < self.dimensions[ax] - 1:
+                    #print "zero: %s (%s)" % (i, self.dimensions[ax])
+                    tr[0] = 0; tr[1] = ax; tr[2] = i
+                    return
+                i = self.dimensions[ax]/2-j
+                if sig[i] == 0 and i > 0 and i < self.dimensions[ax] - 1:
+                    #print "zero: %s (%s)" % (i, self.dimensions[ax])
+                    tr[0] = 0; tr[1] = ax; tr[2] = i
+                    return
+                    
+                
+        zcstrength = 0
+        zcp = 0
+        zca = -1
+        cdef int temp
+        cdef np.int64_t *sig2d
+        for axi in range(3):
+            ax = axes[axi]
+            sig = self.sigs[ax]
+            sig2d = <np.int64_t *> malloc(sizeof(np.int64_t) * self.dimensions[ax])
+            sig2d[0] = sig2d[self.dimensions[ax]-1] = 0
+            for i in range(1, self.dimensions[ax] - 1):
+                sig2d[i] = sig[i-1] - 2*sig[i] + sig[i+1]
+            for i in range(1, self.dimensions[ax] - 1):
+                if sig2d[i] * sig2d[i+1] <= 0:
+                    strength = labs(sig2d[i] - sig2d[i+1])
+                    if (strength > zcstrength) or \
+                       (strength == zcstrength and (abs(center - i) <
+                                                    abs(center - zcp))):
+                        zcstrength = strength
+                        zcp = i
+                        zca = ax
+            free(sig2d)
+        #print "zcp: %s (%s)" % (zcp, self.dimensions[ax])
+        tr[0] = 1; tr[1] = ax; tr[2] = zcp
+        return
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def get_properties(self):
+        cdef np.ndarray[np.int64_t, ndim=2] tr = np.empty((3,3), dtype='int64')
+        cdef int i
+        for i in range(3):
+            tr[0,i] = self.left_edge[i]
+            tr[1,i] = self.right_edge[i]
+            tr[2,i] = self.dimensions[i]
+        return tr
+
+@cython.cdivision
+cdef np.int64_t graycode(np.int64_t x):
+    return x^(x>>1)
+
+@cython.cdivision
+cdef np.int64_t igraycode(np.int64_t x):
+    cdef np.int64_t i, j
+    if x == 0:
+        return x
+    m = <np.int64_t> ceil(log2(x)) + 1
+    i, j = x, 1
+    while j < m:
+        i = i ^ (x>>j)
+        j += 1
+    return i
+
+@cython.cdivision
+cdef np.int64_t direction(np.int64_t x, np.int64_t n):
+    #assert x < 2**n
+    if x == 0:
+        return 0
+    elif x%2 == 0:
+        return tsb(x-1, n)%n
+    else:
+        return tsb(x, n)%n
+
+@cython.cdivision
+cdef np.int64_t tsb(np.int64_t x, np.int64_t width):
+    #assert x < 2**width
+    cdef np.int64_t i = 0
+    while x&1 and i <= width:
+        x = x >> 1
+        i += 1
+    return i
+
+@cython.cdivision
+cdef np.int64_t bitrange(np.int64_t x, np.int64_t width,
+                         np.int64_t start, np.int64_t end):
+    return x >> (width-end) & ((2**(end-start))-1)
+
+@cython.cdivision
+cdef np.int64_t rrot(np.int64_t x, np.int64_t i, np.int64_t width):
+    i = i%width
+    x = (x>>i) | (x<<width-i)
+    return x&(2**width-1)
+
+@cython.cdivision
+cdef np.int64_t lrot(np.int64_t x, np.int64_t i, np.int64_t width):
+    i = i%width
+    x = (x<<i) | (x>>width-i)
+    return x&(2**width-1)
+
+@cython.cdivision
+cdef np.int64_t transform(np.int64_t entry, np.int64_t direction,
+                          np.int64_t width, np.int64_t x):
+    return rrot((x^entry), direction + 1, width)
+
+@cython.cdivision
+cdef np.int64_t entry(np.int64_t x):
+    if x == 0: return 0
+    return graycode(2*((x-1)/2))
+
+@cython.cdivision
+def get_hilbert_indices(int order, np.ndarray[np.int64_t, ndim=2] left_index):
+    # This is inspired by the scurve package by user cortesi on GH.
+    cdef int o, i
+    cdef np.int64_t x, w, h, e, d, l, b
+    cdef np.int64_t p[3]
+    cdef np.ndarray[np.int64_t, ndim=1] hilbert_indices
+    hilbert_indices = np.zeros(left_index.shape[0], 'int64')
+    for o in range(left_index.shape[0]):
+        p[0] = left_index[o, 0]
+        p[1] = left_index[o, 1]
+        p[2] = left_index[o, 2]
+        h = e = d = 0
+        for i in range(order):
+            l = 0
+            for x in range(3):
+                b = bitrange(p[3-x-1], order, i, i+1)
+                l |= (b<<x)
+            l = transform(e, d, 3, l)
+            w = igraycode(l)
+            e = e ^ lrot(entry(w), d+1, 3)
+            d = (d + direction(w, 3) + 1)%3
+            h = (h<<3)|w
+        hilbert_indices[o] = h
+    return hilbert_indices
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def get_array_indices_lists(np.ndarray[np.int64_t, ndim=1] ind, 
+                            np.ndarray[np.int64_t, ndim=1] uind,
+                            np.ndarray[np.int64_t, ndim=2] lefts,
+                            np.ndarray[np.int64_t, ndim=2] files):
+    #ind are the hilbert indices 
+    #uind are the unique hilbert indices                        
+    #count[n] keeps track of how many times the nth index of uind occurs in ind
+    
+    cdef np.ndarray[np.int64_t, ndim=1] count = np.zeros(uind.shape[0], 'int64')
+    cdef int n, i
+    cdef np.int64_t mi, mui
+    
+    #fill in the count array
+    for i in range(ind.shape[0]):
+        mi = ind[i]
+        for n in range(uind.shape[0]):
+            if uind[n] == mi:
+                count[n] += 1
+                break
+    
+    cdef np.int64_t **alefts
+    cdef np.int64_t **afiles
+    afiles = <np.int64_t **> malloc(sizeof(np.int64_t *) * uind.shape[0])
+    alefts = <np.int64_t **> malloc(sizeof(np.int64_t *) * uind.shape[0])
+    cdef int *li = <int *> malloc(sizeof(int) * uind.shape[0])
+    cdef np.ndarray[np.int64_t, ndim=2] locations
+    cdef np.ndarray[np.int64_t, ndim=2] left
+    all_locations = []
+    all_lefts = []
+    
+    #having measured the repetition of each hilbert index,
+    #we can now declare how much memory we will use
+    for n in range(uind.shape[0]):
+        locations = np.zeros((count[n], 6), 'int64')
+        left = np.zeros((count[n], 3), 'int64')
+        all_locations.append(locations)
+        all_lefts.append(left)
+        afiles[n] = <np.int64_t *> locations.data
+        alefts[n] = <np.int64_t *> left.data
+        li[n] = 0
+    
+    cdef int fi
+    #now arrange all_locations and all_lefts sequentially
+    #such that when they return to python
+    #the 1d array mutates into a list of lists?
+    for i in range(ind.shape[0]):
+        mi = ind[i]
+        for n in range(uind.shape[0]):
+            if uind[n] == mi:
+                for fi in range(3):
+                    alefts[n][li[n] * 3 + fi] = lefts[i, fi]
+                for fi in range(6):
+                    afiles[n][li[n] * 6 + fi] = files[i, fi]
+                li[n] += 1
+                break
+    free(afiles)
+    free(alefts)
+    return all_locations, all_lefts
+
+def recursive_patch_splitting(ProtoSubgrid psg,
+        np.ndarray[np.int64_t, ndim=1] dims,
+        np.ndarray[np.int64_t, ndim=1] ind,
+        np.ndarray[np.int64_t, ndim=2] left_index,
+        np.ndarray[np.int64_t, ndim=2] fl,
+        int num_deep = 0,
+        float min_eff = 0.1,
+        int use_center=0,
+        long split_on_vol = 0):
+    cdef ProtoSubgrid L, R
+    cdef np.ndarray[np.int64_t, ndim=1] dims_l, li_l
+    cdef np.ndarray[np.int64_t, ndim=1] dims_r, li_r
+    cdef int tt, ax, fp, i, j, k, gi
+    cdef int tr[3]
+    cdef long volume  =0
+    cdef int max_depth = 40
+    volume = dims[0]*dims[1]*dims[2]
+    if split_on_vol>0:
+        if volume < split_on_vol:
+            return [psg]
+    if num_deep > max_depth:
+        psg.efficiency = min_eff
+        return [psg]
+    if (psg.efficiency > min_eff or psg.efficiency < 0.0):
+        return [psg]
+    if not use_center:    
+        psg.find_split(tr) #default
+    else:
+        psg.find_split_center(tr)    
+        
+    tt = tr[0]
+    ax = tr[1]
+    fp = tr[2]
+    if (fp % 2) != 0:
+        if dims[ax] != fp + 1:
+            fp += 1
+        else:
+            fp -= 1
+    dims_l = dims.copy()
+    dims_l[ax] = fp
+    li_l = ind.copy()
+    for i in range(3):
+        if dims_l[i] <= 0: return [psg]
+    dims_r = dims.copy()
+    dims_r[ax] -= fp
+    li_r = ind.copy()
+    li_r[ax] += fp
+    for i in range(3):
+        if dims_r[i] <= 0: return [psg]
+    L = ProtoSubgrid(li_l, dims_l, left_index, fl)
+    if L.efficiency > 1.0: raise RuntimeError
+    if L.efficiency <= 0.0: rv_l = []
+    elif L.efficiency < min_eff:
+        rv_l = recursive_patch_splitting(L, dims_l, li_l,
+                left_index, fl, num_deep + 1, min_eff,use_center,split_on_vol)
+    else:
+        rv_l = [L]
+    R = ProtoSubgrid(li_r, dims_r, left_index, fl)
+    if R.efficiency > 1.0: raise RuntimeError
+    if R.efficiency <= 0.0: rv_r = []
+    elif R.efficiency < min_eff:
+        rv_r = recursive_patch_splitting(R, dims_r, li_r,
+                left_index, fl, num_deep + 1, min_eff,use_center,split_on_vol)
+    else:
+        rv_r = [R]
+    return rv_r + rv_l
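
For orientation, a short sketch of how the proxy class and the Hilbert helper in this template file could be exercised once the extension is built; the import path, the snapshot filename, and the curve order are placeholders, not part of this commit.

    import numpy as np
    # assumed import path once _artio_reader.pyx is compiled in place
    from yt.frontends.artio._artio_reader import RAMSES_tree_proxy, get_hilbert_indices

    proxy = RAMSES_tree_proxy("output_00001/info_00001.txt")   # placeholder info file
    header = proxy.get_file_info()          # dict with ncpu, levelmin/levelmax, units, ...
    cells_per_level = proxy.count_zones()   # int64 array, one entry per AMR level
    bounds = proxy.get_domain_boundaries()  # (ind_min, ind_max) pair per domain

    # Hilbert indices for a set of integer grid left-edges at a given curve order
    left_index = np.zeros((4, 3), dtype="int64")
    hilbert = get_hilbert_indices(6, left_index)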

File yt/frontends/artio/api.py

+"""
+API for yt.frontends.ramses
+
+Author: Matthew Turk <matthewturk@gmail.com>
+Affiliation: UCSD
+Author: J.S. Oishi <jsoishi@gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Author: Britton Smith <brittonsmith@gmail.com>
+Affiliation: MSU
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .data_structures import \
+      RAMSESStaticOutput
+
+from .fields import \
+      RAMSESFieldInfo, \
+      add_ramses_field
+
+from .io import \
+      IOHandlerRAMSES

File yt/frontends/artio/artio_caller.pyx

+#struct artio_context_struct {
+#        MPI_Comm comm;
+#};
+cdef struct artio_context_struct: #in artio_mpi.h MPI_Comm
+    int comm
+
+#typedef struct artio_context_struct * artio_context;
+ctypedef artio_context_struct * artio_context
+
+cdef extern from "artio_headers/artio.c":
+
+     ctypedef int int64_t 
+
+     ctypedef struct param: 
+         int key_length
+         char key[64]
+         int val_length
+         int type 
+         char *value
+         
+     ctypedef struct list: 
+         param * head
+         param * tail
+         param * cursor
+         int iterate_flag 
+         int endian_swap 
+
+     #    artio_internal.h
+     ctypedef struct artio_particle_file:
+        #	artio_fh *ffh
+        int num_particle_files
+        int64_t *file_sfc_index
+        int64_t cache_sfc_begin
+        int64_t cache_sfc_end
+        int64_t *sfc_offset_table
+        
+        #/* maintained for consistency and user-error detection */
+        int num_species
+        int cur_file
+        int cur_species
+        int cur_particle
+        int64_t cur_sfc
+        int *num_primary_variables
+        int *num_secondary_variables
+        int *num_particles_per_species
+        
+     ctypedef struct artio_grid_file:
+        #        artio_fh *ffh
+        int num_grid_variables
+        int num_grid_files
+        int64_t *file_sfc_index
+        int64_t cache_sfc_begin
+        int64_t cache_sfc_end
+        int64_t *sfc_offset_table
+        
+        int file_max_level
+        #/* maintained for consistency and user-error detection */
+        int cur_file
+        int cur_num_levels
+        int cur_level
+        int cur_octs
+        int64_t cur_sfc
+        int *octs_per_level
+        
+        
+     ctypedef struct artio_file:
+        char file_prefix[256]
+        int endian_swap
+        int open_type
+        int open_mode
+        int rank
+        int num_procs
+        artio_context context #artio_mpi.h MPI_Comm
+        
+        int64_t *proc_sfc_index
+        int64_t proc_sfc_begin
+        int64_t proc_sfc_end
+        int64_t num_root_cells
+        
+        list param_list
+        artio_grid_file grid
+        artio_particle_file particle
+        
+        
+     artio_file artio_fileset_open(char * file_prefix, int type, artio_context context) 
+
+     void wrap_artio_fileset_open(char *file_prefix, int type)
+   
+
+#python only passes numbers and strings... no pointers or structures
+cpdef read_header(char * file_prefix, int type): 
+     wrap_artio_fileset_open(file_prefix, type)
+        
+

File yt/frontends/artio/artio_headers/artio.c

+/*
+ * artio.c
+ *
+ *  Created on: Feb 21, 2010
+ *  Author: Yongen Yu
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <math.h>
+
+#include "artio.h"
+#include "artio_internal.h"
+
+#ifdef ARTIO_MPI
+#include "artio_mpi.h"
+#endif
+
+artio_file artio_fileset_open(char * file_prefix, int type, artio_context context) ;
+
+/* simpler function for cpdef */
+void wrap_artio_fileset_open(char * file_prefix, int type) {
+    artio_file junk;
+    artio_context context=NULL;
+    printf("file_prefix %s type %d\n",file_prefix, type);
+    junk = artio_fileset_open(file_prefix, type, context);
+}
+
+artio_file artio_fileset_open(char * file_prefix, int type, artio_context context) {
+	artio_fh head_fh;
+	char filename[256];
+	int my_rank, num_procs;
+	int re;
+
+	artio_file handle = (artio_file)malloc(sizeof(struct artio_file_struct));
+	artio_parameter_list_init(&handle->param_list);
+
+#ifdef ARTIO_MPI
+	if(context == NULL)
+	  {
+	    fprintf(stderr,"Context cannot be NULL in the MPI mode.\n");
+	    return NULL;
+	  }
+	MPI_Comm_size(context->comm, &num_procs);
+	MPI_Comm_rank(context->comm, &my_rank);
+#else
+	num_procs = 1;
+	my_rank = 0;
+#endif /* MPI */
+
+	handle->open_mode = ARTIO_FILESET_READ;
+	handle->open_type = type;
+
+	handle->rank = my_rank;
+	handle->num_procs = num_procs;
+	handle->context = context;
+	strncpy(handle->file_prefix, file_prefix, 255);
+
+	/* open header file */
+	snprintf(filename, 255, "%s.art", file_prefix);
+	head_fh = artio_file_fopen(filename, 
+			ARTIO_MODE_READ | ARTIO_MODE_ACCESS | ARTIO_MODE_DIRECT, context);
+
+	if ( head_fh == NULL ) {
+            fprintf(stderr,"snl quitting fileset open: failed to open header\n");
+		return NULL;
+	}
+
+	re = artio_parameter_read(head_fh, &handle->param_list);
+	if ( re != ARTIO_SUCCESS ) {
+            fprintf(stderr,"snl quitting fileset open: failed to read header\n");
+		return NULL;
+	}
+
+	artio_file_fclose(head_fh);
+
+	artio_parameter_get_long(handle, "num_root_cells", &handle->num_root_cells);
+        printf("num_root_cells %lld\n",(long long)handle->num_root_cells); //snl temporary
+
+	/* default to accessing all sfc indices */
+	handle->proc_sfc_begin = 0;
+	handle->proc_sfc_end = handle->num_root_cells-1;
+	handle->proc_sfc_index = NULL;
+
+	/* open data files */
+	if (type & ARTIO_OPEN_PARTICLES) {
+		re = artio_particle_open(handle);
+		if ( re != ARTIO_SUCCESS ) {
+			return NULL;
+		}
+	}
+
+	if (type & ARTIO_OPEN_GRID) {
+		re = artio_grid_open(handle);
+		if ( re != ARTIO_SUCCESS ) {
+			return NULL;
+		}
+	}
+
+	return handle;
+}
+
+artio_file artio_fileset_create(char * file_prefix, int64_t root_cells, 
+		int64_t proc_sfc_begin, int64_t proc_sfc_end, artio_context context) {
+	int my_rank, num_procs;
+	int64_t *proc_sfc_index;
+	artio_file handle;
+
+#ifdef ARTIO_MPI
+	if(context == NULL)
+	  {
+	    fprintf(stderr,"Context cannot be NULL in the MPI mode.\n");
+	    return NULL;
+	  }
+
+	MPI_Comm_size(context->comm, &num_procs);
+	MPI_Comm_rank(context->comm, &my_rank);
+
+	if ( proc_sfc_begin < 0 || proc_sfc_end > root_cells ) {
+		return NULL;
+	}
+#else
+	num_procs = 1;
+	my_rank = 0;
+
+	if ( proc_sfc_begin != 0 || proc_sfc_end != root_cells-1 ) {
+		return NULL;
+	}
+#endif /* MPI */
+
+	handle = (artio_file)malloc(sizeof(struct artio_file_struct));
+	artio_parameter_list_init(&handle->param_list);
+
+	handle->open_mode = ARTIO_FILESET_WRITE;
+	handle->open_type = 0;
+
+	handle->rank = my_rank;
+	handle->num_procs = num_procs;
+	handle->context = context;
+	handle->num_root_cells = root_cells;
+
+	strncpy(handle->file_prefix, file_prefix, 255);
+
+	proc_sfc_index = (int64_t*)malloc((num_procs+1)*sizeof(int64_t));
+	if ( proc_sfc_index == NULL ) {
+		fprintf(stderr, "ERROR ALLOCATING MEMORY!\n");
+		exit(1);
+	}
+#ifdef ARTIO_MPI
+	MPI_Allgather( &proc_sfc_begin, 1, MPI_LONG_LONG, proc_sfc_index, 1, MPI_LONG_LONG, context->comm );
+#else
+	proc_sfc_index[0] = 0;
+#endif /* ARTIO_MPI */
+	proc_sfc_index[handle->num_procs] = root_cells;
+	handle->proc_sfc_index = proc_sfc_index;
+	handle->proc_sfc_begin = proc_sfc_begin;
+	handle->proc_sfc_end = proc_sfc_end;
+
+	artio_parameter_set_long(handle, "num_root_cells", root_cells);
+
+	return handle;
+}
+
+int artio_fileset_close(artio_file handle) {
+	char header_filename[256];
+	artio_fh head_fh;
+
+	if (handle->open_mode == ARTIO_FILESET_WRITE) {
+		snprintf(header_filename, 255, "%s.art", handle->file_prefix);
+		head_fh = artio_file_fopen(header_filename, 
+				ARTIO_MODE_WRITE | ARTIO_MODE_DIRECT |
+					   ((handle->rank == 0) ? ARTIO_MODE_ACCESS : 0), handle->context);
+
+		if (0 == handle->rank) {
+			artio_parameter_write(head_fh, &handle->param_list);
+		}
+
+		artio_file_fclose(head_fh);
+		free(handle->proc_sfc_index);
+	}
+
+	if (handle->open_type & ARTIO_OPEN_GRID) {
+		artio_grid_close(handle);
+		free(handle->grid);
+	}
+
+	if (handle->open_type & ARTIO_OPEN_PARTICLES) {
+		artio_particle_close(handle);
+		free(handle->particle);
+	}
+
+	artio_parameter_free_list(&handle->param_list);
+	free(handle);
+
+	return ARTIO_SUCCESS;
+}

File yt/frontends/artio/artio_headers/artio.h

+/*
+ * artio.h
+ *
+ *  Created on: Feb 21, 2010
+ *      Author: Yongen Yu
+ *  Modified: Jun 6, 2010 - Doug Rudd
+ *            Nov 18, 2010 - Doug Rudd
+ */
+
+#ifndef __ARTIO_H__
+#define __ARTIO_H__
+
+#include <stdint.h>
+
+#define ARTIO_FILESET_READ			0
+#define ARTIO_FILESET_WRITE			1
+
+#define ARTIO_OPEN_PARTICLES			1
+#define ARTIO_OPEN_GRID				2
+
+#define ARTIO_READ_LEAFS			1
+#define ARTIO_READ_REFINED			2
+#define	ARTIO_READ_ALL				3
+
+/* allocation strategy */
+#define ARTIO_ALLOC_EQUAL_SFC      		0
+#define ARTIO_ALLOC_EQUAL_PROC     		1
+#define ARTIO_ALLOC_MAX_FILE_SIZE  		2
+
+#define ARTIO_TYPE_STRING   0
+#define ARTIO_TYPE_CHAR     1
+#define ARTIO_TYPE_INT      2
+#define ARTIO_TYPE_FLOAT    3
+#define ARTIO_TYPE_DOUBLE   4
+#define ARTIO_TYPE_LONG     5
+
+/* error codes */
+#define ARTIO_SUCCESS				0
+
+#define ARTIO_ERR_PARAM_NOT_FOUND		1
+#define ARTIO_ERR_PARAM_INVALID_LENGTH		2
+#define ARTIO_ERR_PARAM_TYPE_MISMATCH		3
+#define ARTIO_ERR_PARAM_LENGTH_MISMATCH		4
+#define ARTIO_ERR_PARAM_LENGTH_INVALID		5
+#define ARTIO_ERR_PARAM_DUPLICATE		6
+
+#define ARTIO_ERR_INVALID_FILESET_MODE		100
+#define	ARTIO_ERR_INVALID_FILE_NUMBER		101
+#define ARTIO_ERR_INVALID_FILE_MODE		102
+#define ARTIO_ERR_INVALID_SFC_RANGE		103
+#define ARTIO_ERR_INVALID_SFC			104
+#define ARTIO_ERR_INVALID_STATE			105
+#define ARTIO_ERR_INVALID_SEEK			106
+#define ARTIO_ERR_INVALID_OCT_LEVELS		107
+#define ARTIO_ERR_INVALID_SPECIES		108
+#define ARTIO_ERR_INVALID_ALLOC_STRATEGY	109
+#define ARTIO_ERR_INVALID_LEVEL			110
+#define ARTIO_ERR_INVALID_PARAM_LIST		111
+#define ARTIO_ERR_INVALID_DATATYPE		112
+
+#define ARTIO_ERR_DATA_EXISTS			200
+#define ARTIO_ERR_INSUFFICIENT_DATA		201
+#define ARTIO_ERR_FILE_CREATE			202
+#define ARTIO_ERR_PARTICLE_FILE_NOT_FOUND	203
+#define ARTIO_ERR_GRID_FILE_NOT_FOUND		204
+#define ARTIO_ERR_PARTICLE_CORRUPTED		205
+#define ARTIO_ERR_GRID_CORRUPTED		206
+
+#define ARTIO_ERR_PARAM_CORRUPTED		207
+#define ARTIO_ERR_PARAM_CORRUPTED_MAGIC		208
+
+#define ARTIO_PARAMETER_EXHAUSTED		300
+
+typedef struct artio_file_struct * artio_file;
+typedef struct artio_param_list * artio_parameters;
+typedef struct artio_context_struct * artio_context;
+
+/*
+ * Description: Open the file
+ *
+ *  filename			The file prefix
+ *  type			combination of ARTIO_OPEN_PARTICLES and ARTIO_OPEN_GRID flags
+ */
+artio_file artio_fileset_open(char * file_name, int type, artio_context context);
+
+/**
+ * Description: Create fileset and begin populating header information
+ *
+ *  file_name			file name of refined cells
+ *  root_cells			the number of root level cells
+ *  proc_sfc_begin-end		the range of local space-filling-curve indices
+ *  handle			the artio file handle
+ *
+ */
+artio_file artio_fileset_create(char * file_prefix, 
+        int64_t root_cells, int64_t proc_sfc_begin, int64_t proc_sfc_end, artio_context context);
+
+/*
+ * Description:	Close the fileset and free the handle
+ */
+int artio_fileset_close(artio_file handle);
+
+/* public parameter interface */
+int artio_parameter_iterate( artio_file handle, char *key, int *type, int *length );
+int artio_parameter_get_array_length(artio_file handle, char * key, int *length);
+
+void artio_parameter_set_int(artio_file handle, char * key, int32_t value);
+int artio_parameter_get_int(artio_file handle, char * key, int32_t * value);
+
+void artio_parameter_set_int_array(artio_file handle, char * key, int length,
+		int32_t *values);
+int artio_parameter_get_int_array(artio_file handle, char * key, int length,
+		int32_t *values);
+
+void artio_parameter_set_string(artio_file handle, char * key, char * value);
+int artio_parameter_get_string(artio_file handle, char * key, char * value, int max_length);
+
+void artio_parameter_set_string_array(artio_file handle, char * key,
+		int length, char ** values);
+int artio_parameter_get_string_array(artio_file handle, char * key,
+		int length, char ** values, int max_length);
+
+void artio_parameter_set_float(artio_file handle, char * key, float value);
+int artio_parameter_get_float(artio_file handle, char * key, float * value);
+
+void artio_parameter_set_float_array(artio_file handle, char * key,
+		int length, float *values);
+int artio_parameter_get_float_array(artio_file handle, char * key,
+		int length, float * values);
+
+void artio_parameter_set_double(artio_file handle, char * key, double value);
+int  artio_parameter_get_double(artio_file handle, char * key, double * value);
+
+void artio_parameter_set_double_array(artio_file handle, char * key,
+		int length, double * values);
+int artio_parameter_get_double_array(artio_file handle, char * key,
+        int length, double *values);
+
+void artio_parameter_set_long(artio_file handle, char * key, int64_t value);
+int artio_parameter_get_long(artio_file handle, char * key, int64_t *value);
+
+void artio_parameter_set_long_array(artio_file handle, char * key,
+        int length, int64_t *values);
+int artio_parameter_get_long_array(artio_file handle, char * key,
+        int length, int64_t *values);
+
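
A short sketch of how the typed set/get pairs above are used; the key names "num_grid_files" and "box_size" and the values are illustrative only:

#include <stdint.h>
#include "artio.h"

/* Illustrative only: keys and values here are made up. */
static void parameter_example(artio_file handle) {
    int32_t num_files = 0;
    double box[3] = { 128.0, 128.0, 128.0 };

    /* setters populate the header of a fileset being written */
    artio_parameter_set_int(handle, "num_grid_files", 4);
    artio_parameter_set_double_array(handle, "box_size", 3, box);

    /* getters return ARTIO_SUCCESS or an ARTIO_ERR_PARAM_* code */
    if (artio_parameter_get_int(handle, "num_grid_files", &num_files)
            != ARTIO_SUCCESS) {
        /* key not present or of a different type */
    }
}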
+/* public grid interface */
+typedef void (* GridCallBack)(float * variables, int level, int refined,
+		int64_t sfc_index);
+
+/*
+ * Description:	Add a grid component to a fileset open for writing
+ *
+ *  handle			The fileset handle
+ *  num_grid_files		The number of grid files to create
+ *  allocation_strategy		How to apportion sfc indices to each grid file
+ *  num_grid_variables		The number of variables per cell
+ *  grid_variable_labels	Identifying labels for each variable
+ *  num_levels_per_root_tree	Maximum tree depth for each oct tree
+ *  num_octs_per_root_tree	Total octs in each oct tree
+ */
+int artio_fileset_add_grid(artio_file handle,
+        int num_grid_files, int allocation_strategy,
+        int num_grid_variables,
+        char ** grid_variable_labels,
+        int * num_levels_per_root_tree,
+        int * num_octs_per_root_tree );
+
+/*
+ * Description:	Write the variables of a root level cell and begin the oct tree
+ *			associated with that root cell
+ *
+ *  handle			The fileset handle
+ *  sfc				The sfc index of the root cell
+ *  variables			The variables of the root level cell
+ *  level			The depth of the oct tree associated with this root cell
+ *  num_octs_per_level		Array storing the number of octs at each level
+ */
+int artio_grid_write_root_cell_begin(artio_file handle, int64_t sfc, 
+		float * variables, int level, int * num_octs_per_level);
+
+/*
+ * Description:	Finish writing the current root cell and its oct tree
+ */
+int artio_grid_write_root_cell_end(artio_file handle);
+
+/*
+ * Description:	Begin writing octs at the given level
+ */
+int artio_grid_write_level_begin(artio_file handle, int level );
+
+/*
+ * Description:	Finish writing the current level of octs
+ */
+int artio_grid_write_level_end(artio_file handle);
+
+/*
+ * Description:	Write the data of a single oct tree node to the file
+ *
+ *  handle			The fileset handle
+ *  variables 			Array of variables for the eight cells belonging to this oct
+ *  refined			Flags marking which of the eight cells are further refined
+ */
+int artio_grid_write_oct(artio_file handle, float *variables, int *refined);
+
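
The write functions above imply a nested call order: begin a root cell, bracket each level with level begin/end, and write octs within a level. A minimal sketch for a root cell carrying a single child oct, assuming no more than eight grid variables (the values written are invented):

#include "artio.h"

/* Hedged sketch of the write ordering; the single-level tree and the
 * zeroed variable values are invented for illustration. */
static int write_one_root_tree(artio_file handle, int64_t sfc,
        int num_grid_variables) {
    float root_vars[8];              /* assumes num_grid_variables <= 8 */
    float oct_vars[8 * 8];           /* eight child cells per oct */
    int refined[8] = { 0 };          /* no further refinement below this oct */
    int num_octs_per_level[1] = { 1 };
    int i, status;

    for (i = 0; i < num_grid_variables; i++) root_vars[i] = 0.0f;
    for (i = 0; i < 8 * num_grid_variables; i++) oct_vars[i] = 0.0f;

    status = artio_grid_write_root_cell_begin(handle, sfc, root_vars,
            1, num_octs_per_level);
    if (status != ARTIO_SUCCESS) return status;

    artio_grid_write_level_begin(handle, 1);
    artio_grid_write_oct(handle, oct_vars, refined);
    artio_grid_write_level_end(handle);

    return artio_grid_write_root_cell_end(handle);
}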
+/*
+ * Description:	Read the variables of a root level cell and the layout of the oct tree
+ *              associated with that root cell
+ *
+ *  handle			The fileset handle
+ *  sfc				The sfc index of the root cell
+ *  num_tree_levels		The depth of the oct tree
+ *  num_octs_per_level		The number of octs at each level
+ */
+int artio_grid_read_root_cell_begin(artio_file handle, int64_t sfc, float *variables,
+		int *num_tree_levels, int *num_octs_per_level);
+
+/*
+ * Description:	Finish reading the current root cell and its oct tree
+ */
+int artio_grid_read_root_cell_end(artio_file handle);
+
+/*
+ * Description:	Begin reading octs at the given level
+ */
+int artio_grid_read_level_begin(artio_file handle, int level );
+
+/*
+ * Description:	Finish reading the current level of octs
+ */
+int artio_grid_read_level_end(artio_file handle);
+
+/*
+ * Description:	Read the data of a single oct tree node from the file
+ */
+int artio_grid_read_oct(artio_file handle, float *variables, int *refined);
+
+int artio_grid_cache_sfc_range(artio_file handle, int64_t sfc_start, int64_t sfc_end);
+
+/*
+ * Description:	Read a range of root trees and their octs
+ *
+ *  handle			the fileset handle
+ *  sfc1			the start sfc index
+ *  sfc2			the end sfc index
+ *  min_level_to_read		min level to read for each oct tree
+ *  max_level_to_read		max level to read for each oct tree
+ *  options			ARTIO_READ_LEAFS (1), ARTIO_READ_REFINED (2), or ARTIO_READ_ALL (3)
+ *  callback			callback function
+ */
+int artio_grid_read_sfc_range(artio_file handle, int64_t sfc1, int64_t sfc2, int min_level_to_read,
+		int max_level_to_read, int options, GridCallBack callback);
+
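
Grid reading is callback-driven: cache the sfc range of interest, then let artio_grid_read_sfc_range walk each tree and hand cells to the callback. A sketch with an invented sfc range and level bounds:

#include "artio.h"

/* Hedged sketch of the callback-driven read path; the variable handling,
 * sfc range, and level bounds are placeholders. */
static void count_leaf_cells(float *variables, int level, int refined,
        int64_t sfc_index) {
    (void)variables; (void)level; (void)refined; (void)sfc_index;
    /* accumulate values into a field buffer here */
}

static int read_some_octs(artio_file handle) {
    int64_t sfc_start = 0, sfc_end = 511;   /* hypothetical root-cell range */

    /* stage file offsets for the range, then walk the trees */
    int status = artio_grid_cache_sfc_range(handle, sfc_start, sfc_end);
    if (status != ARTIO_SUCCESS) return status;

    return artio_grid_read_sfc_range(handle, sfc_start, sfc_end,
            0 /* min level */, 10 /* max level */,
            ARTIO_READ_LEAFS, count_leaf_cells);
}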
+
+typedef void (* ParticleCallBack)(int64_t pid, 
+		double *primary_variables, float *secondary_variables, 
+		int species, int subspecies, int64_t sfc_index);
+
+/**
+ * Description:	Add a particle component to a fileset open for writing
+ *
+ *  handle					the artio file handle
+ *  num_particle_files				the number of particle files to create
+ *  allocation_strategy				how to apportion sfc indices to each particle file
+ *  num_species					number of particle species
+ *  species_labels				string identifier for each species
+ *  num_primary_variables			number of double-precision variables for each species
+ *  num_secondary_variables			number of single-precision variables for each species
+ *  primary_variable_labels_per_species	labels for the primary variables of each species
+ *  secondary_variable_labels_per_species	labels for the secondary variables of each species
+ *  num_particles_per_species_per_root_tree	particle counts per species for each root tree
+ */
+int artio_fileset_add_particles(artio_file handle, 
+        int num_particle_files, int allocation_strategy,
+        int num_species, char **species_labels,
+        int *num_primary_variables,
+        int *num_secondary_variables,
+        char ***primary_variable_labels_per_species,
+        char ***secondary_variable_labels_per_species,
+        int *num_particles_per_species_per_root_tree );
+
+/*
+ * Description:	Begin writing the particles associated with a root cell
+ *
+ *  handle				The fileset handle
+ *  sfc					The sfc index of the root cell
+ *  num_particles_per_species		The number of particles of each species in this root cell
+ */
+int artio_particle_write_root_cell_begin(artio_file handle, int64_t sfc,
+		int *num_particles_per_species);
+
+/*
+ * Description:	Finish writing the particles for the current root cell
+ */
+int artio_particle_write_root_cell_end(artio_file handle);
+
+/*
+ * Description:	Begin writing particles of the given species
+ */
+int artio_particle_write_species_begin(artio_file handle, int species );
+
+/*
+ * Description:	Finish writing particles of the current species
+ */
+int artio_particle_write_species_end(artio_file handle);
+
+/*
+ * Description:	Write the data of a single particle to the file
+ *
+ *  handle			The fileset handle
+ *  pid				The particle id
+ *  subspecies			The subspecies index of the particle
+ *  primary_variables		The double-precision variables of the particle
+ *  secondary_variables	The single-precision variables of the particle
+ */
+int artio_particle_write_particle(artio_file handle, int64_t pid, int subspecies, 
+			double* primary_variables, float *secondary_variables);
+
+/*
+ * Description:	Begin reading the particles associated with a root cell
+ *
+ *  handle				The fileset handle
+ *  sfc					The sfc index of the root cell
+ *  num_particle_per_species		The number of particles of each species in this root cell
+ */
+int artio_particle_read_root_cell_begin(artio_file handle, int64_t sfc, 
+			int * num_particle_per_species);
+
+/*
+ * Description:	Finish reading the particles for the current root cell
+ */
+int artio_particle_read_root_cell_end(artio_file handle);
+
+/*
+ * Description:	Begin reading particles of the given species
+ */
+int artio_particle_read_species_begin(artio_file handle, int species );
+
+/*
+ * Description:	Finish reading particles of the current species
+ */
+int artio_particle_read_species_end(artio_file handle);
+
+/*
+ * Description:	Read the data of a single particle from the file
+ */
+int artio_particle_read_particle(artio_file handle, int64_t *pid, int *subspecies,
+			double *primary_variables, float *secondary_variables);
+
+int artio_particle_cache_sfc_range(artio_file handle, int64_t sfc_start, int64_t sfc_end);
+
+/*
+ * Description: Read a segment of particles
+ *
+ *  handle			the fileset handle
+ *  sfc1			the start sfc index
+ *  sfc2			the end sfc index
+ *  start_species		the first particle species to read
+ *  end_species			the last particle species to read
+ *  callback			callback function
+ */
+int artio_particle_read_sfc_range(artio_file handle, 
+		int64_t sfc1, int64_t sfc2, 
+		int start_species, int end_species,
+		ParticleCallBack callback);
+
+#endif /* __ARTIO_H__ */
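
The particle interface above mirrors the grid interface; a sketch of reading one species over an arbitrary sfc range (the species index and range are placeholders, and which species is which depends on the fileset):

#include "artio.h"

/* Hedged sketch: copy what you need out of primary_variables /
 * secondary_variables in the callback. */
static void collect_particle(int64_t pid, double *primary_variables,
        float *secondary_variables, int species, int subspecies,
        int64_t sfc_index) {
    (void)pid; (void)primary_variables; (void)secondary_variables;
    (void)species; (void)subspecies; (void)sfc_index;
}

static int read_one_species(artio_file handle) {
    int status = artio_particle_cache_sfc_range(handle, 0, 511);
    if (status != ARTIO_SUCCESS) return status;
    /* read species 0 only, over the same hypothetical sfc range */
    return artio_particle_read_sfc_range(handle, 0, 511, 0, 0,
            collect_particle);
}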

File yt/frontends/artio/artio_headers/artio_endian.c

View file
+#include "artio_endian.h"
+
+#include <stdint.h>
+
+void artio_int_swap(int32_t *src, int count) {
+	unsigned char c1, c2, c3, c4;
+	int i;
+
+	for ( i = 0; i < count; i++ ) {
+		c1 = src[i] & 255;
+		c2 = (src[i] >> 8) & 255;
+		c3 = (src[i] >> 16) & 255;
+		c4 = (src[i] >> 24) & 255;
+		src[i] = ((int32_t) c1 << 24) + ((int32_t) c2 << 16) + ((int32_t) c3 << 8) + c4;
+	}
+}
+
+void artio_float_swap(float *src, int count) {
+	int i;
+	union {
+		float f;
+		unsigned char c[4];
+	} d1, d2;
+
+	for ( i = 0; i < count; i++ ) {
+		d1.f = src[i];
+		d2.c[0] = d1.c[3];
+		d2.c[1] = d1.c[2];
+		d2.c[2] = d1.c[1];
+		d2.c[3] = d1.c[0];
+		src[i] = d2.f;
+	}
+}
+
+void artio_double_swap(double *src, int count) {	
+	int i;
+	union
+	{
+		double d;
+		unsigned char c[8];
+	} d1, d2;
+
+	for ( i = 0; i < count; i++ ) {
+		d1.d = src[i];
+		d2.c[0] = d1.c[7];
+		d2.c[1] = d1.c[6];
+		d2.c[2] = d1.c[5];
+		d2.c[3] = d1.c[4];
+		d2.c[4] = d1.c[3];
+		d2.c[5] = d1.c[2];
+		d2.c[6] = d1.c[1];
+		d2.c[7] = d1.c[0];
+		src[i] = d2.d;
+	}
+}
+
+void artio_long_swap(int64_t *src, int count) {
+	int i;
+	union
+	{
+		int64_t d;
+		unsigned char c[8];
+	} d1, d2;
+
+	for ( i = 0; i < count; i++ ) {
+		d1.d = src[i];
+		d2.c[0] = d1.c[7];
+		d2.c[1] = d1.c[6];
+		d2.c[2] = d1.c[5];
+		d2.c[3] = d1.c[4];
+		d2.c[4] = d1.c[3];
+		d2.c[5] = d1.c[2];
+		d2.c[6] = d1.c[1];
+		d2.c[7] = d1.c[0];
+		src[i] = d2.d;
+	}
+}
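
These routines reverse the byte order of each element in place, so applying one twice restores the original values; a small self-contained check:

#include <stdint.h>
#include <stdio.h>
#include "artio_endian.h"

int main(void) {
    int32_t values[4] = { 1, 2, 3, 4 };

    artio_int_swap(values, 4);   /* reverse the byte order of each element */
    artio_int_swap(values, 4);   /* swapping twice restores the original */

    printf("%d %d %d %d\n", values[0], values[1], values[2], values[3]);
    return 0;
}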

File yt/frontends/artio/artio_headers/artio_endian.h

View file
+#ifndef __ARTIO_ENDIAN_H__
+#define __ARTIO_ENDIAN_H__
+
+#include <stdint.h>
+
+void artio_int_swap(int32_t *src, int count);
+void artio_float_swap(float *src, int count);
+void artio_double_swap(double *src, int count);
+void artio_long_swap(int64_t *src, int count);
+
+#endif /* __ARTIO_ENDIAN_H__ */

File yt/frontends/artio/artio_headers/artio_grid.c

View file
+/*
+ * artio_grid.c
+ *
+ *  Created on: May 10, 2011
+ *      Author: Yongen Yu
+ */
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>