Commits

Stephen Skory committed 3ebd7a4 Merge

Merge.

Files changed (14)

 detailed-errors=1
 where=yt
 exclude=answer_testing
-with-xunit=1
+with-xunit=1
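
For reference, each of these keys mirrors a nose command-line flag, so the equivalent invocation would be roughly (a sketch, not part of this commit):

    nosetests --detailed-errors --where=yt --exclude=answer_testing --with-xunit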

yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py

             bin_table, self.bounds, self.field_names[:],
             truncate=True)
 
+
+
     def add_frequency_bin_field(self, ev_min, ev_max):
         """
         Add a new field to the FieldInfoContainer, which is an
         interp = self._get_interpolator(ev_min, ev_max)
         name = "XRay_%s_%s" % (ev_min, ev_max)
         def frequency_bin_field(field, data):
-            dd = {'NumberDensity' : np.log10(data["NumberDensity"]),
+            dd = {'H_NumberDensity' : np.log10(data["H_NumberDensity"]),
                   'Temperature'   : np.log10(data["Temperature"])}
             return 10**interp(dd)
         add_field(name, function=frequency_bin_field,
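
A hedged usage sketch (the integrator instance name `si` is an assumption; only the hunk above is from this commit). The generated field interpolates in log space over the renamed H_NumberDensity axis:

    si.add_frequency_bin_field(0.5, 2.0)  # registers the field "XRay_0.5_2.0"
    # internally: 10**interp({'H_NumberDensity': log10(n_H), 'Temperature': log10(T)})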

yt/config.py

     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',
     ipython_notebook = 'False',
+    answer_testing_tolerance = '3',
+    answer_testing_bitwise = 'False',
+    gold_standard_filename = 'gold003',
+    local_standard_filename = 'local001'
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten
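
The new defaults can be read back with the standard ytcfg accessors, which is exactly what the answer-testing code further down does:

    from yt.config import ytcfg
    tol     = ytcfg.getint("yt", "answer_testing_tolerance")    # 3
    bitwise = ytcfg.getboolean("yt", "answer_testing_bitwise")  # False
    gold    = ytcfg.get("yt", "gold_standard_filename")         # "gold003"
    local   = ytcfg.get("yt", "local_standard_filename")        # "local001"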

yt/data_objects/data_containers.py

             ma = np.max(verts, axis=0)
             verts = (verts - mi) / (ma - mi).max()
         if filename is not None and self.comm.rank == 0:
-            f = open(filename, "w")
+            if hasattr(filename, "write"): f = filename
             for v1 in verts:
                 f.write("v %0.16e %0.16e %0.16e\n" % (v1[0], v1[1], v1[2]))
             for i in range(len(verts)/3):
                 f.write("f %s %s %s\n" % (i*3+1, i*3+2, i*3+3))
-            f.close()
+            if not hasattr(filename, "write"): f.close()
         if sample_values is not None:
             return verts, samples
         return verts
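
With this change `filename` may be either a path or any object with a write method; caller-supplied handles are not closed. A sketch (the enclosing method is not named in the hunk, so `export_obj` is a placeholder):

    obj.export_obj("surface.obj")      # path: opened and closed internally
    import cStringIO
    buf = cStringIO.StringIO()
    obj.export_obj(buf)                # open handle: left open for the caller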

yt/data_objects/hierarchy.py

             mylog.warning("Refine by something other than two: reverting to"
                         + " overlap_proj")
             self.proj = self.overlap_proj
+        if self.pf.dimensionality < 3 and hasattr(self, 'proj') and \
+            hasattr(self, 'overlap_proj'):
+            mylog.warning("Dimensionality less than 3: reverting to"
+                        + " overlap_proj")
+            self.proj = self.overlap_proj
+
         self.object_types.sort()
 
     def _setup_unknown_fields(self):

yt/frontends/enzo/answer_testing_support.py

 def standard_small_simulation(pf_fn, fields):
     if not can_run_pf(pf_fn): return
     dso = [None]
+    tolerance = ytcfg.getint("yt", "answer_testing_tolerance")
+    bitwise = ytcfg.getboolean("yt", "answer_testing_bitwise")
     for field in fields:
-        yield GridValuesTest(pf_fn, field)
+        if bitwise:
+            yield GridValuesTest(pf_fn, field)
         if 'particle' in field: continue
         for ds in dso:
             for axis in [0, 1, 2]:
                 for weight_field in [None, "Density"]:
                     yield ProjectionValuesTest(
                         pf_fn, axis, field, weight_field,
-                        ds, decimals=3)
+                        ds, decimals=tolerance)
             yield FieldValuesTest(
-                    pf_fn, field, ds, decimals=3)
+                    pf_fn, field, ds, decimals=tolerance)
                     
 class ShockTubeTest(object):
     def __init__(self, data_file, solution_file, fields, 
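
Both knobs come from the [yt] section of the user's configuration file (defaults added in the yt/config.py hunk above), so the comparison can be loosened or made bitwise without touching code, e.g. in ~/.yt/config:

    [yt]
    answer_testing_tolerance = 5
    answer_testing_bitwise = True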

yt/frontends/enzo/fields.py

           function=_NumberDensity,
           convert_function=_ConvertNumberDensity)
 
+def _H_NumberDensity(field, data):
+    field_data = np.zeros(data["Density"].shape,
+                          dtype=data["Density"].dtype)
+    if data.pf.parameters["MultiSpecies"] == 0:
+        field_data += data["Density"] * \
+          data.pf.parameters["HydrogenFractionByMass"]
+    if data.pf.parameters["MultiSpecies"] > 0:
+        field_data += data["HI_Density"]
+        field_data += data["HII_Density"]
+    if data.pf.parameters["MultiSpecies"] > 1:
+        field_data += data["HM_Density"]
+        field_data += data["H2I_Density"]
+        field_data += data["H2II_Density"]
+    if data.pf.parameters["MultiSpecies"] > 2:
+        field_data += data["HDI_Density"] / 2.0
+    return field_data
+add_field("H_NumberDensity", units=r"\rm{cm}^{-3}",
+          function=_H_NumberDensity,
+          convert_function=_ConvertNumberDensity)
+
+
 # Now we add all the fields that we want to control, but we give a null function
 # This is every Enzo field we can think of.  This will be installation-dependent,
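
A hedged usage sketch (`pf` stands for a loaded Enzo dataset; names outside the hunk are assumptions): the field sums whichever hydrogen species the MultiSpecies parameter enables and converts to cm^-3:

    dd = pf.h.all_data()
    print dd["H_NumberDensity"]  # total hydrogen number density in cm^-3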
 

yt/frontends/stream/data_structures.py

 class StreamHandler(object):
     def __init__(self, left_edges, right_edges, dimensions,
                  levels, parent_ids, particle_count, processor_ids,
-                 fields, io = None):
+                 fields, io = None, particle_types = {}):
         self.left_edges = left_edges
         self.right_edges = right_edges
         self.dimensions = dimensions
         self.num_grids = self.levels.size
         self.fields = fields
         self.io = io
-
+        self.particle_types = particle_types
+            
     def get_fields(self):
         return self.fields.all_fields
 
+    def get_particle_type(self, field) :
+
+        if self.particle_types.has_key(field) :
+            return self.particle_types[field]
+        else :
+            return False
+        
 class StreamHierarchy(AMRHierarchy):
 
     grid = StreamGrid
                         return data.convert(f)
                     return _convert_function
                 cf = external_wrapper(field)
+            ptype = self.stream_handler.get_particle_type(field)
             # Note that we call add_field on the field_info directly.  This
             # will allow the same field detection mechanism to work for 1D, 2D
             # and 3D fields.
             self.pf.field_info.add_field(
-                    field, lambda a, b: None,
+                    field, lambda a, b: None, particle_type = ptype,
                     convert_function=cf, take_log=False)
 
     def _parse_hierarchy(self):
         else:
             self.io = io_registry[self.data_style](self.stream_handler)
 
+    def update_data(self, data) :
+
+        """
+        Update the stream data with a new data dict. If fields already exist,
+        they will be replaced, but if they do not, they will be added. Fields
+        already in the stream but not part of the data dict will be left
+        alone. 
+        """
+        
+        particle_types = set_particle_types(data[0])
+
+        for key in data[0].keys() :
+            if key == "number_of_particles": continue
+            self.stream_handler.particle_types[key] = particle_types[key]
+            if key not in self.field_list:
+                self.field_list.append(key)
+
+        for i, grid in enumerate(self.grids) :
+            if data[i].has_key("number_of_particles") :
+                grid.NumberOfParticles = data[i].pop("number_of_particles")
+            for key in data[i].keys() :
+                self.stream_handler.fields[grid.id][key] = data[i][key]
+            
+        self._setup_unknown_fields()
+        self._detect_fields()
+        
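
The new test in yt/frontends/stream/tests/test_update_data.py (below) exercises this method; in short:

    # one dict per grid, in grid order; same-named fields are overwritten
    grid_data = [{"Temperature": np.random.random((32, 32, 32))}
                 for g in pf.h.grids]
    pf.h.update_data(grid_data)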
 class StreamStaticOutput(StaticOutput):
     _hierarchy_class = StreamHierarchy
     _fieldinfo_fallback = StreamFieldInfo
     @property
     def all_fields(self): return self[0].keys()
 
+def set_particle_types(data) :
+
+    particle_types = {}
+    
+    for key in data.keys() :
+
+        if key == "number_of_particles": continue
+        
+        if len(data[key].shape) == 1:
+            particle_types[key] = True
+        else :
+            particle_types[key] = False
+    
+    return particle_types
+
+def assign_particle_data(pf, pdata) :
+
+    """
+    Assign particle data to the grids using find_points. This
+    will overwrite any existing particle data, so be careful!
+    """
+    
+    if pf.h.num_grids > 1 :
+
+        try :
+            x = pdata["particle_position_x"]
+            y = pdata["particle_position_y"]
+            z = pdata["particle_position_z"]
+        except KeyError:
+            raise KeyError("Cannot decompose particle data without position fields!")
+        
+        particle_grids, particle_grid_inds = pf.h.find_points(x,y,z)
+        idxs = np.argsort(particle_grid_inds)
+        particle_grid_count = np.bincount(particle_grid_inds,
+                                          minlength=pf.h.num_grids)
+        particle_indices = np.zeros(pf.h.num_grids + 1, dtype='int64')
+        np.add.accumulate(particle_grid_count.squeeze(),
+                          out=particle_indices[1:])
+    
+        pdata.pop("number_of_particles")    
+        grid_pdata = []
+        
+        for i, pcount in enumerate(particle_grid_count) :
+            grid = {}
+            grid["number_of_particles"] = pcount
+            start = particle_indices[i]
+            end = particle_indices[i+1]
+            for key in pdata.keys() :
+                grid[key] = pdata[key][idxs][start:end]
+            grid_pdata.append(grid)
+
+    else :
+
+        grid_pdata = [pdata]
+        
+    pf.h.update_data(grid_pdata)
+                                        
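
A small sketch of the rule set_particle_types implements: one-dimensional arrays are tagged as particle fields, everything else as mesh fields:

    ptypes = set_particle_types({"Density": np.zeros((8, 8, 8)),
                                 "particle_mass": np.zeros(100)})
    # ptypes == {"Density": False, "particle_mass": True}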
 def load_uniform_grid(data, domain_dimensions, sim_unit_to_cm, bbox=None,
-                      nprocs=1, sim_time=0.0, number_of_particles=0):
+                      nprocs=1, sim_time=0.0):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
           disappointing or non-existent in most cases.
         * Particles may be difficult to integrate.
 
+    Particle fields are detected as one-dimensional fields. The number of particles
+    is set by the "number_of_particles" key in data.
+    
     Parameters
     ----------
     data : dict
         If greater than 1, will create this number of subarrays out of data
     sim_time : float, optional
         The simulation time in seconds
-    number_of_particles : int, optional
-        If particle fields are included, set this to the number of particles
 
     Examples
     --------
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
 
     sfh = StreamDictFieldHandler()
-
+    
+    if data.has_key("number_of_particles") :
+        number_of_particles = data.pop("number_of_particles")
+    else :
+        number_of_particles = 0
+    
+    if number_of_particles > 0 :
+        particle_types = set_particle_types(data)
+        pdata = {}
+        pdata["number_of_particles"] = number_of_particles
+        for key in data.keys() :
+            if len(data[key].shape) == 1 :
+                pdata[key] = data.pop(key)
+    else :
+        particle_types = {}
+    
     if nprocs > 1:
         temp = {}
         new_data = {}
         for key in data.keys():
             psize = get_psize(np.array(data[key].shape), nprocs)
             grid_left_edges, grid_right_edges, temp[key] = \
-                decompose_array(data[key], psize, bbox)
+                             decompose_array(data[key], psize, bbox)
             grid_dimensions = np.array([grid.shape for grid in temp[key]],
                                        dtype="int32")
         for gid in range(nprocs):
         grid_dimensions,
         grid_levels,
         -np.ones(nprocs, dtype='int64'),
-        number_of_particles*np.ones(nprocs, dtype='int64').reshape(nprocs,1),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
         np.zeros(nprocs).reshape((nprocs,1)),
         sfh,
+        particle_types=particle_types
     )
 
     handler.name = "UniformGridData"
     box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    # Now figure out where the particles go
+
+    if number_of_particles > 0 :
+        assign_particle_data(spf, pdata)
+    
     return spf
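
With the keyword gone, a particle-bearing load now looks like this (mirroring test_stream_particles.py below):

    data = {"Density": dens,                  # 3D array -> mesh field
            "particle_position_x": x,         # 1D arrays -> particle fields
            "particle_position_y": y,
            "particle_position_z": z,
            "number_of_particles": x.size}
    pf = load_uniform_grid(data, (64, 64, 64), 1.0, nprocs=8)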
 
 def load_amr_grids(grid_data, domain_dimensions, sim_unit_to_cm, bbox=None,
-                   sim_time=0.0, number_of_particles=0):
+                   sim_time=0.0):
     r"""Load a set of grids of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
     grid_data : list of dicts
         This is a list of dicts.  Each dict must have entries "left_edge",
         "right_edge", "dimensions", "level", and then any remaining entries are
-        assumed to be fields.  This will be modified in place and can't be
-        assumed to be static..
+        assumed to be fields.  Each dict may also include a particle count
+        ("number_of_particles"), assumed to be zero if absent.  This will be
+        modified in place and can't be assumed to be static.
     domain_dimensions : array_like
         This is the domain dimensions of the grid
     sim_unit_to_cm : float
         Size of computational domain in units sim_unit_to_cm
     sim_time : float, optional
         The simulation time in seconds
-    number_of_particles : int, optional
-        If particle fields are included, set this to the number of particles
 
     Examples
     --------
     ...     dict(left_edge = [0.0, 0.0, 0.0],
     ...          right_edge = [1.0, 1.0, 1.],
     ...          level = 0,
-    ...          dimensions = [32, 32, 32]),
+    ...          dimensions = [32, 32, 32],
+    ...          number_of_particles = 0),
     ...     dict(left_edge = [0.25, 0.25, 0.25],
     ...          right_edge = [0.75, 0.75, 0.75],
     ...          level = 1,
-    ...          dimensions = [32, 32, 32])
+    ...          dimensions = [32, 32, 32],
+    ...          number_of_particles = 0)
     ... ]
     ... 
     >>> for g in grid_data:
     grid_left_edges = np.zeros((ngrids, 3), dtype="float32")
     grid_right_edges = np.zeros((ngrids, 3), dtype="float32")
     grid_dimensions = np.zeros((ngrids, 3), dtype="int32")
+    number_of_particles = np.zeros((ngrids,1), dtype='int64')
     sfh = StreamDictFieldHandler()
     for i, g in enumerate(grid_data):
         grid_left_edges[i,:] = g.pop("left_edge")
         grid_right_edges[i,:] = g.pop("right_edge")
         grid_dimensions[i,:] = g.pop("dimensions")
         grid_levels[i,:] = g.pop("level")
+        if g.has_key("number_of_particles") :
+            number_of_particles[i,:] = g.pop("number_of_particles")  
         sfh[i] = g
-
+            
     handler = StreamHandler(
         grid_left_edges,
         grid_right_edges,
         grid_dimensions,
         grid_levels,
         None, # parent_ids is none
-        number_of_particles*np.ones(ngrids, dtype='int64').reshape(ngrids,1),
+        number_of_particles,
         np.zeros(ngrids).reshape((ngrids,1)),
         sfh,
+        particle_types=set_particle_types(grid_data[0])
     )
 
     handler.name = "AMRGridData"
     >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
     >>> pf = refine_amr(ug, rc, fo, 5)
     """
+
+    # If we have particle data, set it aside for now
+
+    number_of_particles = np.sum([grid.NumberOfParticles
+                                  for grid in base_pf.h.grids])
+
+    if number_of_particles > 0 :
+        pdata = {}
+        for field in base_pf.h.field_list :
+            if base_pf.field_info[field].particle_type :
+                pdata[field] = np.concatenate([grid[field]
+                                               for grid in base_pf.h.grids])
+        pdata["number_of_particles"] = number_of_particles
+        
     last_gc = base_pf.h.num_grids
     cur_gc = -1
     pf = base_pf    
                        level = g.Level,
                        dimensions = g.ActiveDimensions )
             for field in pf.h.field_list:
-                gd[field] = g[field]
+                if not pf.field_info[field].particle_type :
+                    gd[field] = g[field]
             grid_data.append(gd)
             if g.Level < pf.h.max_level: continue
             fg = FlaggingGrid(g, refinement_criteria)
                 gd = dict(left_edge = LE, right_edge = grid.right_edge,
                           level = g.Level + 1, dimensions = dims)
                 for field in pf.h.field_list:
-                    gd[field] = grid[field]
+                    if not pf.field_info[field].particle_type :
+                        gd[field] = grid[field]
                 grid_data.append(gd)
         pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
         cur_gc = pf.h.num_grids
+
+    # Now reassign particle data to grids
+
+    if number_of_particles > 0 : assign_particle_data(pf, pdata)
+    
     return pf
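
Net effect: refine_amr strips particle fields out of the per-grid dicts, refines the mesh alone, and then re-deposits the particles with assign_particle_data. A sketch using the names from the test below:

    amr_pf = refine_amr(ug1, rc, fo, 3)  # particles survive refinement
    assert np.sum([g.NumberOfParticles for g in amr_pf.h.grids]) == num_particles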

yt/frontends/stream/io.py

     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]
         sl[axis] = slice(coord, coord + 1)
-        sl = tuple(reversed(sl))
-        tr = self.fields[grid.id][field][sl].swapaxes(0,2)
+        sl = tuple(sl)
+        tr = self.fields[grid.id][field][sl]
         # In-place unit conversion requires we return a copy
         return tr.copy()
 

yt/frontends/stream/tests/test_stream_particles.py

+import numpy as np
+from yt.testing import *
+from yt.frontends.stream.api import load_uniform_grid, refine_amr, load_amr_grids
+import yt.utilities.initial_conditions as ic
+import yt.utilities.flagging_methods as fm
+
+def setup() :
+    pass
+
+# Field information
+
+def test_stream_particles() :
+    
+    num_particles = 100000
+    domain_dims = (64, 64, 64)
+    dens = np.random.random(domain_dims) 
+    x = np.random.uniform(size=num_particles)
+    y = np.random.uniform(size=num_particles)
+    z = np.random.uniform(size=num_particles)
+    m = np.ones((num_particles))
+
+    # Field operators and cell flagging methods
+
+    fo = []
+    fo.append(ic.TopHatSphere(0.1, [0.2,0.3,0.4],{"Density": 2.0}))
+    fo.append(ic.TopHatSphere(0.05, [0.7,0.4,0.75],{"Density": 20.0}))
+    rc = [fm.flagging_method_registry["overdensity"](1.0)]
+    
+    # Check that all of this runs ok without particles
+    
+    ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0)
+    ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0, nprocs=8)
+    amr0 = refine_amr(ug0, rc, fo, 3)
+
+    grid_data = []
+    
+    for grid in amr0.h.grids :
+        
+        data = dict(left_edge = grid.LeftEdge,
+                    right_edge = grid.RightEdge,
+                    level = grid.Level,
+                    dimensions = grid.ActiveDimensions,
+                    number_of_particles = grid.NumberOfParticles)
+    
+        for field in amr0.h.field_list :
+            
+            data[field] = grid[field]
+            
+        grid_data.append(data)
+
+    amr0 = load_amr_grids(grid_data, domain_dims, 1.0)
+                        
+    # Now add particles
+
+    fields1 = {"Density": dens,
+               "particle_position_x": x,
+               "particle_position_y": y,
+               "particle_position_z": z,
+               "particle_mass": m,
+               "number_of_particles": num_particles}
+
+    fields2 = fields1.copy()
+
+    ug1 = load_uniform_grid(fields1, domain_dims, 1.0)
+    ug2 = load_uniform_grid(fields2, domain_dims, 1.0, nprocs=8)
+
+    # Check to make sure the number of particles is the same
+
+    number_of_particles1 = np.sum([grid.NumberOfParticles for grid in ug1.h.grids])
+    number_of_particles2 = np.sum([grid.NumberOfParticles for grid in ug2.h.grids])
+    
+    assert number_of_particles1 == num_particles
+    assert number_of_particles1 == number_of_particles2
+
+    # Check to make sure the fields have been defined correctly
+    
+    assert ug1.field_info["particle_position_x"].particle_type
+    assert ug1.field_info["particle_position_y"].particle_type
+    assert ug1.field_info["particle_position_z"].particle_type
+    assert ug1.field_info["particle_mass"].particle_type
+    assert not ug1.field_info["Density"].particle_type
+
+    assert ug2.field_info["particle_position_x"].particle_type
+    assert ug2.field_info["particle_position_y"].particle_type
+    assert ug2.field_info["particle_position_z"].particle_type
+    assert ug2.field_info["particle_mass"].particle_type
+    assert not ug2.field_info["Density"].particle_type
+    
+    # Now refine this
+
+    amr1 = refine_amr(ug1, rc, fo, 3)
+    
+    grid_data = []
+    
+    for grid in amr1.h.grids :
+        
+        data = dict(left_edge = grid.LeftEdge,
+                    right_edge = grid.RightEdge,
+                    level = grid.Level,
+                    dimensions = grid.ActiveDimensions,
+                    number_of_particles = grid.NumberOfParticles)
+
+        for field in amr1.h.field_list :
+
+            data[field] = grid[field]
+            
+        grid_data.append(data)
+    
+    amr2 = load_amr_grids(grid_data, domain_dims, 1.0)
+
+    # Check everything again
+
+    number_of_particles1 = [grid.NumberOfParticles for grid in amr1.h.grids]
+    number_of_particles2 = [grid.NumberOfParticles for grid in amr2.h.grids]
+    
+    assert np.sum(number_of_particles1) == num_particles
+    assert_equal(number_of_particles1, number_of_particles2)
+    
+    assert amr1.field_info["particle_position_x"].particle_type
+    assert amr1.field_info["particle_position_y"].particle_type
+    assert amr1.field_info["particle_position_z"].particle_type
+    assert amr1.field_info["particle_mass"].particle_type
+    assert not amr1.field_info["Density"].particle_type
+    
+    assert amr2.field_info["particle_position_x"].particle_type
+    assert amr2.field_info["particle_position_y"].particle_type
+    assert amr2.field_info["particle_position_z"].particle_type
+    assert amr2.field_info["particle_mass"].particle_type
+    assert not amr2.field_info["Density"].particle_type
+

yt/frontends/stream/tests/test_update_data.py

+from yt.testing import *
+from yt.data_objects.profiles import BinnedProfile1D
+from numpy.random import uniform
+
+def setup():
+    global pf
+    pf = fake_random_pf(64, nprocs=8)
+    pf.h  # instantiate the hierarchy
+    
+def test_update_data() :
+    dims = (32,32,32)
+    grid_data = [{"Temperature":uniform(size=dims)}
+                 for i in xrange(pf.h.num_grids)]
+    pf.h.update_data(grid_data)
+    prj = pf.h.proj(2, "Temperature")
+    prj["Temperature"]
+    dd = pf.h.all_data()
+    profile = BinnedProfile1D(dd, 10, "Density",
+                              dd["Density"].min(),
+                              dd["Density"].max())
+    profile.add_fields(["Temperature"])
+    profile["Temperature"]
+                              

yt/mods.py

#from yt.frontends.maestro.api import \
 #    MaestroStaticOutput, MaestroFieldInfo, add_maestro_field
 
+from yt.frontends.stream.api import \
+    StreamStaticOutput, StreamFieldInfo, add_stream_field, \
+    StreamHandler, load_uniform_grid, load_amr_grids
+
 from yt.analysis_modules.list_modules import \
     get_available_modules, amods
 available_analysis_modules = get_available_modules()

yt/utilities/answer_testing/framework.py

 mylog = logging.getLogger('nose.plugins.answer-testing')
 run_big_data = False
 
-_latest = "gold001"
+# Set the latest gold and local standard filenames
+_latest = ytcfg.get("yt", "gold_standard_filename")
+_latest_local = ytcfg.get("yt", "local_standard_filename")
 _url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
 
 class AnswerTesting(Plugin):
 
     def options(self, parser, env=os.environ):
         super(AnswerTesting, self).options(parser, env=env)
-        parser.add_option("--answer-compare-name", dest="compare_name", metavar='str',
-            default=_latest, help="The name of tests against which we will compare")
+        parser.add_option("--answer-name", dest="answer_name", metavar='str',
+            default=None, help="The name of the standard to store/compare against")
+        parser.add_option("--answer-store", dest="store_results", metavar='bool',
+            default=False, action="store_true",
+            help="Should we store this result instead of comparing?")
+        parser.add_option("--local", dest="local_results",
+            default=False, action="store_true", help="Store/load reference results locally?")
         parser.add_option("--answer-big-data", dest="big_data",
             default=False, help="Should we run against big data, too?",
             action="store_true")
-        parser.add_option("--answer-store-name", dest="store_name", metavar='str',
-            default=None,
-            help="The name we'll call this set of tests")
-        parser.add_option("--local-store", dest="store_local_results",
-            default=False, action="store_true", help="Store/Load local results?")
 
     @property
     def my_version(self, version=None):
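
Typical invocations under the reworked flags (a sketch; the option that enables the plugin itself is not shown in this hunk):

    nosetests --answer-store --local --answer-name=local001  # store a local standard
    nosetests --answer-name=gold003                          # compare against a named gold standard
    nosetests --local                                        # compare against the latest local standard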
         if not self.enabled:
             return
         disable_stream_logging()
-        if options.store_name is not None:
-            self.store_results = True
-        # If the user sets the storage_name, then it means they are storing and
-        # not comparing, even if they set the compare_name (since it is set by default)
-            options.compare_name = None
-        else: 
-            self.store_results = False
-            options.store_name = self.my_version
-        from yt.config import ytcfg
+
+        # Parse through the storage flags to make sense of them
+        # and use reasonable defaults
+        # If we're storing the data, default storage name is local
+        # latest version
+        if options.store_results:
+            if options.answer_name is None:
+                self.store_name = _latest_local
+            else:
+                self.store_name = options.answer_name
+            self.compare_name = None
+        # if we're not storing, then we're comparing, and we want default
+        # comparison name to be the latest gold standard 
+        # either on network or local
+        else:
+            if options.answer_name is None:
+                if options.local_results:
+                    self.compare_name = _latest_local
+                else:
+                    self.compare_name = _latest
+            else:
+                self.compare_name = options.answer_name
+            self.store_name = self.my_version
+
+        self.store_results = options.store_results
+
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
             self.result_storage = defaultdict(dict)
-        if options.compare_name == "SKIP":
-            options.compare_name = None
-        elif options.compare_name == "latest":
-            options.compare_name = _latest
+        if self.compare_name == "SKIP":
+            self.compare_name = None
+        elif self.compare_name == "latest":
+            self.compare_name = _latest
             
         # Local/Cloud storage 
-        if options.store_local_results:
+        if options.local_results:
             storage_class = AnswerTestLocalStorage
             # Fix up filename for local storage 
-            if options.compare_name is not None:
-                options.compare_name = "%s/%s/%s" % \
-                    (os.path.realpath(options.output_dir), options.compare_name, 
-                     options.compare_name)
-            if options.store_name is not None:
+            if self.compare_name is not None:
+                self.compare_name = "%s/%s/%s" % \
+                    (os.path.realpath(options.output_dir), self.compare_name, 
+                     self.compare_name)
+            if self.store_name is not None:
                 name_dir_path = "%s/%s" % \
                     (os.path.realpath(options.output_dir), 
-                    options.store_name)
+                    self.store_name)
                 if not os.path.isdir(name_dir_path):
                     os.makedirs(name_dir_path)
-                options.store_name= "%s/%s" % \
-                        (name_dir_path, options.store_name)
+                self.store_name = "%s/%s" % \
+                        (name_dir_path, self.store_name)
         else:
             storage_class = AnswerTestCloudStorage
 
         # Initialize answer/reference storage
         AnswerTestingTest.reference_storage = self.storage = \
-                storage_class(options.compare_name, options.store_name)
+                storage_class(self.compare_name, self.store_name)
 
-        self.store_local_results = options.store_local_results
+        self.local_results = options.local_results
         global run_big_data
         run_big_data = options.big_data
 
         url = _url_path % (self.reference_name, pf_name)
         try:
             resp = urllib2.urlopen(url)
-            # This is dangerous, but we have a controlled S3 environment
-            data = resp.read()
-            rv = cPickle.loads(data)
         except urllib2.HTTPError as ex:
             raise YTNoOldAnswer(url)
-            mylog.warning("Missing %s (%s)", url, ex)
-            rv = default
+        else:
+            for this_try in range(3):
+                try:
+                    data = resp.read()
+                except:
+                    time.sleep(0.01)
+                else:
+                    # We were successful
+                    break
+            else:
+                # Raise error if all tries were unsuccessful
+                raise YTCloudError(url)
+            # This is dangerous, but we have a controlled S3 environment
+            rv = cPickle.loads(data)
         self.cache[pf_name] = rv
         return rv
 
         nv = self.run()
         if self.reference_storage.reference_name is not None:
             dd = self.reference_storage.get(self.storage_name)
-            if dd is None: raise YTNoOldAnswer(self.storage_name)
+            if dd is None or self.description not in dd: 
+                raise YTNoOldAnswer("%s : %s" % (self.storage_name, self.description))
             ov = dd[self.description]
             self.compare(nv, ov)
         else:
         super(ProjectionValuesTest, self).__init__(pf_fn)
         self.axis = axis
         self.field = field
-        self.weight_field = field
+        self.weight_field = weight_field
         self.obj_type = obj_type
         self.decimals = decimals
 
             obj = self.create_obj(self.pf, self.obj_type)
         else:
             obj = None
+        if self.pf.domain_dimensions[self.axis] == 1: return None
         proj = self.pf.h.proj(self.axis, self.field,
                               weight_field=self.weight_field,
                               data_source = obj)
         return proj.field_data
 
     def compare(self, new_result, old_result):
+        if new_result is None:
+            return
         assert(len(new_result) == len(old_result))
         for k in new_result:
             assert (k in old_result)

yt/utilities/exceptions.py

         return "There is no old answer available.\n" + \
                str(self.path)
 
+class YTCloudError(YTException):
+    def __init__(self, path):
+        self.path = path
+
+    def __str__(self):
+        return "Failed to retrieve cloud data. Connection may be broken.\n" + \
+               str(self.path)
+
 class YTEllipsoidOrdering(YTException):
     def __init__(self, pf, A, B, C):
         YTException.__init__(self, pf)