Commits

dan mackinlay committed ce9b2b2

tidy up

Files changed (1)

 Created by dan mackinlay on 2012-09-12.
 
 TODO:
-consider calculating MI between relative axial positions of pairs of particles, which also might work.
-(As we have no reason to suppose that MI is "concentrated" in one part of the state vector,
-and avial positions are quicker to calculate, AND don't have as much additional noise)
+* Consider calculating MI between *relative* axial positions or velocities of pairs of particles over time,
+  rather than branching histories, which also might work in the infinite-time limit.
+* Parallelise using something natural, such as redis, 0MQ, or ruffus.
 
 """
 from __future__ import division
 from random import sample
 
 # we expect params to contain the following keys:
-# num_agents, dt, noise, radius, steps
+# n_agents, dt, noise, radius, discard_steps, branch_steps, n_branches
 DEFAULT_PARAMS = {
-    'num_agents': 100,
+    'n_agents': 100,
     'dt': 0.01,
     'noise': 0.2,
     'radius': 0.05,
     'discard_steps': 100,
     'branch_steps': 1,
-    'num_branches': 100,
+    'n_branches': 100,
 }
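
Callers presumably build a full params dict by starting from DEFAULT_PARAMS and overriding individual keys; a hypothetical example (the override values are arbitrary):

    # copy the defaults and override a couple of keys for one run
    params = dict(DEFAULT_PARAMS, noise=0.4, n_branches=200)
    sim = basic_flock_factory(params)
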
 
-class SimState(object):
-    """A holder for some simulation state.
-    TODO: ensure that params is always copied by value."""
-    params = None
-    statevars = ['vels', 'locs']
-    def __init__(self, params):
-        self.params = params.copy()        
-    def clone(self):
-        """make a new copy of me with arrays pointing to the same memory locations
-        but with params dict cloned so that branching thing works without trampling state."""
-        clone = self.__class__(self.params.copy())
-        for name in self.statevars:
-            setattr(clone, name, getattr(self, name))
-        return clone
-
 def do_flocking_sim(simstate, steps=100, seed=1):
     """
     Starting from the state in *simstate*, iterate the simulation *steps* times.
     base_simstate.params['base_seed'] = seed
     return [
         do_flocking_sim(base_simstate, steps=params['branch_steps'], seed=seed+i)
-        for i in xrange(params['num_branches'])
+        for i in xrange(params['n_branches'])
     ]
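
End to end, a branched run might be driven roughly as follows. The branching loop shown just above sits in a function whose signature is elided in this hunk; do_branched_experiment is an assumed name and signature for it, and treating discard_steps as a burn-in is likewise an assumption.

    params = DEFAULT_PARAMS.copy()
    base = basic_flock_factory(params)
    # burn the flock in before branching (assuming discard_steps means burn-in)
    base = do_flocking_sim(base, steps=params['discard_steps'], seed=0)
    # run n_branches short continuations from the same base state
    branches = do_branched_experiment(base, seed=1)
    mi_estimates = analyse_branched_experiment(branches, n_pairs=10)
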
 
 def analyse_branched_experiment(branches, n_pairs=10):
     # stacked_vels = np.dstack([b.vels for b in branches])
     #In fact, do we really need multiple axes of info? Nah.
     stacked_vels = np.vstack([b.vels[:,0] for b in branches])
-    num_agents = stacked_vels.shape[0]
+    n_agents = stacked_vels.shape[0]
     # Now, we estimate distributions of projected velocity in order to, in turn, estimate mean
     # mutual information.
     ests = np.zeros(n_pairs)
     for i in xrange(n_pairs):
         #this is not sliced efficiently. Should restack. Anyway...
-        pair = stacked_vels[sample(xrange(num_agents), 2), :]
-        ests[i] = ince_mi_dist_cont(pair[0], pair[1])
+        pair = stacked_vels[sample(xrange(n_agents), 2), :]
+        ests[i] = ince_mi_dist_cont(pair[0], pair[1])   
     return ests
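
The returned ests array holds one MI estimate per sampled pair; the "mean mutual information" mentioned in the comment would then presumably be summarised along these lines (illustrative only):

    ests = analyse_branched_experiment(branches, n_pairs=10)
    mean_mi = ests.mean()
    # the standard error gives a rough idea of how noisy the pair sampling is
    stderr_mi = ests.std(ddof=1) / np.sqrt(len(ests))
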
 
+class SimState(object):
+    """A holder for some simulation state.
+    TODO: ensure that params is always copied by value."""
+    params = None
+    statevars = ['vels', 'locs']
+    def __init__(self, params):
+        self.params = params.copy()        
+    def clone(self):
+        """make a new copy of me with arrays pointing to the same memory locations
+    but with the params dict cloned, so that branching works without trampling shared state."""
+        clone = self.__class__(self.params.copy())
+        for name in self.statevars:
+            setattr(clone, name, getattr(self, name))
+        return clone
+
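
A small illustration of the clone() semantics described in the docstring: the state arrays are shared by reference while the params dict is independent. The values here are arbitrary.

    a = SimState({'n_agents': 2})
    a.vels = np.zeros((2, 2))
    a.locs = np.zeros((2, 2))
    b = a.clone()
    assert b.vels is a.vels            # arrays point at the same memory
    b.params['n_agents'] = 3
    assert a.params['n_agents'] == 2   # params dict was copied, not shared
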
 def basic_flock_factory(params):
     """Initialise a sim with basic random state."""
     sim = SimState(params=params)
-    sim.locs = np.random.uniform(size=(params['num_agents'], 2))
+    sim.locs = np.random.uniform(size=(params['n_agents'], 2))
     sim.vels = random_unit_vectors(params)
     return sim
 
 def random_isotropic_vectors(params):
     """general a matrix of isotropically-distributed row vectors of mean length 1"""
     #the normal distribution is isotropic in all dimensions
-    return np.random.normal(0., 2 ** -0.5, (params['num_agents'], 2))
+    return np.random.normal(0., 2 ** -0.5, (params['n_agents'], 2))
 
 def random_unit_vectors(params):
     """general a matrix of isotropically-distributed unit row vectors"""
     ds.calculate_entropies(**kwargs)
     return ds.I()
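
The body of random_unit_vectors is not shown in this hunk; a minimal sketch consistent with its docstring and with random_isotropic_vectors above would normalise Gaussian draws onto the unit circle (an assumption, not the committed implementation):

    import numpy as np

    def random_unit_vectors_sketch(params):
        """Sketch: isotropic unit row vectors via normalised Gaussian draws."""
        vecs = np.random.normal(0., 1., (params['n_agents'], 2))
        # divide each row by its length so every vector lands on the unit circle
        return vecs / np.sqrt((vecs ** 2).sum(axis=1))[:, np.newaxis]
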
 
-def choose_n_bins(n_data_points, test=True):
+def choose_n_bins(n_samples, test=True):
     """according to Cellucci et al (2005), the maximal number of bins is given
     thus"""
-    if n_data_points<20 and test:
-        raise ValueError("%d is too small a number to bin" % n_data_points)
-    return int(float(n_data_points/5.)**(0.5))
+    if n_samples<20 and test:
+        raise ValueError("%d is too small a number to bin" % n_samples)
+    return int(float(n_samples/5.)**(0.5))
+
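
A quick sanity check of the binning rule, for concreteness:

    assert choose_n_bins(500) == 10   # sqrt(500 / 5) == 10
    assert choose_n_bins(20) == 2     # the smallest sample size the guard allows
    # choose_n_bins(19) raises ValueError unless called with test=False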