# bubble-economy / handy_scripts.py
"""
handy_scripts.py

handy snippets to copy and paste into my terminal
"""
"""
for b in trial_warehouse.find(lambda r: r.experiment_id in ['pure_wicks_2d']):
    try:
        trial_warehouse.trash()
    except KeyError:
        pass
##########
for b in trial_warehouse.find():
    try:
        del(b.stats['last_sliver_susceptibility'])
        trial_warehouse.store(b)
    except KeyError:
        pass
##########
for exp in set(['buckingham_2d_param_sweep_fig_4a_wicks',
     'buckingham_2d_param_sweep_fig_4a_wicks_harder_core',
     'buckingham_lowd_param_sweep',
     'ergodic_dim_sweep']):
    experiments.get_reduced_experiment(trial_warehouse, exp)
####################
from utils import addict
import experiments
es=experiments.get_trial_warehouse('pure_wicks_2d_300_plus_b')
for e in es.find():      
   for k in e.stats:
       if isinstance(e.stats[k], list) and isinstance(e.stats[k][0], dict):
           e.stats[k] = addict(e.stats[k])
   es.store(e)
o=experiments.get_reduced_experiment('pure_wicks_2d_300_plus_b')
####################
plots.per_dim_multiplot(
  'wicks_fig5_lowd', 
  'P1', [
     # ['vel_loc_self_rigid_bin_mi_slim_complete', 'tree_mi_w'],
     # ['vel_loc_self_rigid_bin_mi_slim_continuous', 'tree_mi_w'],
     ['vel_loc_self_rigid_bin_mi_complete', 'tree_mi_w'],
     ['vel_loc_self_rigid_bin_mi_continuous', 'tree_mi_w'],
     #['vel_loc_self_rigid_bin_mi_complete', 'mean_mi'],
     #['vel_loc_self_rigid_bin_mi_continuous', 'mean_mi'],
     # ['vel_loc_self_mi_slim_continuous', 'tree_mi_w'],
     # ['vel_loc_self_mi_slim_complete', 'tree_mi_w'],
     ['vel_loc_self_mi_slim_continuous', 'tree_mi_raw'],
     ['vel_loc_self_mi_slim_complete', 'tree_mi_raw'],
     ['vel_loc_self_mi_slim_continuous', 'mean_mi'],
     ['vel_loc_self_mi_slim_complete', 'mean_mi'],
     # ['vel_loc_self_mi_continuous', 'tree_mi_w'],
     # ['vel_loc_self_mi_complete', 'tree_mi_w'],
     # ['link_stats', 'mean_degree'],
     # ['susceptibility_continuous'],
     # ['susceptibility_piecewise'],
     # ['order_complete'],
     # ['order_continuous'],
  ],
)
#############################
e_trial_vars[x_lookup[0]]
e_boxes[x_lookup[0]]
e_trial_stats[x_lookup[0]]
e_trial_stats[y_path[0]][y_path[1]]['mean'][x_lookup[0]]
e_trial_stats[y_path[0]][y_path[1]]['std_dev'][x_lookup[0]]
x[x_lookup[0]]
y[x_lookup[0]]
y_error[x_lookup[0]]
e_trial_vars[x_lookup[0]]
e_boxes[x_lookup[0]]
!w=get_trial_warehouse('wicks_fig5_lowd')
e_boxes[x_lookup[0]]
bx = [w.fetch(bn) for bn in e_boxes[x_lookup[0]]]
[b.stats[y_path[0]][y_path[1]][0] for b in bx]
np.array([b.stats[y_path[0]][y_path[1]][0] for b in bx]).mean()
################################
w = get_trial_warehouse('animate_me')
b=w.find(lambda b: b.trader_factory_kwargs['dimensions']==2 and b.trader_factory_kwargs['P1']==6.0).next()
ts = b.traderset
plots.animated_plot(ts)
###############################
plots.multiplot_from_disk(
  'pure_wicks_2d_grid', 
  'P1', 'P2', [
    ['vel_loc_self_mi_stepped', 'tree_mi_raw'],
    ['mi_wicks_2d_continuous', 'est'],
  ],
  'P1', 'P2',
  z_labels = [
     'Stepped Cellucci Mutual Information',
     'Continuous Wicks Mutual Information',
  ],
  error_bars = True,
)
###############################
# a data set that is too large for any plausibly-sized test run:
from pandas import DataFrame
import experiments

data_table = []
w=experiments.get_trial_warehouse('ergodic_bias_sweep')

# b=w.find().next()

for b in w.find():
    param=b._metadata['P1']
    for stat_name, stat in b.stats.iteritems():
        # s1=b.stats['order_swept']
        # stat_name = 'order_swept'
        for n_steps in stat:
            for estimator in stat[n_steps]:
                for val in stat[n_steps][estimator]:
                    data_table.append(dict(
                        stat_name=stat_name,
                        n_steps=int(n_steps),
                        val=val,
                        estimator=estimator,
                        param=param))

df=DataFrame(data_table)
del(data_table)
df.to_csv('../bubble_economy.Data/text/ergodic_bias_sweep%s.csv' % stat_name)
###############################
# kinder, gentler version
import experiments
exp_name = 'ergodic_bias_sweep_manual'
w=experiments.get_trial_warehouse(exp_name)

# b=w.find().next()
headers = ["stat_name", "n_steps", "val", "estimator", "param"]

for b in w.find():
    param=b._metadata['P1']
    for stat_name, stat in b.stats.iteritems():
        handle = experiments.file_for_stat(exp_name, stat_name, headers)
        for n_steps in stat:
            for estimator in stat[n_steps]:
                for val in stat[n_steps][estimator]:
                    bits = dict(
                        stat_name=stat_name,
                        n_steps=int(n_steps),
                        val=val,
                        estimator=estimator,
                        param=param)
                    next_line = ",".join([
                            repr(bits[h]) for h in headers
                        ])+"\n"
                    handle.write(next_line)
                    
experiments.file_for_stat.close()
###############################
# wicks hybrid version
import experiments
output_name = 'wicks_fig5'
#output_name = 'vel_particlewise_panic'
#output_name = 'correlation_entropy'
#output_name = 'correlation_quick_dirty'
#for exp_name in ('wicks_fig5', 'wicks_fig5b', 'wicks_fig5_appendix', 'wicks_fig5_spleen'):
#for exp_name in ('vel_particlewise_panic',):
#for exp_name in ('correlation_entropy',):
#for exp_name in ('wicks_fig5', 'wicks_fig5_addendum', 'wicks_fig5_super_addendum'):
for exp_name in ('wicks_fig5', 'wicks_fig5_addendum'):
#for exp_name in ('wicks_fig5_super_addendum',):
#for exp_name in ('wicks_fig5_addendum',):
#for exp_name in ('correlation_quick_dirty',):
    w=experiments.get_trial_warehouse(exp_name)
    headers = ["val", "param", "basename"]
    for b in w.find():
        param=b._metadata['P1']
        for stat_name in b.stats.keys():
            print output_name, exp_name, stat_name, param
            stat=b.stats[stat_name]
            handle = experiments.file_for_stat(output_name, stat_name, headers)
            if (stat_name.find('particlewise')>=0) and (stat_name.find('macerate')==-1):
                for x in stat.itervalues():
                    assert len(x)==1
                    for e in x.values():
                        for val in e:
                            next_line = ",".join([
                                ("%f" % val),
                                ("%.5f" % (b._metadata.P1)),
                                repr(b._basename),
                            ])+"\n"
                            handle.write(next_line)
            else:
                for val in stat['est']:
                    next_line = ",".join([
                        ("%f" % val),
                        ("%.5f" % (b._metadata.P1)),
                        repr(b._basename),
                    ])+"\n"
                    handle.write(next_line)

experiments.file_for_stat.close()
--

###############################
# load shit
from experiments import get_trial_warehouse
w=get_trial_warehouse('animate_me')

for i, box in enumerate(w.find()):
    print box._basename
    plots.PLT.clf()
    plots.wicks_plot(box.traderset, 0, False)
    plots.PLT.savefig("vicsek-%d.eps" % i)
    plots.PLT.savefig("vicsek-%d.pdf" % i)

fields = ['P1', 'P2', 'P3', 'delta', 'num_agents', 'radius', 'noise']
boxes = list(w.find())

for i in [0,5,7]:
    ts = boxes[i].traderset
    print "i: %d" % i
    for f in fields:
        print "  ", f, getattr(ts,f)

# i: 0
#    P1 1.57079632679
#    P2 0.15
#    P3 0.98
#    delta 0.00264928914812
#    num_agents 1000
#    radius 0.0176619276541
#    noise 0.25
# i: 5
#    P1 2.74889357189
#    P2 0.15
#    P3 0.98
#    delta 0.00264928914812
#    num_agents 1000
#    radius 0.0176619276541
#    noise 0.4375
# i: 7
#    P1 0.0
#    P2 0.15
#    P3 0.98
#    delta 0.00264928914812
#    num_agents 1000
#    radius 0.0176619276541
#    noise 0.0
###############################
import experiments
handle = open('../bubble_economy.Data/text/vel_particlewise.csv', 'w')
header = ['_basename', 'P1', 'mi']
handle.write(','.join(header)+'\n')
for expname in ['vel_particlewise', 'vel_particlewise_panic']:
    w = experiments.get_trial_warehouse(expname)
    for b in w.find():
        for v in b.stats.values():
            for vv in v.values():
                print vv.values()[0][0]
                handle.write(','.join([
                    b._basename,
                    ("%.5f" % (b._metadata.P1)),
                    ("%.5f" % (vv.values()[0][0]))
                ])+'\n')

handle.close()
###############################
# reanaalysing
import experiments
import trader_stats_sets
w=  experiments.get_trial_warehouse('vel_particlewise_panic')
b = w.find().next()
b = experiments.reanalyse_trial(b, trader_stats_sets.particlewise_angley_apriori)
b.store()
###############################
#reanalysing 2
import trader_stats_sets
import experiments
b=experiments.get_trial_warehouse('wicks_fig5').find(lambda m: m.P1>2.5).next()
b = experiments.reanalyse_trial(b, trader_stats_sets.wicks_vs_me_vs_ince)
###############################
#reanalysing 3
import trader_stats_sets
import trader_stats
import experiments
b=experiments.get_trial_warehouse('vel_particlewise_panic').find(lambda m: m.P1>2).next()
ts=b.traderset
trader_stats.mi_distance_angular_vel_apriori_particlewise(ts, n_slices=1)

b = experiments.reanalyse_trial(b, trader_stats_sets.wicks_vs_me_vs_ince)
###############################
import experiments
import trader_stats_sets
w=experiments.get_trial_warehouse('correlation_entropy')
b=w.fetch('5cfd5d06def6ed3b53ab00b0b2577780a049507ff4bf175cfc01fb88')
stats_set = trader_stats_sets.particlewise_distance_correlations
b_args=b._metadata.copy()
#b=experiments.reanalyse_trial(b, **b_args)
b=experiments.simulate_and_analyse_trial(b, **b_args)
b.store()
boxez=[b]
for seed in range(100,103):
    local_b_args=b_args.copy()
    local_b_args['seed'] = seed
    local_b = w.Box(**local_b_args)
    local_b = experiments.simulate_and_analyse_trial(local_b, **b_args)
    local_b.store()
    local_b.clear_cache()
    boxez.append(local_b)
###############################
%pdb
import experiments
import trader_stats_sets
w=experiments.get_trial_warehouse('correlation_entropy')
b=w.find(lambda m: m.P1>1.5).next()
b_args=b._metadata.copy()
stats_set = trader_stats_sets.gu_danesque_continuous_test
b_args['stats_set']=stats_set
b=experiments.reanalyse_trial(b, dry_run=True, **b_args)
"""