Where we meet / WhereWeMeet.py

'''
Where We Meet

Tools for constructing a Google map with contour lines showing the areas that
are easiest for the members of a group to get to.

Written by Elliot Hallmark (permafacture@gmail.com)

Free to use.  Have fun.
'''



from googlemaps import GoogleMaps, GoogleMapsError
import pygmaps
#import pyshp

from numpy import linspace, zeros
from ast import literal_eval  #safely turn grid point strings back into lists
import csv

#lat, long of boundary defining points
ur = [30.494243, -97.430981] #lat,long of upper right corner of area to consider
ll = [30.116622, -97.76881] #lat,long of lower left corner of area to consider

center = [(ll[0]+ur[0])/2,(ll[1]+ur[1])/2]

gridfilename = 'data/_gridfile.csv'

try:
    with open('./googleapikey') as f:
        GMAPS_API_KEY = f.readline().rstrip()
except IOError:
    print '''
    FATAL ERROR: You don't have a Google API key set up yet!
    Get your key from Google, then put it in a file called 'googleapikey' in
    the same directory as this script.\n'''
    raise
    

#this will be needed globally
gmaps = GoogleMaps(GMAPS_API_KEY, referrer_url='http://www.google.com/')


def traveltime(location1, location2):
    '''Use google maps to calculate travel time between two locations.
    This function retries a few times if the query to google fails.'''

    #retries are necessary; the directions service fails intermittently.
    for tries in range(5):
        try:
            results = gmaps.directions(location1, location2)
        except GoogleMapsError:
            pass #keep trying. should i futz with the location values to coax a good response?
        else:
            #success
            return results['Directions']['Duration']['seconds']
    #all retries failed
    raise GoogleMapsError('\nlocation1: %s \nlocation2: %s' % (location1, location2))
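
#Example usage (a sketch; the address and coordinates below are made up and a
#valid API key is required):
#
#   secs = traveltime('600 Congress Ave, Austin, TX', (30.27, -97.74))
#   print '%i seconds' % (secs,)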

def gen_grid_file(ll, ur, divisions=30):
    '''Generate a csv of points that all other data files will refer to.
    This file is defined within this module.  Edit the source code to change it.
    Input the lower left and upper right corners of the area to be analyzed.'''

    from os.path import isfile
    if isfile(gridfilename):
        print '''
    There is data in the data directory already!
    Creating a new gridfile will corrupt that data.
    If you meant to start fresh, please delete or move the gridfile and other csv files and try again.
        '''
        raise IOError('gridfile already exists: %s' % (gridfilename,))

    llx, lly = ll
    urx, ury = ur
    #note: x holds latitude and y longitude here; the grid runs from the
    #lower left corner to the upper right corner
    xs = linspace(llx, urx, divisions)
    ys = linspace(lly, ury, divisions)
    with open(gridfilename, 'wb') as csvfile:
        csvwriter = csv.writer(csvfile, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        for y in ys:
            csvwriter.writerow([str([x, y]) for x in xs])
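
#Example: build the grid over the area defined at the top of this module.
#gen_grid_file refuses to run if a gridfile already exists.
#
#   gen_grid_file(ll, ur)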
    
def time_map_from_address(source, filename='data/test.csv'):
    '''Uses the gridfile to create a csv file of travel times from source to
    all grid points.  The data is saved in 'filename'.
    Returns the number of queries made and the number of errors.'''

    errors = 0
    results = 0
    with open(gridfilename, 'rb') as gridfile, open(filename, 'wb') as outfile:
        reader = csv.reader(gridfile, delimiter='\t', quotechar='"')
        writer = csv.writer(outfile, delimiter='\t', quotechar='"')
        for row in reader:
            result_row = []
            for item in row:
                #turn the string representation of [lat, long] back into a tuple
                try:
                    result = traveltime(source, tuple(literal_eval(item)))
                except GoogleMapsError:
                    result = None
                    errors += 1
                results += 1
                result_row.append(str(result))
            writer.writerow(result_row)
    return results, errors
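
#Example (the address and output filename are hypothetical):
#
#   res, err = time_map_from_address('600 Congress Ave, Austin, TX',
#                                    filename='data/alice.csv')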

def coords_of_nones(filename='data/elliot.csv'):
    '''For debugging.  Errors in google maps pathing create None values.
    This function uses the local gridfile and returns the coordinates of the
    failed queries in "filename".'''
    result = []
    with open(gridfilename, 'rb') as grid, open(filename, 'rb') as data:
        grid_reader = csv.reader(grid, delimiter='\t', quotechar='"')
        data_reader = csv.reader(data, delimiter='\t', quotechar='"')
        for datarow, gridrow in zip(data_reader, grid_reader):
            for item, gitem in zip(datarow, gridrow):
                if item == 'None':
                    result.append(tuple(literal_eval(gitem)))
    return result
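
#Example: list the grid coordinates whose queries failed in a data file.
#
#   bad_points = coords_of_nones(filename='data/alice.csv')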

def patch_data(source, filename):
    '''If a travel time csv file still has Nones in it, this function will try to patch it.
    The patched copy is written to 'data/patched.csv'.
    If patching fails, there is probably some other problem (location unreachable,
    no connection to google, etc.).'''
    nones = coords_of_nones(filename=filename)
    patches = []
    i = 0
    for non in nones:
        #TODO if traveltime fails again, this function should just put in a really high
        #travel time.  the location could be unreachable.
        patches.append(traveltime(source, non))
    with open(filename, 'rb') as data, open('data/patched.csv', 'wb') as patched:
        result_writer = csv.writer(patched, delimiter='\t', quotechar='"')
        data_reader = csv.reader(data, delimiter='\t', quotechar='"')
        for datarow in data_reader:
            result_row = []
            for item in datarow:
                if item != 'None':
                    result_row.append(item)
                else:
                    #nones are encountered in the same order as patches was constructed
                    result_row.append(patches[i])
                    i += 1
            result_writer.writerow(result_row)
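
#Example (the address must be the same one used to build the file; move the
#patched copy into place afterwards):
#
#   patch_data('600 Congress Ave, Austin, TX', 'data/alice.csv')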

def get_contour_paths(filename, n=10):
    '''Uses the gridfile and the file at 'filename' to return contour paths,
    colors and values.'''
    #TODO: currently this plot is rotated with respect to the google map
    import matplotlib.pyplot as plt

    Xs = []
    Ys = []
    Zs = []
    #extract data from files
    with open(gridfilename, 'rb') as grid:
        #not using with..as here because filename may eventually be a file or an array
        data = open(filename, 'rb')
        grid_reader = csv.reader(grid, delimiter='\t', quotechar='"')
        data_reader = csv.reader(data, delimiter='\t', quotechar='"')
        for datarow, gridrow in zip(data_reader, grid_reader):
            row = []
            for item, gitem in zip(datarow, gridrow):
                x, y = literal_eval(gitem)
                Xs.append(x)
                Ys.append(y)
                row.append(float(item)) #contour needs numbers, not strings
            Zs.append(row)
    #filter Xs and Ys down to what contour expects: the x and y axes of the grid
    m = len(Zs[0])
    Xs = Xs[:m]  #x repeats every row; keep the first row
    Ys = Ys[::m] #y is constant within a row; keep one per row
    C = plt.contour(Xs, Ys, Zs, n)
    #plt.show()
    levels = []
    colors = []
    for collection in C.collections:
        colors.append(collection.get_color())
        ps = []
        for p in collection.get_paths():
            ps.append(p.vertices)
        levels.append(ps)
    #paths, colors of paths, and values represented by paths
    return levels, colors, C.levels
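
#Example: ten contour levels from the summed travel time file.
#
#   paths, colors, values = get_contour_paths('data/_sumfile.csv', n=10)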

def sum_files(source_path="data/", save_file='_sumfile.csv'):
    '''Create a file that sums the travel times from every csv in path whose
    name doesn't start with '_'.  The files must be free of Nones; run
    patch_data on any that aren't.'''
    import os
    path = source_path
    #first determine the size of the grid from the gridfile
    with open(gridfilename, 'rb') as f:
        r = csv.reader(f, delimiter='\t', quotechar='"')
        c = 0
        for row in r:
            c += 1
    result = zeros((c, len(row)))

    #now iterate through the files and sum the fields
    for f in os.listdir(path):
        ff = os.path.join(path, f)
        if f[0] != '_' and os.path.isfile(ff):
            with open(ff, 'rb') as fff:
                r = csv.reader(fff, delimiter='\t', quotechar='"')
                print 'summing %s' % (ff,)
                for i, row in enumerate(r):
                    for j, item in enumerate(row):
                        result[i][j] += float(item)

    #write the result out as a csv; the '_' prefix keeps it out of future sums
    with open(os.path.join(path, save_file), 'wb') as f:
        w = csv.writer(f, delimiter='\t', quotechar='"')
        for row in result:
            w.writerow(row)

    return result
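
#Example: sum every user's csv in the data directory into data/_sumfile.csv.
#
#   total = sum_files()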

def process_users(filename, output_path="data/"):
    '''Create data files for all users on a spreadsheet.  The spreadsheet must
    be tab delimited with 'user' and 'address' columns.'''
    import os
    path = output_path
    ls = os.listdir(path)
    with open(filename, 'rb') as f:
        reader = csv.DictReader(f, delimiter='\t')
        for row in reader:
            name = row['user'].lower()
            if '%s.csv' % (name,) in ls:
                print 'user %s previously processed' % (name,)
            else:
                print "This will take a while.  Processing user %s..." % (name,)
                res, err = time_map_from_address(row['address'],
                                                 filename='data/%s.csv' % (name,))
                print 'finished %i queries with %i errors' % (res, err)
                if err:
                    print "\nYou should run 'patch_data' on 'data/%s.csv'.  You must respecify the address." % (name,)