Source

IRATE-format / irate / AHFascii.py

Full commit
  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
#!/bin/python
from __future__ import division

import gc
import os
import struct
import sys
from math import ceil
from optparse import OptionParser

import h5py
from numpy import append, array, concatenate, empty, fromstring, zeros

def read_halos(fname, nogas):
    """Read an ASCII AHF_halos file and collimate it into columns.

    Returns a list of column headers and a list of arrays, one per column.
    All entries in the columns are in the same order -- i.e. the ith entry
    in column x corresponds to the same halo as the ith entry in column y.
    Three dimensional quantities (center, velocity, angular momentum and
    the shape eigenvectors) are gathered into (nhalos, 3) arrays.

    :param fname:  The file to be read; should be in ASCII format with column
        headers preceded by a #
    :param nogas:  True if AHF was NOT compiled with -DGAS_PARTICLES; otherwise
        this will fail in attempting to collimate the data because it will try
        to combine columns (e.g. Lx_gas, Ly_gas, and Lz_gas) that don't exist.
    :returns:  [mycoldata, mycomments]
    """
    comments = None
    data = []
    with open(fname, 'r') as f:
        for line in f:
            if line.startswith('#'):
                if len(line) > 1:   # i.e. not just a hash mark and nothing else
                    # If files are catted together, this ends up being the
                    # headers of the last file (they're presumed identical).
                    comments = line.lstrip('#').split()
                    # Drop the trailing "(N)" column index from each header,
                    # e.g. "Mvir(9)" -> "Mvir".  str.strip() removed *any* of
                    # the characters '(', ')' and the digits from the ends,
                    # which mangled names ending in those digits (e.g.
                    # "Phi0(40)" -> "Phi"), so remove the exact suffix instead.
                    for i in range(len(comments)):
                        suffix = "(%d)" % (i + 1)
                        if comments[i].endswith(suffix):
                            comments[i] = comments[i][:-len(suffix)]
                continue
            if not line.strip():
                continue    # skip blank lines rather than storing empty rows
            data.append([float(v) for v in line.split()])

    print("Read data from file...now collimating.")

    # Transpose so coldata[i] is the ith attribute across all halos.
    coldata = array(data).transpose()

    def vec3(a, b, c):
        # Stack three scalar columns into an (nhalos, 3) array.
        return array([coldata[a], coldata[b], coldata[c]]).transpose()

    mycen = vec3(2, 3, 4)
    myvel = vec3(5, 6, 7)
    myL = vec3(14, 15, 16)
    myEA = vec3(18, 19, 20)
    myEB = vec3(22, 23, 24)
    myEC = vec3(26, 27, 28)

    if not nogas:
        # AHF compiled with -DGAS_PARTICLES adds gas and star columns too.
        try:
            Lgas = vec3(43, 44, 45)
        except IndexError:
            print("\nahf2irate is attempting to combine columns related to three dimensional\nproperties of the gas and star particles, but it appears that you didn't\ncompile AHF with -DGAS_PARTICLES.\nTry rerunning the script with the --nogas option.\n")
            print("Exiting.")
            sys.exit(1337)
        EAgas = vec3(47, 48, 49)
        EBgas = vec3(51, 52, 53)
        ECgas = vec3(55, 56, 57)
        Lstar = vec3(64, 65, 66)
        EAstar = vec3(68, 69, 70)
        EBstar = vec3(72, 73, 74)
        ECstar = vec3(76, 77, 78)

    # Re-assemble the data into columns, with (x,y,z) things as 2D arrays.
    mycoldata = [coldata[0], coldata[1], mycen, myvel, coldata[8], coldata[9],
                 coldata[10], coldata[11], coldata[12], coldata[13], myL,
                 coldata[17], myEA, coldata[21], myEB, coldata[25], myEC]
    mycomments = [comments[0], comments[1], "Center", "Velocity", "Mvir",
                  "Rvir", "Vmax", "Rmax", "sigV", "lambda", "L", "a", "Ea",
                  "b", "Eb", "c", "Ec"]

    if not nogas:
        # Carefully append the rest, renaming the columns we concatenated.
        for i in range(29, 43):     # data between Ecz (dark) and Lx_gas
            mycoldata.append(coldata[i])
            mycomments.append(comments[i])
        for vec, name, scalar in ((Lgas, "L_gas", 46), (EAgas, "Ea_gas", 50),
                                  (EBgas, "Eb_gas", 54)):
            mycoldata.extend([vec, coldata[scalar]])
            mycomments.extend([name, comments[scalar]])
        mycoldata.append(ECgas)
        mycomments.append("Ec_gas")
        for i in range(58, 64):     # data between Ecz_gas and Lx_star
            mycoldata.append(coldata[i])
            mycomments.append(comments[i])
        for vec, name, scalar in ((Lstar, "L_star", 67), (EAstar, "Ea_star", 71),
                                  (EBstar, "Eb_star", 75)):
            mycoldata.extend([vec, coldata[scalar]])
            mycomments.extend([name, comments[scalar]])
        mycoldata.append(ECstar)
        mycomments.append("Ec_star")
        for i in range(80, len(coldata)):   # everything after Ecz_star
            mycoldata.append(coldata[i])
            mycomments.append(comments[i])
    else:
        # No gas/star columns were concatenated, so everything past the dark
        # matter shape columns can be appended verbatim.
        for i in range(29, len(coldata)):
            mycoldata.append(coldata[i])
            mycomments.append(comments[i])

    return [mycoldata, mycomments]
    
def read_particles(fname):
    """Read an ASCII *.AHF_particles file.

    The file is laid out as: a halo count, then for each halo a particle
    count followed by one particle per line.  The pattern may repeat if
    several files were concatenated together.

    :param fname:  The .AHF_particles file to be read.  Must be in ASCII format
    :returns:  [data, numpartlist] where ``data`` holds all particles in
        order (an N x 2 array with the type identifier in the second column
        when the file carries one, otherwise a flat array) and
        ``numpartlist`` gives the number of particles in each halo.
    """
    # Peek at line index 2 (the first particle line) to see whether each
    # particle carries a particle-type identifier in a second column.
    # Default to False so a malformed/short file doesn't raise NameError.
    ptypes = False
    with open(fname, 'r') as f:
        for linecount, line in enumerate(f):
            if linecount == 2:
                ptypes = len(line.split()) > 1
                break
        f.seek(0)

        # Accumulate per-halo chunks and concatenate once at the end:
        # calling numpy's append() per halo copies the whole array every
        # time (O(n^2)) and, with no axis given, silently flattened the
        # documented N x 2 typed-particle layout.  The float empty() seed
        # keeps the historical float64 result dtype.
        chunks = [empty((0, 2))] if ptypes else [empty(0)]
        numpartlist = []
        num_halos = None
        in_halo = False
        for line in f:
            if num_halos is None:
                # A fresh section starts with its halo count.
                num_halos = int(line.split()[0])
                halos_read = 0
                in_halo = False
                continue
            if not in_halo:
                # First line of a halo gives its particle count.
                num_parts = int(line.split()[0])
                numpartlist.append(num_parts)
                parts_read = 0
                in_halo = True
                particles = []
                continue
            particles.append(line)
            parts_read += 1
            if parts_read == num_parts:
                vals = array([int(tok) for tok in "".join(particles).split()])
                chunks.append(vals.reshape(-1, 2) if ptypes else vals)
                in_halo = False
                halos_read += 1
            if halos_read == num_halos:
                # Section complete; any further lines start a new section.
                num_halos = None

    if ptypes:
        data = concatenate(chunks, axis=0)
    else:
        data = concatenate(chunks)
    return [data, numpartlist]



def read_and_write_particles(infile,outfile,maxsize):
    """The HDF5 v 1.8 method of reading and writing particles: periodically
    flush the accumulated particle array to the output file (since v 1.8
    allows appending to datasets), so no more than roughly ``maxsize`` GB of
    particle data is held in memory at once.  Note that this will probably
    be slower than the 1.6 method, at least for small files, but if memory
    is a problem, this is probably the way to go.

    NOTE:  THIS DOESN'T WORK RIGHT NOW.  The appending doesn't work, so if
        you desperately want to use this function, you'll have to fix it.
        It probably should be deleted, but is kept around for reference.

    :param infile:  The AHF_particles file to read.  Must be in ASCII format.
    :param outfile:  The IRATE catalog file to save the particle data to.
    :param maxsize:  The maximum amount of memory that can be used at any given
        time, in GB.
    """
    #Read a *.AHF_particles file and return list structured with all the particles in the first halo in the first entry, all the particles in the second entry in the second, etc,
    #Each particle being represented by a list where first element is the ID and the second is the particle type if a particle type is found in the first particle
    maxsizebytes = maxsize*1024*1024*1024
    f = open(infile,'r')
    linecount = 0
    #Peek at line index 2 (the first particle line) to see whether each
    #particle carries a particle-type identifier in a second column.
    #NOTE(review): if the file has fewer than 3 lines, ptypes is never
    #bound and the code below raises NameError.
    for line in f:
        if linecount > 2:  break
        if linecount == 2:  #Then I'm looking at a particle; let's find out if it has a particle type
            if len(fromstring(line,dtype='int',sep='\t')) > 1:  ptypes = True
            else:   ptypes = False
        linecount = linecount + 1
    
    f.seek(0)
    #Create the (resizable) output dataset up front; it is reopened and
    #extended every time the in-memory buffer is flushed below.
    p = h5py.File(outfile,'a')
    if ptypes:  
        data = empty((0,2))
        p.create_dataset("Particles",(1,2),dtype="int",maxshape=(None,2))
        #Unfortunately can't specify zero length datasets, which will be a problem later
    else:   
        data = empty(0)
        p.create_dataset("Particles",(1,),dtype="int",maxshape=None,chunks=(1,))    
        #Have to specify chunks to allow resizing here
    p.close()

    numpartlist = []
    num_halos = None
    in_halo = False
    try:
        for line in f:
            #A fresh section starts with its halo count; num_halos resets to
            #None after each section so catted-together files are handled.
            if num_halos == None:   
                num_halos = fromstring(line,dtype='int',sep="not an empty string")[0]
                halos_read = 0
                in_halo = False
                continue
            if not in_halo:
                #First line of a halo gives its particle count.
                num_parts = fromstring(line,dtype='int',sep="not an empty string")[0]
                numpartlist.append(num_parts)
                parts_read = 0
                in_halo = True
                particles = ""
                continue
            particles = particles + line
            parts_read = parts_read + 1
            if parts_read == num_parts:
                #Note that sep = \t will also accept \n as a separator
                if ptypes:  data = append(data,fromstring(particles,dtype='int',sep='\t').reshape(-1,2))
                else:    data = append(data,fromstring(particles,dtype='int',sep='\t'))                
                in_halo = False
                halos_read = halos_read + 1
                #Flush the in-memory array to the file whenever it exceeds
                #the requested memory cap, then start accumulating afresh.
                if data.nbytes > maxsizebytes:
                    print "Dumping current array to "+outfile
                    #Then I want to write data to a file and then clear it
                    p = h5py.File(outfile,'a')
                    curlen = p['Particles'].shape[0]
                    #Now I have to build in the unfortunate check for len 1 datasets
                    if curlen == 1:
                        if ptypes:  p['Particles'].resize((data.shape[0],2))
                        else: p['Particles'].resize((data.shape[0],))
                        p['Particles'][:] = data
                    else:
                        if ptypes:  p['Particles'].resize((curlen+data.shape[0],2))
                        else: p['Particles'].resize((curlen+data.shape[0],))
                        p['Particles'][curlen:curlen+data.shape[0]] = data
                    p.close()
                    if ptypes:  data = empty((0,2))
                    else:   data = empty(0) 
            if halos_read == num_halos:
                num_halos = None
            
    finally:
        f.close()

    #Whatever is left in the buffer after the loop still has to be written.
    print "Dumping remaining data to "+outfile
    p = h5py.File(outfile,'a')
    curlen = p['Particles'].shape[0]
    if curlen == 1:
        #Length 1 means the dataset still holds only its placeholder row.
        print "No data in file currently."
        if ptypes:  
            print "About to resize to a {0} by 2 array.".format(data.shape[0])
            p['Particles'].resize((data.shape[0],2))
        else:
            print "About to resize to a {0} length array.".format(data.shape[0])
            p['Particles'].resize((data.shape[0],))
        print "About to fill the array with data...."
        p['Particles'][:] = data
        print "Success!"
    else:
        if ptypes:  p['Particles'].resize((curlen+data.shape[0],2))
        else: p['Particles'].resize((curlen+data.shape[0],))
        p['Particles'][curlen:curlen+data.shape[0]] = data
    print "Adding numpartlist"
    p.create_dataset("ParticlesPerHalo",data=numpartlist)
    print "Closing the file."
    p.close()
    print "Closed!"
    
def read_profiles(fname, nhalos, nbins):
    """Read an ASCII .AHF_profiles file, collimate it, and return a list of
    data and a list of column headers.  The list of data is a list over
    columns; within each column, each halo has its own array, one entry per
    radial bin.  The angular momentum (L) and shape eigenvector (Ea, Eb, Ec)
    triplets are combined into (nbins, 3) arrays.

    :param fname:  The ASCII format .AHF_profiles file to be read.
    :param nhalos:  The number of halos contained in the file
    :param nbins:  An array or list that tells you the number of
        radial bins for each halo
    :returns:  [coldata, mycomments]
    """
    data = []
    with open(fname, 'r') as f:
        comments = f.readline().lstrip('#').split()
        for i in range(nhalos):
            data.append([])
            for j in range(nbins[i]):
                data[i].append([float(v) for v in f.readline().split()])

    print("Read profile information...now putting in the desired order.")

    # Drop the trailing "(N)" column index from each header.  str.strip()
    # removed *any* of '(', ')' and the digits from the ends, which mangled
    # names ending in those digits, so remove the exact suffix instead.
    for i in range(len(comments)):
        suffix = "(%d)" % (i + 1)
        if comments[i].endswith(suffix):
            comments[i] = comments[i][:-len(suffix)]

    # Build the output column list: the first 7 raw columns pass through,
    # then L/a/Ea/b/Eb/c/Ec (with the x,y,z triplets concatenated), then
    # everything from raw column 22 onward passes through.
    mylen = max(nbins)
    coldata = []
    mycomments = []
    for i in range(7):
        coldata.append([])
        mycomments.append(comments[i])
    for name in ("L", "a", "Ea", "b", "Eb", "c", "Ec"):
        coldata.append([])
        mycomments.append(name)
    for i in range(22, len(comments)):
        coldata.append([])
        mycomments.append(comments[i])

    # Pre-size each halo's per-bin array; L, Ea, Eb, and Ec (output columns
    # 7, 9, 11, 13) are 3-vectors, everything else is scalar.
    vector_cols = (7, 9, 11, 13)
    for i in range(len(coldata)):
        for j in range(nhalos):
            if i in vector_cols:
                coldata[i].append(zeros((mylen, 3)))
            else:
                coldata[i].append(zeros(mylen))

    # Fill the arrays.  Raw column k maps to: 0-6 pass through; 7-9 -> L;
    # 10 -> a; 11-13 -> Ea; 14 -> b; 15-17 -> Eb; 18 -> c; 19-21 -> Ec;
    # k >= 22 shifts down by the 8 slots absorbed into the vector columns.
    for i in range(len(data)):              # loop over halos
        for j in range(len(data[i])):       # loop over radial bins
            row = data[i][j]
            for k in range(len(row)):
                if k < 7:
                    coldata[k][i][j] = row[k]
                elif k <= 9:
                    coldata[7][i][j, k - 7] = row[k]
                elif k == 10:
                    coldata[8][i][j] = row[k]
                elif k <= 13:
                    coldata[9][i][j, k - 11] = row[k]
                elif k == 14:
                    coldata[10][i][j] = row[k]
                elif k <= 17:
                    coldata[11][i][j, k - 15] = row[k]
                elif k == 18:
                    coldata[12][i][j] = row[k]
                elif k <= 21:
                    coldata[13][i][j, k - 19] = row[k]
                else:
                    coldata[k - 8][i][j] = row[k]

    return [coldata, mycomments]
    
    
def add_ahf_param(pfile, outname):
    """Reads an AHF .parameter file and adds it to an existing IRATE file
    as attributes of the /CatalogHeader/AHFHeader group.

    :param pfile:  The parameter file to be read.  Should be standard AHF
        ASCII parameter file format.
    :param outname:  The IRATE file to store the parameters in.
    """
    # Interesting lines have the format [PARAMETER] <tab> [VALUE], so
    # anything without a tab isn't a parameter; '*' marks a comment line.
    lines = []
    with open(pfile) as f:
        for line in f:
            if '\t' not in line:
                continue
            if line.startswith('*'):
                continue
            lines.append(line.split())

    # Recent h5py versions require an explicit mode; 'a' (create or append)
    # matches the historical default of the bare h5py.File(outname) call.
    # try/finally ensures the file is closed even if a write fails.
    out = h5py.File(outname, 'a')
    try:
        header = out['CatalogHeader'].create_group('AHFHeader')
        for fields in lines:
            # Only the first two tokens matter; trailing tokens are ignored.
            header.attrs[fields[0]] = fields[1]
    finally:
        out.close()

    
def write_halodata(halocols, headers, outname):
    """Write collimated data from read_halos to an IRATE file.

    :param halocols:  The collimated data, in list form.  Each entry should
        be a list of data points, and each entry should be in the same order.
    :param headers:  A list of names for each column, e.g. Center or Velocity,
        that will be used to name the HDF5 datasets.
    :param outname:  The IRATE file to create/append the catalog in.
    """
    # try/finally ensures the file is closed even if a dataset write fails.
    f = h5py.File(outname, 'a')
    try:
        # CatalogHeader is created empty here; add_ahf_param fills it later.
        f.create_group('CatalogHeader')
        cat = f.create_group('Catalog')
        # One dataset per column, named after its header.
        for name, col in zip(headers, halocols):
            cat.create_dataset(name, data=col)
    finally:
        f.close()

    
def add_particles(particles, numpartlist, pfilename, catfilename):
    """Writes the particles contained within halos to an HDF5 file, then
    links that file into an existing IRATE catalog.

    :param particles:  An array of all the particles, in order.  Can be N x 2
        if particle type is also saved.
    :param numpartlist:  An array of all the number of particles that reside in
        each halo, in the same order as the particles are given.
    :param pfilename:  The HDF5 file to save the previous two arrays to.
    :param catfilename:  The IRATE file to link pfilename to.
    """
    # The (potentially large) particle data goes in its own file that the
    # main catalog file links to.
    p = h5py.File(pfilename, 'w')
    try:
        p.create_dataset("Particles", data=particles)
        p.create_dataset("ParticlesPerHalo", data=numpartlist)
    finally:
        p.close()

    # Open first, *then* guard the writes: opening inside the try block
    # meant a failed open raised NameError from f.close() in the finally
    # clause, masking the real error.
    f = h5py.File(catfilename, 'a')
    try:
        parts = f['Catalog'].create_group("ParticleData")
        parts["Particles"] = h5py.ExternalLink(pfilename, "/Particles")
        parts["ParticlesPerHalo"] = h5py.ExternalLink(pfilename, "/ParticlesPerHalo")
    finally:
        f.close()



def link_pfile(pfilename, catfilename):
    """Link an existing HDF5 file with datasets for the particles and for
    the number of particles in each halo to an existing IRATE catalog file.

    :param pfilename:  The HDF5 file with the particle info in it.  Must
        contain /Particles and /ParticlesPerHalo datasets.
    :param catfilename:  The IRATE file with the corresponding catalog data.
    """
    # Open first, *then* guard the writes: opening inside the try block
    # meant a failed open raised NameError from f.close() in the finally
    # clause, masking the real error.
    f = h5py.File(catfilename, 'a')
    try:
        parts = f['Catalog'].create_group("ParticleData")
        parts["Particles"] = h5py.ExternalLink(pfilename, "/Particles")
        parts["ParticlesPerHalo"] = h5py.ExternalLink(pfilename, "/ParticlesPerHalo")
    finally:
        f.close()

def add_profiles(profilecols, headers, outname):
    """Adds the radial profile data from read_profiles to an existing IRATE
    catalog file under /Catalog/RadialProfiles.

    :param profilecols:  The collimated profile data: a list over columns
        (i.e. properties); each halo has its own list within a column.
    :param headers:  The names of the various columns, which are used to name
        the HDF5 datasets.
    :param outname:  The existing IRATE catalog file to save the Radial data to
    """
    # try/finally ensures the file is closed even if a dataset write fails.
    f = h5py.File(outname, 'a')
    try:
        profs = f['Catalog'].create_group('RadialProfiles')
        # One dataset per property, named after its column header.
        for name, col in zip(headers, profilecols):
            profs.create_dataset(name, data=col)
    finally:
        f.close()