
Source Code for Module ivs.sed.creategrids

"""
Create photometric and spectral grids.

It is possible to run this module from the terminal, e.g., to generate a
limb darkening grid. All input is automatically processed, so there are a few
tweaks you need to take into account when supplying parameters. Some of them are
explained in the L{ivs.aux.argkwargparser} module.

Examples::

    $:> python creategrids.py calc_limbdark_grid 'responses=["OPEN.BOL","MOST.V","GENEVA"]' ebvs=[0.05] outfile=mygrid.fits

"""
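
#-- An analogous, purely illustrative invocation (a sketch only; the keyword
#   names follow the calc_integrated_grid signature below and are parsed by
#   L{ivs.aux.argkwargparser}):
#
#   $:> python creategrids.py calc_integrated_grid threads=2 law=fitzpatrick2004 ebvs=[0.0,0.05,0.1]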
import sys
import astropy.io.fits as pf
import logging
import numpy as np
import time
from multiprocessing import cpu_count,Manager,Process
import os
import shutil
from ivs.sed import model
from ivs.sed import filters
from ivs.sed import reddening
from ivs.sed import limbdark
from ivs.inout import ascii
from ivs.aux import argkwargparser
from ivs.aux import loggers

logger = logging.getLogger('IVS.SED.CREATE')


#{ Helper functions

def get_responses(responses=None,add_spectrophotometry=False,wave=(0,np.inf)):
    """
    Get a list of response functions and their information.

    You can specify bandpass systems (e.g. GENEVA) and then all Geneva filters
    will be collected. You can also specify individual bandpasses (e.g. GENEVA.V),
    in which case only that one will be returned. The default C{None} returns
    all systems except some Hubble and ISO ones.

    You can also set responses to C{None} and give a wavelength range for filter
    selection.

    Example inputs for C{responses} are:

    >>> responses = ['GENEVA.V','2MASS.J']
    >>> responses = ['GENEVA','2MASS.J']
    >>> responses = ['BOXCAR','STROMGREN']
    >>> responses = None

    @param responses: a list of filter systems or passbands
    @type responses: list of str
    @param add_spectrophotometry: add spectrophotometric filters to the filter list
    @type add_spectrophotometry: bool
    @param wave: wavelength range
    @type wave: tuple (float,float)
    """
    #-- add spectrophotometric filters when asked or when BOXCAR is in responses
    if add_spectrophotometry or (responses is not None and any(['BOXCAR' in i for i in responses])):
        bands = filters.add_spectrophotometric_filters()

    #-- if no responses are given, select using wavelength range
    if responses is None:
        responses = filters.list_response(wave_range=(wave[0],wave[-1]))
    else:
        responses_ = []
        if not any(['BOXCAR' in i for i in responses]) and add_spectrophotometry:
            responses.append('BOXCAR')
        for resp in responses:
            logger.info('... subselection: {}'.format(resp))
            if resp=='OPEN.BOL':
                responses_.append(resp)
            else:
                responses_ += filters.list_response(resp)
        responses = responses_
    #-- get information on the responses
    #filter_info = filters.get_info(responses)
    #responses = filter_info['photband']
    responses = [resp for resp in responses if not (('ACS' in resp) or ('WFPC' in resp) or ('STIS' in resp) or ('ISOCAM' in resp) or ('NICMOS' in resp))]

    logger.info('Selected response curves: {}'.format(', '.join(responses)))

    return responses
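
#-- Illustrative usage (a sketch; it assumes the GENEVA and 2MASS response
#   curves are available in your IVS data files). A mixed system/passband
#   specification is expanded to individual passbands, which can then be fed
#   to the synthetic photometry routines used further below:
#
#   >>> photbands = get_responses(['GENEVA','2MASS.J'])
#   >>> wave,flux = model.get_table(teff=10000.,logg=4.0)
#   >>> synflux = model.synthetic_flux(wave,flux,photbands)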

#}

#{ Limb darkening coefficients

def calc_limbdark_grid(responses=None,vrads=[0],ebvs=[0],zs=[0.],\
            ld_law='claret',fitmethod='equidist_r_leastsq',
            outfile=None,force=False,**kwargs):
    """
    Calculate a grid of limb-darkening coefficients.

    You need to specify a list of response curves, and a grid of radial velocity
    (C{vrads}), metallicity (C{zs}) and reddening (C{ebvs}) points. You can
    choose which C{ld_law} to fit and with which C{fitmethod}. Extra kwargs specify
    the properties of the atmosphere grid to be used.

    If you give a gridfile that already exists, the current file is simply
    updated with the new passbands, i.e. overlapping response curves will not
    be recomputed, unless you set C{force=True}, in which case previous calculations
    will be overwritten. You probably only want to update or overwrite existing
    files if you use the same C{vrads}, C{ebvs}, C{zs} etc...

    The generated FITS file has the following structure:

        1. The primary HDU is empty. The primary header contains only the fit
           routine (FIT), LD law (LAW) and used grid (GRID).
        2. Each table extension has the name of the photometric passband (e.g.
           "GENEVA.V"). Each record in the table extension has the following columns:
           Teff, logg, ebv, vrad, z (the grid points) and a1, a2, a3, a4 (the limb
           darkening coefficients) and Imu1 (the intensity in the center of the disk)
           and SRS, dint (fit evaluation parameters, see L{ivs.sed.limbdark}).
           Although the system and filter can be derived from the extension name,
           there are also separate entries in the header for "SYSTEM" and "FILTER".

    Example usage:

    >>> calc_limbdark_grid(['MOST.V','GENEVA'],vrads=[0],ebvs=[0.06],zs=[0,0],\
    ...    ld_law='claret',fitmethod='equidist_r_leastsq',outfile='HD261903.fits')

    """
    #-- collect response curves from user input
    photbands = get_responses(responses)

    if outfile is None:
        outfile = '{}_{}_{}.fits'.format(kwargs.get('grid',limbdark.defaults['grid']),ld_law,fitmethod)
    #-- add the possibility to update an existing file if it already exists.
    if os.path.isfile(outfile):
        hdulist = pf.open(outfile,mode='update')
        existing_bands = [ext.header['extname'] for ext in hdulist[1:]]
    else:
        hdulist = pf.HDUList([])
        hdulist.append(pf.PrimaryHDU(np.array([[0,0]])))
        existing_bands = []

    #-- update the header with some information on the fitting parameters
    hd = hdulist[0].header
    hd['FIT'] = (fitmethod, 'FIT ROUTINE')
    hd['LAW'] = (ld_law, 'FITTED LD LAW')
    hd['GRID'] = (kwargs.get('grid',limbdark.defaults['grid']), 'GRID')

    #-- cycle through all the bands and compute the limb darkening coefficients
    for i,photband in enumerate(photbands):
        if photband in existing_bands and not force:
            logger.info('BAND {} already exists: skipping'.format(photband))
            continue
        logger.info('Calculating photband {} ({}/{})'.format(photband,i+1,len(photbands)))
        pars,coeffs,Imu1s = limbdark.fit_law_to_grid(photband,vrads=vrads,ebvs=ebvs,zs=zs,\
                     law=ld_law,fitmethod=fitmethod,**kwargs)
        cols = []

        cols.append(pf.Column(name='Teff', format='E', array=pars[:,0]))
        cols.append(pf.Column(name="logg", format='E', array=pars[:,1]))
        cols.append(pf.Column(name="ebv" , format='E', array=pars[:,2]))
        cols.append(pf.Column(name="vrad", format='E', array=pars[:,3]))
        cols.append(pf.Column(name="z"   , format='E', array=pars[:,4]))
        for col in range(coeffs.shape[1]):
            cols.append(pf.Column(name='a{:d}'.format(col+1), format='E', array=coeffs[:,col]))
        cols.append(pf.Column(name='Imu1', format='E', array=Imu1s[:,0]))
        cols.append(pf.Column(name='SRS', format='E', array=Imu1s[:,1]))
        cols.append(pf.Column(name='dint', format='E', array=Imu1s[:,2]))

        newtable = pf.BinTableHDU.from_columns(pf.ColDefs(cols))
        newtable.header['EXTNAME'] = (photband, "SYSTEM.FILTER")
        newtable.header['SYSTEM'] = (photband.split('.')[0], 'PASSBAND SYSTEM')
        newtable.header['FILTER'] = (photband.split('.')[1], 'PASSBAND FILTER')
        if photband in existing_bands and force:
            hdulist[hdulist.index_of(photband)] = newtable
            logger.info("Forced overwrite of {}".format(photband))
        else:
            hdulist.append(newtable)

    #-- clean up
    if os.path.isfile(outfile):
        hdulist.close()
    else:
        hdulist.writeto(outfile)
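
#-- Illustrative read-back (a sketch, based on the FITS structure documented
#   in the docstring above; 'HD261903.fits' and 'GENEVA.V' are example names):
#
#   >>> with pf.open('HD261903.fits') as ff:
#   ...     data = ff['GENEVA.V'].data
#   ...     teffs,loggs = data['Teff'],data['logg']
#   ...     coeffs = np.array([data['a{:d}'.format(i+1)] for i in range(4)])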

#}

#{ Integrated photometry

def calc_integrated_grid(threads=1,ebvs=None,law='fitzpatrick2004',Rv=3.1,
          units='Flambda',responses=None,update=False,add_spectrophotometry=False,**kwargs):
    """
    Integrate an entire SED grid over all passbands and save to a FITS file.

    The output file can be used to fit SEDs more efficiently, since integration
    over the passbands has already been carried out.

    WARNING: this function can take a very long time to compute!

    Extra keywords can be used to specify the grid.

    @param threads: number of threads
    @type threads: integer, 'max', 'half' or 'safe'
    @param ebvs: reddening parameters to include
    @type ebvs: numpy array
    @param law: interstellar reddening law to use
    @type law: string (valid law name, see C{reddening.py})
    @param Rv: Rv value for reddening law
    @type Rv: float
    @param units: choose to work in 'Flambda' or 'Fnu'
    @type units: str, one of 'Flambda','Fnu'
    @param responses: response curves to add (if None, add all)
    @type responses: list of strings
    @param update: if True, append to an existing FITS file; otherwise overwrite
    a possibly existing file.
    @type update: boolean
    """
    if ebvs is None:
        ebvs = np.r_[0:4.01:0.01]

    #-- select number of threads
    if threads=='max':
        threads = cpu_count()
    elif threads=='half':
        threads = cpu_count()//2
    elif threads=='safe':
        threads = cpu_count()-1
    threads = int(threads)
    if threads > len(ebvs):
        threads = len(ebvs)
    logger.info('Threads: %s'%(threads))

    #-- set the parameters for the SED grid
    model.set_defaults(**kwargs)
    #-- get the dimensions of the grid: both the grid points, but also
    #   the wavelength range
    teffs,loggs = model.get_grid_dimensions()
    wave,flux = model.get_table(teff=teffs[0],logg=loggs[0])
    #-- get the response functions covering the wavelength range of the models
    #   also get the information on those filters
    responses = get_responses(responses=responses,\
              add_spectrophotometry=add_spectrophotometry,wave=wave)

    #-- definition of one process:
    def do_ebv_process(ebvs,arr,responses):
        logger.debug('EBV: %s-->%s (%d)'%(ebvs[0],ebvs[-1],len(ebvs)))
        for ebv in ebvs:
            flux_ = reddening.redden(flux,wave=wave,ebv=ebv,rtype='flux',law=law,Rv=Rv)
            #-- calculate synthetic fluxes
            synflux = model.synthetic_flux(wave,flux_,responses,units=units)
            arr.append([np.concatenate(([ebv],synflux))])
        logger.debug("Finished EBV process (len(arr)=%d)"%(len(arr)))

    #-- do the calculations
    c0 = time.time()
    output = np.zeros((len(teffs)*len(ebvs),4+len(responses)))
    start = 0
    logger.info('Total number of tables: %i'%(len(teffs)))
    exceptions = 0
    exceptions_logs = []
    for i,(teff,logg) in enumerate(zip(teffs,loggs)):
        if i>0:
            logger.info('%s %s %s %s: ET %d seconds'%(teff,logg,i,len(teffs),(time.time()-c0)/i*(len(teffs)-i)))

        #-- get model SED and absolute luminosity
        wave,flux = model.get_table(teff=teff,logg=logg)
        Labs = model.luminosity(wave,flux)

        #-- threaded calculation over all E(B-V)s
        processes = []
        manager = Manager()
        arr = manager.list([])
        all_processes = []
        for j in range(threads):
            all_processes.append(Process(target=do_ebv_process,args=(ebvs[j::threads],arr,responses)))
            all_processes[-1].start()
        for p in all_processes:
            p.join()

        try:
            #-- collect the results and add them to 'output'
            arr = np.vstack([row for row in arr])
            sa = np.argsort(arr[:,0])
            arr = arr[sa]
            output[start:start+arr.shape[0],:3] = teff,logg,Labs
            output[start:start+arr.shape[0],3:] = arr
            start += arr.shape[0]
        except:
            logger.warning('Exception in calculating Teff=%f, logg=%f'%(teff,logg))
            logger.debug('Exception: %s'%(sys.exc_info()[1]))
            exceptions = exceptions + 1
            exceptions_logs.append(sys.exc_info()[1])

    #-- make FITS columns
    gridfile = model.get_file()
    if os.path.isfile(os.path.basename(gridfile)):
        outfile = os.path.basename(gridfile)
    else:
        outfile = os.path.join(os.path.dirname(gridfile),'i{0}'.format(os.path.basename(gridfile)))
    outfile = 'i{0}'.format(os.path.basename(gridfile))
    outfile = os.path.splitext(outfile)
    outfile = outfile[0]+'_law{0}_Rv{1:.2f}'.format(law,Rv)+outfile[1]
    logger.info('Precaution: making original grid backup at {0}.backup'.format(outfile))
    if os.path.isfile(outfile):
        shutil.copy(outfile,outfile+'.backup')
    output = output.T
    if not update or not os.path.isfile(outfile):
        cols = [pf.Column(name='teff',format='E',array=output[0]),
                pf.Column(name='logg',format='E',array=output[1]),
                pf.Column(name='ebv',format='E',array=output[3]),
                pf.Column(name='Labs',format='E',array=output[2])]
        for i,photband in enumerate(responses):
            cols.append(pf.Column(name=photband,format='E',array=output[4+i]))
    #-- make FITS columns but copy the existing ones
    else:
        hdulist = pf.open(outfile,mode='update')
        names = hdulist[1].columns.names
        cols = [pf.Column(name=name,format='E',array=hdulist[1].data.field(name)) for name in names]
        for i,photband in enumerate(responses):
            cols.append(pf.Column(name=photband,format='E',array=output[4+i]))

    #-- make FITS extension and write grid/reddening specifications to header
    table = pf.BinTableHDU.from_columns(pf.ColDefs(cols))
    table.header['gridfile'] = os.path.basename(gridfile)
    for key in sorted(model.defaults.keys()):
        key_ = (len(key)>8) and 'HIERARCH '+key or key
        table.header[key_] = model.defaults[key]
    for key in sorted(kwargs.keys()):
        key_ = (len(key)>8) and 'HIERARCH '+key or key
        table.header[key_] = kwargs[key]
    table.header['FLUXTYPE'] = units
    table.header['REDLAW'] = (law,'interstellar reddening law')
    table.header['RV'] = (Rv,'interstellar reddening parameter')

    #-- make/update complete FITS file
    if not update or not os.path.isfile(outfile):
        if os.path.isfile(outfile):
            os.remove(outfile)
            logger.warning('Removed existing file: %s'%(outfile))
        hdulist = pf.HDUList([])
        hdulist.append(pf.PrimaryHDU(np.array([[0,0]])))
        hdulist.append(table)
        hdulist.writeto(outfile)
        logger.info("Written output to %s"%(outfile))
    else:
        hdulist[1] = table
        hdulist.flush()
        hdulist.close()
        logger.info("Appended output to %s"%(outfile))

    logger.warning('Encountered %s exceptions!'%(exceptions))
    for i in exceptions_logs:
        print('ERROR')
        print(i)
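
#-- Illustrative read-back (a sketch; 'imygrid.fits' is a hypothetical output
#   name, the real name is derived from the SED grid file, reddening law and
#   Rv as shown above). The table holds one row per (teff,logg,ebv) combination
#   and one column per passband:
#
#   >>> with pf.open('imygrid.fits') as ff:
#   ...     data = ff[1].data
#   ...     keep = (data['teff']==10000.) & (data['ebv']==0.0)
#   ...     fluxes = data['GENEVA.V'][keep]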

def update_grid(gridfile,responses,threads=10):
    """
    Add passbands to an existing grid.
    """
    shutil.copy(gridfile,gridfile+'.backup')
    hdulist = pf.open(gridfile,mode='update')
    existing_responses = set(list(hdulist[1].columns.names))
    responses = sorted(list(set(responses) - existing_responses))
    if not len(responses):
        hdulist.close()
        print("No new responses to do")
        return None
    law = hdulist[1].header['REDLAW']
    units = hdulist[1].header['FLUXTYPE']
    teffs = hdulist[1].data.field('teff')
    loggs = hdulist[1].data.field('logg')
    ebvs = hdulist[1].data.field('ebv')
    zs = hdulist[1].data.field('z')
    rvs = hdulist[1].data.field('rv')
    vrads = hdulist[1].data.field('vrad')
    names = hdulist[1].columns.names

    N = len(teffs)
    index = np.arange(N)

    output = np.zeros((len(responses),len(teffs)))
    print(N)

    #--- PARALLEL PROCESS
    def do_process(teffs,loggs,ebvs,zs,rvs,index,arr):
        output = np.zeros((len(responses)+1,len(teffs)))
        c0 = time.time()
        N = len(teffs)
        for i,(teff,logg,ebv,z,rv,ind) in enumerate(zip(teffs,loggs,ebvs,zs,rvs,index)):
            if i%100==0:
                dt = time.time()-c0
                print("ETA",index[0],(N-i)/100.*dt/3600.,'hr')
                c0 = time.time()
            #-- get model SED and absolute luminosity
            model.set_defaults(z=z)
            wave,flux = model.get_table(teff,logg)
            Labs = model.luminosity(wave,flux)
            flux_ = reddening.redden(flux,wave=wave,ebv=ebv,rtype='flux',law=law,Rv=rv)
            #-- calculate synthetic fluxes
            output[0,i] = ind
            output[1:,i] = model.synthetic_flux(wave,flux_,responses,units=units)
        arr.append(output)
    #--- PARALLEL PROCESS
    c0 = time.time()

    manager = Manager()
    arr = manager.list([])

    all_processes = []
    for j in range(threads):
        all_processes.append(Process(target=do_process,args=(teffs[j::threads],\
                                                              loggs[j::threads],\
                                                              ebvs[j::threads],\
                                                              zs[j::threads],\
                                                              rvs[j::threads],\
                                                              index[j::threads],arr)))
        all_processes[-1].start()
    for p in all_processes:
        p.join()

    output = np.hstack([res for res in arr])
    del arr
    sa = np.argsort(output[0])
    output = output[:,sa][1:]
    ascii.write_array(np.rec.fromarrays(output,names=responses),'test.temp',header=True)
    #-- copy old columns and append new ones
    cols = []
    for i,photband in enumerate(responses):
        cols.append(pf.Column(name=photband,format='E',array=output[i]))
    #-- create new table
    table = pf.BinTableHDU.from_columns(pf.ColDefs(cols))
    table = pf.BinTableHDU.from_columns(hdulist[1].columns + table.columns,header=hdulist[1].header)
    hdulist[1] = table
    hdulist.close()
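
#-- Illustrative call (a sketch; the filename and passbands are example
#   values). Note that update_grid expects the grid to contain 'z', 'rv' and
#   'vrad' columns; fix_grid below can add those to older files:
#
#   >>> update_grid('imygrid.fits',['STROMGREN.U','STROMGREN.B'],threads=4)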

def fix_grid(grid):
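    """
    Bring an existing integrated grid file up to date, in place.

    Column names of the grid axes are lower-cased, missing 'z', 'vrad' and
    'rv' columns are added (from the header or with default values), an
    'rv' column that is inconsistent with the header is corrected, the
    remaining header keywords are copied over, an overview of the grid axes
    is printed, and entries with non-positive effective temperature are removed.
    """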
    hdulist = pf.open(grid,mode='update')
    names = hdulist[1].columns.names
    for i,name in enumerate(names):
        if name.lower() in ['teff','logg','ebv','labs','vrad','rv','z']:
            names[i] = name.lower()
    cols = [pf.Column(name=name,format='E',array=hdulist[1].data.field(name)) for name in names]
    N = len(hdulist[1].data)

    keys = [key.lower() for key in hdulist[1].header.keys()]

    if not 'z' in names:
        z = hdulist[1].header['z']
        logger.info('Adding metallicity from header {}'.format(z))
        cols.append(pf.Column(name='z',format='E',array=np.ones(N)*z))
    else:
        logger.info("Metallicity already in there")
    if not 'vrad' in names:
        vrad = 0.
        logger.info('Adding radial velocity {}'.format(vrad))
        cols.append(pf.Column(name='vrad',format='E',array=np.ones(N)*vrad))
    else:
        logger.info("Radial velocity already in there")

    fix_rv = False
    if not 'rv' in names:
        if 'rv' in keys:
            rv = hdulist[1].header['Rv']
            logger.info("Adding interstellar Rv from header {}".format(rv))
        else:
            rv = 3.1
            logger.info("Adding default interstellar Rv {}".format(rv))
        cols.append(pf.Column(name='rv',format='E',array=np.ones(N)*rv))
    elif not hdulist[1].header['Rv']==hdulist[1].data.field('rv')[0]:
        rv = hdulist[1].header['Rv']
        fix_rv = rv
        logger.info('Correcting interstellar Rv with {}'.format(rv))
    else:
        logger.info("Interstellar Rv already in there")

    table = pf.BinTableHDU.from_columns(pf.ColDefs(cols))
    if fix_rv:
        table.data.field('rv')[:] = rv
    fake_keys = [key.lower() for key in table.header.keys()]
    fake_keys.append('use_scratch') # Don't know why this is necessary but it doesn't work otherwise (JV 23.7.13) !!!
    for key in hdulist[1].header.keys():
        if not key.lower() in fake_keys:
            if len(key)>8:
                key = 'HIERARCH '+key
            table.header[key] = hdulist[1].header[key]
    hdulist[1] = table
    print("Axis:")
    for name in hdulist[1].columns.names:
        if name.islower() and not name=='labs':
            ax = np.unique(hdulist[1].data.field(name))
            print(name,len(ax),min(ax),max(ax))

    teffs = hdulist[1].data.field('teff')
    loggs = hdulist[1].data.field('logg')
    ebvs = hdulist[1].data.field('ebv')

    #for logg in np.unique(loggs):
        #keep = loggs==logg
        #plt.figure()
        #plt.title(logg)
        #plt.plot(teffs[keep],ebvs[keep],'ko',ms=2)

    keep = hdulist[1].data.field('teff')>0
    logger.info('Removing {}/{} false entries'.format(sum(~keep),len(keep)))
    #print len(ff[1].data),sum(keep)
    hdulist[1].data = hdulist[1].data[keep]
    hdulist.close()

#}

if __name__=="__main__":
    logger = loggers.get_basic_logger()
    imethod,iargs,ikwargs = argkwargparser.parse()
    output = globals()[imethod](*iargs,**ikwargs)