1
0
forked from erosita/uds
coma/coma/uds/sherpa.py
2024-10-14 15:02:56 +03:00

360 lines
12 KiB
Python

import glob
import os
import sys
import numpy as np
from astropy.io import fits
import pickle
from scipy.stats import sem
#import matplotlib.pyplot as plt
#plt.style.use('ggplot')
from sherpa.astro.ui import *
def calc_ecf(path, catprep=None, emin=None, emax=None, eband=None, outfile=None, fitmin=0.2, fitmax=5.0, simnum=100):
    """
    Fit an absorbed power law to each grouped source spectrum matched by
    *path* and pickle per-source fluxes/rates for later ECF (energy
    conversion factor) calculation by print_ecf().

    NOTE: the ARF "uncorrection" step — SPECRESP = SPECRESP / CORRCOMB,
    needed because ermldet does not correct for PSF and vignetting — is
    currently commented out below.

    Parameters
    ----------
    path : str
        Glob pattern matching ARF files; the grouped PHA filename is derived
        from each ARF name ("ARF" -> "SourceSpec", ".fits" -> ".fits.grp").
    catprep : str
        ermldet catprep FITS catalogue (mandatory; exits if missing).
    emin, emax : list of float
        Per-band lower/upper energy bounds (keV), parallel lists.
    eband : list
        Band labels, parallel to emin/emax.
    outfile : str
        Output pickle filename.
    fitmin, fitmax : float
        Energy range used for the single spectral fit (keV).
    simnum : int
        Number of sample_flux simulations per band.
    """
    data = {}

    if catprep is None:
        print("catprep file is not provided, exit")
        sys.exit()
    print("Reading {}".format(catprep))
    catprep_hdul = fits.open(catprep, mode='readonly')
    catprep_data = catprep_hdul[1].data

    # Per-source ML quantities keyed by detUID.
    mlrate = {}
    mlrate_err = {}
    dl = {}
    for rec in catprep_data:
        srcid = rec['detUID']
        # consider only non-extended sources
        # if rec['EXT'] > 0.0:
        #     continue
        if srcid in mlrate:
            print("Duplicate SrcID! {}".format(srcid))
            sys.exit()
        mlrate[srcid] = rec['ML_RATE_0']
        mlrate_err[srcid] = rec['ML_RATE_ERR_0']
        dl[srcid] = rec['DET_LIKE_0']

    for filename in glob.glob(path):
        print("Reading {}".format(filename))
        # Derive the grouped PHA filename from the ARF filename.
        pha = filename.replace("ARF", "SourceSpec").replace(".fits", ".fits.grp")
        pha_header = fits.getheader(pha, ext=1)
        obj_id = pha_header['OBJECT']  # renamed: was 'object', shadowing the builtin
        srcid = "ML{}".format(obj_id)
        if srcid not in mlrate:
            print("Skip {}".format(srcid))
            continue
        # ARF uncorrection, currently disabled:
        # hdul = fits.open(filename, mode='readonly')
        # specresp = np.divide(hdul[1].data['SPECRESP'], hdul[1].data['CORRCOMB'])
        # hdul[1].data['SPECRESP'] = specresp
        # hdul.writeto(fout, overwrite=True)

        clean()              # sherpa
        set_xsxsect("vern")
        set_xsabund('wilm')
        load_pha(pha)        # sherpa
        # To use the uncorrected ARF instead:
        # load_arf(fout)     # sherpa

        # Absorbed power law with frozen nH and photon index; only the
        # normalisation is fitted.
        set_source("xstbabs.abs1 * powlaw1d.p1")  # sherpa
        abs1.nH = 0.02
        p1.ref = 1     # reference energy, keV
        p1.gamma = 2.0
        p1.ampl = 1e-4  # in cm**-2 s**-1 keV**-1
        freeze(abs1.nH, p1.gamma)  # sherpa

        set_stat("wstat")  # sherpa
        ignore_bad()       # sherpa
        # Log the actual fit range (message used to hard-code "0.3-5.0 keV").
        print("Notice {}-{} keV:".format(fitmin, fitmax))
        notice(fitmin, fitmax)  # sherpa

        # Check whether the spectrum still has counts after filtering.
        dep = get_dep(filter=True)
        if not len(dep):
            print("*** Skip {} due to low counts".format(srcid))
            continue

        fit()
        res_fit = get_fit_results()
        # rstat is None when there are no degrees of freedom; nan keeps the
        # print below valid and fails the rstat < 3.0 gate further down.
        rstat = res_fit.rstat if res_fit.rstat is not None else float('nan')
        set_analysis('energy')

        photon_flx = []
        energy_flx = []
        skipme = False
        for ie in range(len(eband)):
            ph_flx = calc_photon_flux(emin[ie], emax[ie])
            en_flx = calc_energy_flux(emin[ie], emax[ie])
            if not ph_flx > 0:
                skipme = True
            photon_flx.append(ph_flx)
            energy_flx.append(en_flx)
            print("en{} Photon flux: {:.4E} Energy flux: {:.4E}".format(eband[ie], ph_flx, en_flx))
        if skipme:
            print("*** Skip zero flux for {}".format(srcid))
            continue

        print("----> Success: {} pow norm: {:.4E} reduced stat: {:.2f}".format(res_fit.succeeded, res_fit.parvals[0], rstat))

        sample_flx = []
        sample_flx_lo = []
        sample_flx_hi = []
        if rstat < 3.0:
            conf()  # sherpa: needed so sample_flux scales from confidence results
            component = abs1 + p1
            for ie in range(len(eband)):
                # sample_flux element [0] is [value, upper, lower].
                pars_flux = sample_flux(modelcomponent=component, lo=emin[ie], hi=emax[ie],
                                        num=simnum, correlated=True, numcores=10, confidence=68)
                sample_flx.append(pars_flux[0][0])
                sample_flx_lo.append(pars_flux[0][2])
                sample_flx_hi.append(pars_flux[0][1])
        else:
            # Poor fit: do not record this source.
            continue

        print("### SAMPLE FLUX: {}".format(sample_flx))
        data[srcid] = {'sample_flx': sample_flx,
                       'sample_flx_lo': sample_flx_lo,
                       'sample_flx_hi': sample_flx_hi,
                       'photon_flx': photon_flx,
                       'energy_flx': energy_flx,
                       'ml_rate': mlrate[srcid],
                       'ml_rate_err': mlrate_err[srcid],
                       'det_like': dl[srcid]}

    with open(outfile, 'wb') as f:
        pickle.dump(data, f)
def calc_flux(path, catprep=None, emin=None, emax=None, eband=None, outfile=None, fitmin=0.2, fitmax=5.0, simnum=100):
    """
    Per-band spectral fits: for each grouped source spectrum matched by
    *path*, fit an absorbed power law separately in every (emin[ie],
    emax[ie]) band, sample fluxes, and pickle the results to *outfile*.

    Unlike calc_ecf(), which fits once over (fitmin, fitmax), this routine
    refits inside each band; fitmin/fitmax are accepted for interface
    symmetry but are not used here.

    Parameters
    ----------
    path : str
        Glob pattern matching ARF files; the grouped PHA filename is derived
        from each ARF name ("ARF" -> "SourceSpec", ".fits" -> ".fits.grp").
    catprep : str
        ermldet catprep FITS catalogue (mandatory; exits if missing).
    emin, emax : list of float
        Per-band lower/upper energy bounds (keV), parallel lists.
    eband : list
        Band labels, parallel to emin/emax.
    outfile : str
        Output pickle filename.
    simnum : int
        Number of sample_flux simulations per band.
    """
    data = {}

    if catprep is None:
        print("catprep file is not provided, exit")
        sys.exit()
    print("Reading {}".format(catprep))
    catprep_hdul = fits.open(catprep, mode='readonly')
    catprep_data = catprep_hdul[1].data

    # Per-source ML quantities keyed by detUID.
    mlrate = {}
    mlrate_err = {}
    dl = {}
    for rec in catprep_data:
        srcid = rec['detUID']
        # consider only non-extended sources
        # if rec['EXT'] > 0.0:
        #     continue
        if srcid in mlrate:
            print("Duplicate SrcID! {}".format(srcid))
            sys.exit()
        mlrate[srcid] = rec['ML_RATE_0']
        mlrate_err[srcid] = rec['ML_RATE_ERR_0']
        dl[srcid] = rec['DET_LIKE_0']

    for filename in glob.glob(path):
        print("Reading {}".format(filename))
        pha = filename.replace("ARF", "SourceSpec").replace(".fits", ".fits.grp")
        pha_header = fits.getheader(pha, ext=1)
        obj_id = pha_header['OBJECT']  # renamed: was 'object', shadowing the builtin
        srcid = "ML{}".format(obj_id)
        if srcid not in mlrate:
            print("Skip {}".format(srcid))
            continue

        clean()              # sherpa
        set_xsxsect("vern")
        set_xsabund('wilm')
        load_pha(pha)        # sherpa

        # Absorbed power law with frozen nH and photon index.
        set_source("xstbabs.abs1 * powlaw1d.p1")  # sherpa
        abs1.nH = 0.02
        p1.ref = 1     # reference energy, keV
        p1.gamma = 2.0
        p1.ampl = 1e-4  # in cm**-2 s**-1 keV**-1
        freeze(abs1.nH, p1.gamma)  # sherpa

        set_stat("wstat")  # sherpa

        photon_flx = []
        energy_flx = []
        skipme = False
        rstat = float('nan')  # stays nan if every band is skipped below
        for ie in range(len(eband)):
            # Clear the previous band's filter first: in Sherpa, notice() on
            # an already-filtered dataset *adds* the range instead of
            # replacing it, so without this reset every fit after the first
            # would span the union of all earlier bands.
            ignore(None, None)  # sherpa
            print("Notice {}-{} keV:".format(emin[ie], emax[ie]))
            notice(emin[ie], emax[ie])  # sherpa
            ignore_bad()                # sherpa

            # Check whether the spectrum still has counts after filtering.
            dep = get_dep(filter=True)
            if not len(dep):
                print("*** Skip {} due to low counts".format(srcid))
                continue

            fit()
            res_fit = get_fit_results()
            # rstat is None when there are no degrees of freedom; nan keeps
            # the prints valid and fails the rstat < 3.0 gate below.
            rstat = res_fit.rstat if res_fit.rstat is not None else float('nan')
            set_analysis('energy')
            ph_flx = calc_photon_flux(emin[ie], emax[ie])
            en_flx = calc_energy_flux(emin[ie], emax[ie])
            if not ph_flx > 0:
                skipme = True
            photon_flx.append(ph_flx)
            energy_flx.append(en_flx)
            print("en{} Photon flux: {:.4E} Energy flux: {:.4E}".format(eband[ie], ph_flx, en_flx))
            print("----> Success: {} pow norm: {:.4E} reduced stat: {:.2f}".format(res_fit.succeeded, res_fit.parvals[0], rstat))

        if skipme:
            print("*** Skip zero flux for {}".format(srcid))
            continue

        sample_flx = []
        sample_flx_lo = []
        sample_flx_hi = []
        if rstat < 3.0:  # rstat comes from the last band's fit
            conf()  # sherpa: needed so sample_flux scales from confidence results
            component = abs1 + p1
            for ie in range(len(eband)):
                # sample_flux element [0] is [value, upper, lower].
                pars_flux = sample_flux(modelcomponent=component, lo=emin[ie], hi=emax[ie],
                                        num=simnum, correlated=True, numcores=10, confidence=68)
                sample_flx.append(pars_flux[0][0])
                sample_flx_lo.append(pars_flux[0][2])
                sample_flx_hi.append(pars_flux[0][1])
        else:
            # Poor fit: do not record this source.
            continue

        print("### SAMPLE FLUX: {}".format(sample_flx))
        data[srcid] = {'sample_flx': sample_flx,
                       'sample_flx_lo': sample_flx_lo,
                       'sample_flx_hi': sample_flx_hi,
                       'photon_flx': photon_flx,
                       'energy_flx': energy_flx,
                       'ml_rate': mlrate[srcid],
                       'ml_rate_err': mlrate_err[srcid],
                       'det_like': dl[srcid]}

    with open(outfile, 'wb') as f:
        pickle.dump(data, f)
def print_ecf(infile=None, emin=None, emax=None, eband=None, skipfrac=5.0):
    """
    Print per-source energy conversion factors (ECF = ML_RATE / flux) from a
    pickle produced by calc_ecf()/calc_flux(), then the band-averaged ECF
    with its standard error of the mean.

    Parameters
    ----------
    infile : str
        Pickle filename written by calc_ecf()/calc_flux().
    emin, emax : list
        Accepted for interface symmetry with the calc_* routines; only the
        eband labels are actually used here.
    eband : list
        Band labels; index ie selects element ie of each per-source flux list.
    skipfrac : float
        Sources whose relative flux error exceeds this percentage are
        excluded from the band average.
    """
    with open(infile, 'rb') as f:
        data = pickle.load(f)

    for ie in range(len(eband)):
        print()
        ecf = []  # ECFs of the sources that survive the skipfrac cut
        for key in data:
            ar = data[key]
            ml = ar['ml_rate']
            dml = ar['ml_rate_err']
            flx = ar['sample_flx'][ie]
            dflx1 = flx - ar['sample_flx_lo'][ie]
            dflx2 = ar['sample_flx_hi'][ie] - flx
            # take maximum error as conservative approach
            dflx = np.maximum(dflx1, dflx2)
            ecf0 = ml / flx
            # NOTE(review): linear propagation with a *difference* lets the
            # two error terms cancel; a sum (or quadrature) would be more
            # conservative — confirm intent before changing.
            decf0 = abs(flx * dml - ml * dflx) / np.power(flx, 2)
            frac0 = 100 * abs(flx * dml - ml * dflx) / (flx * ml)
            # Exclude sources with a relative flux error >= skipfrac percent.
            skipme = not ((100 * dflx / flx) < skipfrac)
            print("en{} {} DL={:.2f} FLUX: {:4E} (-{:.4E} +{:.4E} {:.2f}%) ML_RATE: {:.4f} +/- {:.4f} ({:.2f}%) --> {:.4E} +/- {:.4E} {:.2f}% skip={}".format(eband[ie], key, ar['det_like'],
                                                                                                                                                             flx, dflx1, dflx2, 100 * dflx / flx,
                                                                                                                                                             ml, dml, 100 * dml / ml,
                                                                                                                                                             ecf0, decf0, frac0, skipme))
            if skipme:
                continue
            ecf.append(ecf0)

        if not ecf:
            # Avoid nan mean / divide-by-zero when every source was skipped.
            print("\n*** en{} no sources passed the {}% error cut".format(eband[ie], skipfrac))
            continue
        ecf_mean = np.mean(ecf)
        n = np.size(ecf)
        # Standard error of the mean; undefined (ddof=1) for a single source.
        ecf_err = np.std(ecf, ddof=1) / np.sqrt(n) if n > 1 else 0.0
        print("\n*** en{} ecf {:.4E} +/- {:.4E} {:.2f}% N={}".format(eband[ie],
                                                                     ecf_mean,
                                                                     ecf_err, 100 * ecf_err / ecf_mean,
                                                                     n))
    # NOTE(review): exits the interpreter after printing all bands — confirm
    # this hard exit is intended (the original is script-oriented).
    sys.exit()