forked from erosita/uds
2023-03-29 17:28:29 +03:00
parent 09deb0e4f0
commit 4e6025f357
16 changed files with 792 additions and 58 deletions

@@ -51,7 +51,25 @@ emax_ev=[2300, 600, 2300, 5000, 10000,8000]
emin_kev=[0.3, 0.3, 0.6, 2.3, 0.2, 0.3]
emax_kev=[2.3, 0.6, 2.3, 5.0, 10.0, 8.0]
ecf = [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
#ecf = [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
ecf = [9.7817E+11, 3.2982E+12, 1.3903E+12, 2.3322E+12, 5.2022E+11, 5.8453E+11]
"""
*** en0 ecf 9.7817E+11 +/- 2.4606E+10 2.52% N=17
*** en1 ecf 3.2982E+12 +/- 8.2963E+10 2.52% N=17
*** en2 ecf 1.3903E+12 +/- 3.5036E+10 2.52% N=17
*** en3 ecf 2.3322E+12 +/- 5.8717E+10 2.52% N=17
*** en4 ecf 5.2022E+11 +/- 1.3110E+10 2.52% N=17
*** en5 ecf 5.8453E+11 +/- 1.4743E+10 2.52% N=17
"""
outfile_post='.fits'
"""
Pavel Medvedev:
0.3-2.3: 9.135819435325375e-13
0.3-0.6: 9.160477830652834e-13
0.6-2.3: 9.201664167869427e-13
2.3-5.0: 8.721504826794627e-12
"""

uds/uds/sherpa.py Normal file (219 lines added)

@@ -0,0 +1,219 @@
import glob
import os
import sys
import numpy as np
from astropy.io import fits
import pickle
from scipy.stats import sem
#import matplotlib.pyplot as plt
#plt.style.use('ggplot')
from sherpa.astro.ui import *
def calc_ecf(path,catprep=None, emin=None, emax=None, eband=None, outfile=None, fitmin=0.2, fitmax=5.0, simnum=100):
"""
Un-apply the PSF and vignetting correction from the ARF:
SPECRESP = SPECRESP / CORRCOMB, where CORRCOMB = CORRPSF * CORRVIGN,
because ermldet does not correct for PSF and vignetting.
"""
data={}
if catprep is None:
print("catprep file is not provided, exiting")
sys.exit()
else:
print("Reading {}".format(catprep))
catprep_hdul = fits.open(catprep,mode='readonly')
catprep_data = catprep_hdul[1].data
#print(catprep_data['detUID'])
mlrate = {}
mlrate_err = {}
dl = {}
for rec in catprep_data:
srcid=rec['detUID']
""" consider only non-extended sources """
if(rec['EXT'] > 0.0):
continue
if(srcid in mlrate.keys()):
print("Duplicate SrcID! {}".format(srcid))
sys.exit()
else:
mlrate[srcid]=rec['ML_RATE_0']
mlrate_err[srcid]=rec['ML_RATE_ERR_0']
dl[srcid]=rec['DET_LIKE_0']
flist=glob.glob(path)
for filename in flist:
print("Reading {}".format(filename))
fout=filename.replace(".fits", ".uncorr.fits")
pha=filename.replace("ARF", "SourceSpec")
pha=pha.replace(".fits", ".fits.grp")
pha_header = fits.getheader(pha,ext=1)
object_name = pha_header['OBJECT']
# check a given source (testing...)
#if (int(object_name) > 10):
# continue
srcid="ML{}".format(object_name)
if not (srcid in mlrate.keys()):
print("Skip {}".format(srcid))
continue
hdul = fits.open(filename,mode='readonly')
specresp = np.divide(hdul[1].data['SPECRESP'], hdul[1].data['CORRCOMB'])
hdul[1].data['SPECRESP'] = specresp
hdul.writeto(fout, overwrite=True)
clean() # sherpa
set_xsxsect("vern")
set_xsabund('wilm')
load_pha(pha) # sherpa
""" load uncorrected ARF """
# load_arf(fout) # sherpa
# Define the source model
#sh.set_source("powlaw1d.p1")
set_source("xstbabs.abs1 * powlaw1d.p1") # sherpa
abs1.nH = 0.02
# Change reference energy of the model
p1.ref = 1 # 1 keV
p1.gamma = 2.0
p1.ampl = 1e-4 # in cm**-2 s**-1 keV**-1
freeze(abs1.nH, p1.gamma) # sherpa
### Define the statistic
set_stat("wstat") # sherpa
ignore_bad() # sherpa
### Define the fit range
print("Notice 0.3-5.0 keV:")
notice(fitmin, fitmax) # sherpa
""" Check wether spectrum still has counts after filtering """
dep = get_dep(filter=True)
if not (len(dep)):
print("*** Skip {} due to low counts".format(srcid))
continue
### Do the fit
fit()
res_fit=get_fit_results()
rstat=res_fit.rstat
set_analysis('energy')
photon_flx=[]
energy_flx=[]
skipme=False
for ie in range(len(eband)):
ph_flx=calc_photon_flux(emin[ie], emax[ie])
en_flx=calc_energy_flux(emin[ie], emax[ie])
if not (ph_flx>0):
skipme=True
photon_flx.append(ph_flx)
energy_flx.append(en_flx)
print("en{} Photon flux: {:.4E} Energy flux: {:.4E}".format(eband[ie], ph_flx, en_flx))
if skipme:
print("*** Skip zero flux for {}".format(srcid))
continue
print("----> Success: {} pow norm: {:.4E} reduced stat: {:.2f}".format(res_fit.succeeded,res_fit.parvals[0], rstat))
sample_flx=[]
sample_flx_lo=[]
sample_flx_hi=[]
if (res_fit.rstat < 3.0):
conf()
res_conf=get_conf_results()
#print(res_conf)
component=abs1+p1
#print(component)
#covar()
#errs = get_covar_results().parmaxes
#ans = sample_flux(correlated=False, scales=errs, num=500)
for ie in range(len(eband)):
pars_flux = sample_flux(modelcomponent=component, lo=emin[ie], hi=emax[ie], num=simnum, correlated=True, numcores=10, confidence=68)
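# pars_flux[0] holds [median flux, upper bound, lower bound] for the
# full model at the requested confidence level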
sample_flx.append(pars_flux[0][0])
sample_flx_lo.append(pars_flux[0][2])
sample_flx_hi.append(pars_flux[0][1])
else:
continue
print("### SAMPLE FLUX: {}".format(sample_flx))
data[srcid]={'sample_flx':sample_flx,
'sample_flx_lo':sample_flx_lo,
'sample_flx_hi':sample_flx_hi,
'photon_flx':photon_flx,
'energy_flx':energy_flx,
'ml_rate':mlrate[srcid],
'ml_rate_err':mlrate_err[srcid],
'det_like':dl[srcid]}
with open(outfile, 'wb') as f:
pickle.dump(data, f)
def print_ecf(infile=None, emin=None, emax=None, eband=None, skipfrac=5.0):
with open(infile, 'rb') as f:
data = pickle.load(f)
for ie in range(len(eband)):
print()
ecf=[]
for key in data.keys():
ar=data[key]
ml = ar['ml_rate']
dml = ar['ml_rate_err']
flx = ar['sample_flx'][ie]
dflx1 = ar['sample_flx'][ie] - ar['sample_flx_lo'][ie]
dflx2 = ar['sample_flx_hi'][ie] - ar['sample_flx'][ie]
# take maximum error as conservative approach
dflx = np.maximum(dflx1,dflx2)
ecf0 = ml/flx
decf0 = abs(flx*dml - ml*dflx) / np.power(flx,2)
frac0 = 100 * abs(flx*dml - ml*dflx) / (flx*ml)
# flag sources whose sampled-flux uncertainty exceeds skipfrac per cent
skipme = (100*dflx/flx) >= skipfrac
#if not (len(ar['sample_flx'])>0):
# continue
print("en{} {} DL={:.2f} FLUX: {:4E} (-{:.4E} +{:.4E} {:.2f}%) ML_RATE: {:.4f} +/- {:.4f} ({:.2f}%) --> {:.4E} +/- {:.4E} {:.2f}% skip={}".format(eband[ie],key,ar['det_like'],
flx,dflx1,dflx2,100*dflx/flx,
ml,dml,100*dml/ml,
ecf0,decf0,frac0,skipme))
if skipme:
continue
ecf.append(ecf0)
ecf_mean = np.mean(ecf)
ecf_err = np.std(ecf, ddof=1)/np.sqrt(np.size(ecf))
print("\n*** en{} ecf {:.4E} +/- {:.4E} {:.2f}% N={}".format(eband[ie],
ecf_mean,
ecf_err,100*ecf_err/ecf_mean,
np.size(ecf)))
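A minimal driver sketch (an assumption, not part of this commit) wiring the two routines above to the band definitions from the config diff at the top; the file patterns and output names are hypothetical:

bands = [0, 1, 2, 3, 4, 5]
emin_kev = [0.3, 0.3, 0.6, 2.3, 0.2, 0.3]
emax_kev = [2.3, 0.6, 2.3, 5.0, 10.0, 8.0]
calc_ecf(path="arf/*ARF*.fits", catprep="catprep.fits",
         emin=emin_kev, emax=emax_kev, eband=bands,
         outfile="ecf.pkl", fitmin=0.2, fitmax=5.0, simnum=100)
print_ecf(infile="ecf.pkl", emin=emin_kev, emax=emax_kev,
          eband=bands, skipfrac=5.0)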


@@ -1,5 +1,6 @@
import os
import sys
import csv
import numpy as np
from astropy.io import fits
from astropy import wcs
@@ -9,7 +10,7 @@ from astropy.io.fits import getdata
import glob
from fitsio import FITS
from pathlib import Path
import pandas
from astropy.table import QTable, Table, Column
from astropy import units as u
@@ -983,7 +984,7 @@ def create_sensmap(sensmap=None,expmap=None,backmap=None,detmask=None,areatab=No
"ext_flag=no",
"extlike_flag=no",
"compress_flag=no",
"area_flag=no",]
"area_flag=yes",]
remove_file(sensmap)
remove_file(areatab)
os.system((" ").join(cmd))
@@ -1005,3 +1006,157 @@ def group_spectra(path,group_min=25):
]
print((" ").join(cmd))
os.system((" ").join(cmd))
def check_ermldet_forced(infile):
if not os.path.isfile(infile):
print("File not found {}".format(infile))
sys.exit()
print("reading mllist {}".format(infile))
hdul = fits.open(infile)
tbdata = hdul[1].data
hdul.close()
for rec in tbdata:
idsrc=rec['ID_SRC']
boxid = rec['BOX_ID_SRC']
if(idsrc != boxid):
print("The ermldet catalog in forced mode should contain only unique sources.")
print("Use fitpos_flag=no, fitext_flag=no, nmulsou=1")
print("{} != {}".format(idsrc,boxid))
sys.exit()
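# Hedged sketch (an assumption, following the cmd-list convention used in
# create_sensmap above): the ermldet flags that forced mode requires and
# that check_ermldet_forced enforces:
# cmd = ["ermldet",
#        "fitpos_flag=no",
#        "fitext_flag=no",
#        "nmulsou=1",]
# os.system(" ".join(cmd))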
def make_catalog(infile=None, dlmin=6.0,dlmax=10,scale=60*60,ext_like=0.0,outkey='main',emin=None, emax=None, eband=None, ecf=None,
infile_en03cat=None,infile_en03sens=None):
if not os.path.isfile(infile):
print("File not found {}".format(infile))
sys.exit()
print("reading catprep {}".format(infile))
fout_selected=infile.replace(".fits", ".{}.selected.csv".format(outkey))
fout_skip=infile.replace(".fits", ".{}.skip.reg".format(outkey))
fout_extended=infile.replace(".fits", ".extended.reg")
""" Read en03 energy band """
en03cat={}
hdul = fits.open(infile_en03cat)
en03tab = hdul[1].data
hdul.close()
for rec in en03tab:
key = rec['detUID']
en03cat[key]={'ra':rec['RA'], 'dec':rec['DEC'], 'det_like':rec['DET_LIKE_0'],
'ml_rate':rec['ML_RATE_0'], 'ml_rate_err':rec['ML_RATE_ERR_0'],
'ml_flux':rec['ML_FLUX_0'], 'ml_flux_err':rec['ML_FLUX_ERR_0'],'sens':0.0}
print("reading {}".format(infile_en03sens))
hdul = fits.open(infile_en03sens)
en03sens = hdul[0].data
en03hdr = hdul[0].header
hdul.close()
en03wcs = WCS(en03hdr)
for key in en03cat.keys():
ra=en03cat[key]['ra']
dec=en03cat[key]['dec']
crd = SkyCoord(ra, dec, frame=FK5(), unit="deg")
pix = en03wcs.wcs_world2pix([(ra, dec),], 1)
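# wcs_world2pix with origin=1 uses the FITS pixel convention; subtract 1
# to index the 0-based numpy image array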
px=round(pix[0][0])-1
py=round(pix[0][1])-1
en03cat[key]['sens']=en03sens[py,px]
#print(key,ra,dec,px,py,value)
#(ra0,dec0), = en03wcs.wcs_pix2world(pix, 1)
#crd0 = SkyCoord(ra0, dec0, frame=FK5(), unit="deg")
#sep = crd.separation(crd0).arcsec
hdul = fits.open(infile)
tbdata = hdul[1].data
hdul.close()
catsel=[]
catskip=[]
catext=[]
skip_count=0
selected_count=0
keepalive_count=0
for rec in tbdata:
src_crd = SkyCoord(rec['ra'], rec['dec'], frame=FK5(), unit="deg")
key = rec['detUID']
if not (key in en03cat.keys()):
print("{} not found in {}".format(rec['detUID'],infile_en03cat))
sys.exit()
else:
""" check coords """
en03_crd = SkyCoord(en03cat[key]['ra'], en03cat[key]['dec'], frame=FK5(), unit="deg")
sep = src_crd.separation(en03_crd).arcsec
if(sep > 1.0):
print("{} significant offset: {:.4f} arcsec".format(key,sep))
sys.exit()
if (rec['ext_like'] >= ext_like):
catext.append({'ra':rec['ra'],'dec':rec['dec'],'radec_err':rec['radec_err'],
'det_like':rec['det_like_0'],'ext_like':rec['ext_like'],
'ext':rec['ext'],'ext_err':rec['ext_err'],
'src_id':rec['id_src']})
if ((rec['det_like_0'] > dlmin and rec['det_like_0'] < dlmax and rec['ext_like'] < ext_like)):
catsel.append({'ra':rec['ra'],
'dec':rec['dec'],
'radec_err':rec['radec_err'],
'det_like':rec['det_like_0'],
'src_id':rec['id_src'],
'ml_rate':rec['ml_rate_0'],
'ml_rate_err':rec['ml_rate_err_0'],
'ml_flux':rec['ml_flux_0']/ecf,
'ml_flux_err':rec['ml_flux_err_0']/ecf,
'en03_dl':en03cat[key]['det_like'],
'en03_flux':en03cat[key]['ml_flux'],
'en03_flux_err':en03cat[key]['ml_flux_err'],
'en03_sens':en03cat[key]['sens'],
},
)
selected_count=selected_count + 1
else:
catskip.append({'ra':rec['ra'],'dec':rec['dec'],'radec_err':rec['radec_err'],'det_like':rec['det_like_0'],
'src_id':rec['id_src']})
skip_count = skip_count+1
print("total={} skip_count={} selected={}".format(len(tbdata),skip_count,selected_count))
with open(fout_selected, 'w') as csvfile:
fieldnames = ['src_id', 'ra', 'dec', 'radec_err', 'det_like','ml_rate','ml_rate_err','ml_flux','ml_flux_err','en03_dl','en03_flux','en03_flux_err','en03_sens']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for rec in catsel:
writer.writerow(rec)
# writer.writerow({'src_id':rec['src_id'],
# 'ra':rec['ra'],
# 'dec':rec['dec'],
# 'radec_err':rec['radec_err'],
# 'det_like':rec['det_like']})
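# NOTE: the return below exits before the region files are written,
# leaving the fout_skip and fout_extended blocks as dead code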
return
with open(fout_skip, 'w') as writer:
for rec in catskip:
writer.write("fk5;circle({}, {}, {}) # color=red text={{{} {:.2f}}}\n".format(rec['ra'],rec['dec'],rec['radec_err']/scale,
rec['src_id'],rec['det_like']))
with open(fout_extended, 'w') as writer:
for rec in catext:
writer.write("fk5;circle({}, {}, {}) # color=magenta text={{{} {:.2f} {:.2f}}}\n".format(rec['ra'],
rec['dec'],
rec['ext']/scale,
rec['src_id'],
rec['det_like'],
rec['ext_like']))
def get_log_distr(infile=None, index_col='src_id', field=None, minval=10, maxval=100, nbin=50):
logbins=np.logspace(np.log10(minval),np.log10(maxval), nbin)
df = pandas.read_csv(infile, index_col=index_col)
data = df[field]
#hist, edges = np.histogram(array, bins=logbins)
return data, logbins
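A hedged usage sketch (an assumption, not in the commit): plot a log-binned histogram from the selected-source CSV written by make_catalog; the file name and matplotlib calls are illustrative:

import matplotlib.pyplot as plt
data, logbins = get_log_distr(infile="catprep.main.selected.csv",
                              field="ml_flux", minval=1e-15, maxval=1e-12, nbin=50)
plt.hist(data, bins=logbins)
plt.xscale("log")
plt.xlabel("ml_flux (erg/s/cm^2)")
plt.ylabel("N")
plt.show()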