forked from erosita/uds

commit 4e6025f357 (parent 09deb0e4f0): update

.gitignore  (vendored, 6 changed lines)
@@ -61,5 +61,9 @@ venv.bak/
*.aspsol
*.txt
*.png
*.grp
*.csv
.*
!.gitignore
*.qdp
*.pickle
!.gitignore
data/xspec/ML0001.xcm  (new file, 11 lines)
@@ -0,0 +1,11 @@
method leven 10 0.01
abund wilm
xsect vern
cosmo 70 0 0.73
xset delta 0.01
systematic 0
model TBabs*powerlaw
0.0123819 1 0 0 100000 1e+06
2 -1 -3 -2 9 10
0.000106473 0.01 0 0 1e+20 1e+24
bayes off
data/xspec/ML0003.xcm  (new file, 11 lines)
@@ -0,0 +1,11 @@
method leven 10 0.01
abund wilm
xsect vern
cosmo 70 0 0.73
xset delta 0.01
systematic 0
model TBabs*powerlaw
0.00327282 1 0 0 100000 1e+06
2 -1 -3 -2 9 10
0.000121635 0.01 0 0 1e+20 1e+24
bayes off
data/xspec/ML0004.xcm  (new file, 11 lines)
@@ -0,0 +1,11 @@
method leven 10 0.01
abund wilm
xsect vern
cosmo 70 0 0.73
xset delta 0.01
systematic 0
model TBabs*powerlaw
0.00240283 1 0 0 100000 1e+06
2 -1 -3 -2 9 10
0.000297459 0.01 0 0 1e+20 1e+24
bayes off
data/xspec/ML0005.xcm  (new file, 11 lines)
@@ -0,0 +1,11 @@
method leven 10 0.01
abund wilm
xsect vern
cosmo 70 0 0.73
xset delta 0.01
systematic 0
model TBabs*powerlaw
0.0320991 1 0 0 100000 1e+06
2 -1 -3 -2 9 10
0.000123814 0.01 0 0 1e+20 1e+24
bayes off
data/xspec/ecf.xcm  (new file, 11 lines)
@@ -0,0 +1,11 @@
method leven 10 0.01
abund wilm
xsect vern
cosmo 70 0 0.73
xset delta 0.01
systematic 0
model TBabs*powerlaw
0.02 -1 0 0 100000 1e+06
2 -1 -3 -2 9 10
4.83094e-05 0.01 0 0 1e+20 1e+24
bayes off
data/xspec/load.xcm  (new file, 8 lines)
@@ -0,0 +1,8 @@
data tm0_SrcTool_020_SourceSpec_00005.fits.grp
ign bad
ign **-0.2
ign 10.-**
cpd /xs
setpl en
pl lda
products/srctool-products/ecf.xcm  (new file, 11 lines)
@@ -0,0 +1,11 @@
method leven 10 0.01
abund wilm
xsect vern
cosmo 70 0 0.73
xset delta 0.01
systematic 0
model TBabs*powerlaw
0.02 -1 0 0 100000 1e+06
2 -1 -3 -2 9 10
4.83094e-05 0.01 0 0 1e+20 1e+24
bayes off
products/srctool-products/rate.pro  (new file, 13 lines)
@@ -0,0 +1,13 @@
pro rate
  ar=[[1.8847140728347272e-13, 0.37631887197494507],$
      [1.521645609807299e-13, 0.40992528200149536],$
      [4.915687878270888e-13, 1.0217742919921875],$
      [1.7903951117830771e-13, 0.282895565032959],$
      [6.886249544234846e-14, 0.21058528125286102],$
      [9.49530897031588e-14, 0.2018834501504898]]

  for i=0L,5 do begin
    print, i, (ar[1,i])/(ar[0,i])
  endfor

end
@@ -19,6 +19,7 @@

Execution of individual commands is controlled by variables, for example: do_init = True
The selected energy band is controlled by the index variable
forced=True performs forced photometry

PARAMETERS:

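For orientation: the index variable mentioned above selects one entry from the parallel band arrays defined in uds/uds/config.py (emin_kev, emax_kev, eband; see the config.py hunk further down). A minimal sketch of the convention, with the eband labels assumed rather than copied from the repository:

    # Hypothetical illustration of the band-selection convention; the numeric
    # limits are copied from uds/uds/config.py below, the eband labels are assumed.
    eband    = ["0", "1", "2", "3", "4", "5"]
    emin_kev = [0.3, 0.3, 0.6, 2.3, 0.2, 0.3]
    emax_kev = [2.3, 0.6, 2.3, 5.0, 10.0, 8.0]

    index = 1
    print("Band en{}: {}-{} keV".format(eband[index], emin_kev[index], emax_kev[index]))
    # -> Band en1: 0.3-0.6 keV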
@@ -66,30 +67,27 @@ create_folder(outfile_dir)

outkey="tm0"

do_init = False
do_merge = False
do_detmask = False
do_expmap = False
do_init = True
do_merge = True
do_detmask = True
do_expmap = True
do_erbox1 = True # local mode
do_erbackmap1 = True #
do_erbox2 = True # map mode, with background map
do_erbackmap2 = True #
do_erbox3 = True # map mode, with background map
do_erbackmap3 = True #
do_ersensmap = True
do_ermldet = False
do_catprep = False
do_filter_catalog = False
do_cross_match = False

index=1

do_erbox1 = False # local mode
do_erbackmap1 = False #
do_erbox2 = False # map mode, with background map
do_erbackmap2 = False #
do_erbox3 = False # map mode, with background map
do_erbackmap3 = False #
forced=True
""" If forced=True, take input catalog from energy range en0 """

do_ersensmap = False
do_ermldet = True
do_catprep = True
do_filter_catalog = True

#do_ermldet = False
#do_cross_match = False



index=4

vign=True
vignetting = 'vign' if (vign==True) else 'novign'
@@ -110,7 +108,7 @@ for tmkey in keylist_tm.keys():
    do_obsmode=False,
    do_center=False,
    do_evtool=False,
    do_expmap=False,
    do_expmap=True,
    vign=vign,
    ra_cen=ra_cen, de_cen=de_cen,
    emin_kev=emin_kev[index],
@@ -138,9 +136,11 @@ if(do_detmask==True):
"""
Assemble the combined exposure map; note the factor of 7:
the exposure is computed for 7 telescopes.
outfile_expmap="{}_ExposureMap_en{}.{}.fits".format(os.path.join(outfile_dir,outkey), eband[index], vignetting)
outfile_bkgmap="{}_BackMap_en{}.{}.fits".format(os.path.join(outfile_dir,outkey), eband[index], vignetting)
"""
outfile_expmap="{}_ExposureMap_en{}{}".format(os.path.join(outfile_dir,outkey),
    eband[index],
outfile_expmap="{}_ExposureMap_en{}.{}{}".format(os.path.join(outfile_dir,outkey),
    eband[index],vignetting,
    outfile_post)
if(do_expmap==True):
    create_expmap_merged(expmaps,outfile_expmap,scale=7.0)
@@ -231,17 +231,38 @@ if(do_erbox3==True):
    save_ds9reg(outfile_boxlist3)

""" Background map 3 """
outfile_backmap3="{}_BackMap3_en{}{}".format(os.path.join(outfile_dir,outkey), eband[index], outfile_post)
cheese_mask="{}_CheeseMask3_en{}{}".format(os.path.join(outfile_dir,outkey), eband[index], outfile_post)
outfile_backmap3="{}_BackMap3_en{}.{}{}".format(os.path.join(outfile_dir,outkey), eband[index], vignetting, outfile_post)
cheese_mask="{}_CheeseMask3_en{}.{}{}".format(os.path.join(outfile_dir,outkey), eband[index], vignetting, outfile_post)
if(do_erbackmap3==True):
    do_erbackmap_esass(outfile_evtool,outfile_expmap,outfile_boxlist3,detmask,emin_ev[index],emax_ev[index],
        outfile_backmap3,cheese_mask)
    boxlist3 = outfile_boxlist3 if(forced == False) else "{}/{}_BoxList3_en{}{}".format(outfile_dir,outkey, eband[0], outfile_post)
    do_erbackmap_esass(outfile_evtool,outfile_expmap,boxlist3,detmask,emin_ev[index],emax_ev[index],
        outfile_backmap3,cheese_mask)

if(forced==True):
    mllist="{}_MaxLikSourceList_en{}.forced{}".format(os.path.join(outfile_dir,outkey), eband[index], outfile_post)
    srcmap="{}_SourceMap_en{}.forced{}".format(os.path.join(outfile_dir,outkey), eband[index], outfile_post)
    """ give mllist from en0 as input """
    boxlist3="{}_MaxLikSourceList_en{}{}".format(os.path.join(outfile_dir,outkey), eband[0], outfile_post)
    fitpos_flag="fitpos_flag=no"
    fitext_flag="fitext_flag=no"
    nmulsou = "nmulsou=1"
    """ don't allow ermldet to split sources """
    if(index == 3):
        """ for hard band take unvignetted background """
        outfile_backmap3="{}_BackMap3_en{}.{}{}".format(os.path.join(outfile_dir,outkey), eband[index], "novign", outfile_post)

else:
    mllist="{}_MaxLikSourceList_en{}{}".format(os.path.join(outfile_dir,outkey), eband[index], outfile_post)
    srcmap="{}_SourceMap_en{}{}".format(os.path.join(outfile_dir,outkey), eband[index], outfile_post)
    boxlist3 = outfile_boxlist3
    fitpos_flag="fitpos_flag=yes"
    fitext_flag="fitext_flag=yes"
    nmulsou = "nmulsou=2"
    """ allow ermldet to split sources (no more than two) """

mllist="{}_MaxLikSourceList_en{}{}".format(os.path.join(outfile_dir,outkey), eband[index], outfile_post)
srcmap="{}_SourceMap_en{}{}".format(os.path.join(outfile_dir,outkey), eband[index], outfile_post)
cmd=["ermldet",
    "mllist=%s" %(mllist),
    "boxlist=%s" %(outfile_boxlist3),
    "boxlist=%s" %(boxlist3),
    "images=\'{}\'".format(outfile_evtool),
    "expimages=\'{}\'".format(outfile_expmap),
    "detmasks=\'{}\'".format(detmask),
@@ -250,7 +271,7 @@ cmd=["ermldet",
    "emax=\'{}\'".format(emax_ev[index]),
    "ecf=\'{}\'".format(ecf[index]),
    "hrdef=",
    "likemin=5.",
    "likemin=0.",
    "extlikemin=6.",
    "compress_flag=N",
    "cutrad=10.", # was 15
@@ -267,14 +288,13 @@ cmd=["ermldet",
    "thres_col=like",
    "thres_val=30.",
    "nmaxfit=4",
    "nmulsou=2",
    "fitext_flag=yes",
    nmulsou,
    fitpos_flag,
    fitext_flag,
    "srcima_flag=yes",
    "srcimages=\'{}\'".format(srcmap)
    ]

sensmap="{}_SensitivityMap_en{}{}".format(os.path.join(outfile_dir,outkey), eband[index], outfile_post)
area="{}_AreaTable_en{}{}".format(os.path.join(outfile_dir,outkey), eband[index], outfile_post)
if(do_ersensmap==True):
    detlike=10
    create_sensmap(sensmap="{}_SensitivityMap_dl{}_en{}{}".format(os.path.join(outfile_dir,outkey), detlike,
@@ -283,15 +303,32 @@ if(do_ersensmap==True):
        detlike, eband[index], outfile_post),
        expmap=outfile_expmap, backmap=outfile_backmap3,detlike=detlike,
        detmask=detmask, emin=emin_ev[index], emax=emax_ev[index],ecf=ecf[index])

    """
    detlike=6
    create_sensmap(sensmap="{}_SensitivityMap_dl{}_en{}{}".format(os.path.join(outfile_dir,outkey), detlike,
        eband[index], outfile_post),
        areatab="{}_AreaTable_dl{}_en{}{}".format(os.path.join(outfile_dir,outkey),
        detlike, eband[index], outfile_post),
        expmap=outfile_expmap, backmap=outfile_backmap3,detlike=detlike,
        detmask=detmask, emin=emin_ev[index], emax=emax_ev[index],ecf=ecf[index])
    """

if(do_ermldet==True):
    remove_file(mllist)
    remove_file(srcmap)
    os.system((" ").join(cmd))
    print((" ").join(cmd))
    if(vign==False):
        print('Run ermldet with vignetted exposure!')
        sys.exit()
    remove_file(mllist)
    remove_file(srcmap)
    os.system((" ").join(cmd))
    print((" ").join(cmd))
    if(forced==True):
        check_ermldet_forced(mllist)

if(forced==True):
    catprep="{}_SourceCatalog_en{}.forced{}".format(os.path.join(outfile_dir,outkey), eband[index], outfile_post)
else:
    catprep="{}_SourceCatalog_en{}{}".format(os.path.join(outfile_dir,outkey), eband[index], outfile_post)

catprep="{}_SourceCatalog_en{}{}".format(os.path.join(outfile_dir,outkey), eband[index], outfile_post)
if(do_catprep==True):
    cmd=["catprep",
        "infile={}".format(mllist),
@@ -306,3 +343,6 @@ if(do_filter_catalog==True):
    filter_catprep(catprep,expcut=5000.0,dlmin=10,dlmax=10000,outkey='bright')
    filter_catprep(catprep,expcut=5000.0,dlmin=6,dlmax=10,outkey='faint')

if(do_cross_match==True):
    crossmatch_shu2019(catprep,dlmin=10,refimage=outfile_evtool,crval=[ra_cen, de_cen],
        catalog=root_path+"/data/Gaia_unWISE/Gaia_unWISE_UDS.fits.catalog")
@@ -2,32 +2,31 @@
"""
NAME:

    04_mosaics.py
    05_srctool.py


PURPOSE:

    Builds mosaics in different energy bands.

    Runs srctool for the widest band, 0.2-10 keV, so that the spectra have the fullest energy coverage. The source list is taken from the 0.3-2.3 keV band.

USAGE:

    esass
    ./01_mosaics.py
    ./05_srctool.py


CONTROL:

    Execution of individual commands is controlled by variables, for example: do_init = True
    The selected energy band is controlled by the index variable
    Requires the previous script, 04_mosaics.py, to have been run

PARAMETERS:

    index : Selected energy band
    index=4 : Selected energy band


OUTPUT:

    Output files are written to the outfile_dir directory
    Output files are written to the outfile_dir/srctool_dir directory


HISTORY:
@@ -49,6 +48,7 @@ import uds

from uds.utils import *
from uds.config import *
from uds.sherpa import *


""" find UDS root dir """
@@ -73,10 +73,13 @@ outfile_srctool="{}_SrcTool_".format(outkey)

do_init = False
do_merge = False
do_srctool = True
do_grppha = True
do_srctool = False
do_grppha = False
do_ecf_calc = False
do_ecf_print = False
do_catalog = True

index=4
index=1
""" work in this particular band so that the spectra cover the full energy range """

vign=True
@@ -122,6 +125,7 @@ is also appended to the filename. """

catprep="{}_SourceCatalog_en{}{}".format(os.path.join(outfile_dir,outkey), eband[0], outfile_post)
""" take source catalog from 0.3-2.3 keV band """

if not (os.path.isfile(catprep)==True):
    print("{} not found, run 04_mosaics.py?".format(catprep))
    sys.exit()
@@ -146,5 +150,19 @@ if(do_srctool==True):
if(do_grppha==True):
    group_spectra("{}/*_SourceSpec_*.fits".format(srctool_dir))

ecfout="{}_SampleFlux.pickle".format(os.path.join(outfile_dir,outkey))

if(do_ecf_calc==True):
    calc_ecf("{}/tm0_SrcTool_020_ARF_?????.fits".format(srctool_dir),
        catprep=catprep, emin=emin_kev, emax=emax_kev, eband=eband, outfile=ecfout, simnum=10000)

if(do_ecf_print==True):
    print_ecf(infile=ecfout, emin=emin_kev, emax=emax_kev, eband=eband, skipfrac=10.0)

index=0
catprep="{}_SourceCatalog_en{}{}".format(os.path.join(outfile_dir,outkey), eband[index], outfile_post)
if(do_catalog==True):
    make_catalog(infile=catprep, dlmin=10.0, dlmax=100000, ext_like=10, ecf=ecf[index],
        emin=emin_kev[index], emax=emax_kev[index], eband=eband[index],
        infile_en03cat='../products/tm0_SourceCatalog_en3.forced.fits',
        infile_en03sens='../products/tm0_SensitivityMap_dl10_en3.fits')
scripts/06_plot.py  (new executable file, 189 lines)
@@ -0,0 +1,189 @@
#!/usr/bin/env python
"""
NAME:

    05_srctool.py


PURPOSE:

    Runs srctool for the widest band, 0.2-10 keV, so that the spectra have the fullest energy coverage. The source list is taken from the 0.3-2.3 keV band.

USAGE:

    esass
    ./05_srctool.py


CONTROL:

    Requires the previous script, 04_mosaics.py, to have been run

PARAMETERS:

    index=4 : Selected energy band


OUTPUT:

    Output files are written to the outfile_dir/srctool_dir directory


HISTORY:

    Roman Krivonos, IKI RAS, krivonos@cosmos.ru
    March 2023

"""

from astropy.wcs import WCS
from astropy.io import fits
import sys, os, os.path, time, subprocess
from pathlib import Path
import numpy as np
import glob
from os.path import dirname
import inspect
import uds

import matplotlib.pyplot as plt

from uds.utils import *
from uds.config import *
from uds.sherpa import *

""" find UDS root dir """
#root_path=dirname(dirname(dirname(inspect.getfile(uds))))
"""
ftools does not like long file path names,
for this reason, we use relative path here
"""
root_path='..'
print("UDS root path: {}".format(root_path))

infile_dir=root_path+'/data/processed'
outfile_dir=root_path+'/products'
create_folder(outfile_dir)

srctool_dir="{}/{}".format(outfile_dir,"srctool-products")
create_folder(srctool_dir)

outkey="tm0"

outfile_srctool="{}_SrcTool_".format(outkey)

do_flux_distr = False
do_sens_curve = True

index=0

catalog = "{}_SourceCatalog_en{}.main.selected.csv".format(os.path.join(outfile_dir,outkey), eband[0])

if not (os.path.isfile(catalog)==True):
    print("{} not found, run 05_srctool.py?".format(catalog))
    sys.exit()

if(do_flux_distr==True):

    data, logbins = get_log_distr(infile=catalog, field='ml_rate', minval=1e-3, maxval=2)
    fig, ax = plt.subplots()
    for axis in ['top','bottom','left','right']:
        ax.spines[axis].set_linewidth(1)
    ax.tick_params(axis="both", width=1, labelsize=14)
    plt.hist(data, bins=logbins, histtype='step', color='blue', linewidth=1, linestyle='solid')
    plt.xlabel('Count rate (counts s$^{-1}$)',fontsize=14, fontweight='normal')
    plt.ylabel('Number',fontsize=14, fontweight='normal')
    plt.grid(visible=True)
    plt.xscale('log')
    plt.yscale('log')
    plt.savefig(catalog.replace("main.selected.csv", "ml_rate.png"), bbox_inches='tight')
    plt.close(fig)

    data, logbins = get_log_distr(infile=catalog, field='ml_flux', minval=1e-15, maxval=2e-12)
    fig, ax = plt.subplots()
    for axis in ['top','bottom','left','right']:
        ax.spines[axis].set_linewidth(1)
    ax.tick_params(axis="both", width=1, labelsize=14)
    plt.hist(data, bins=logbins, histtype='step', color='blue', linewidth=1, linestyle='solid')
    plt.xlabel('Energy flux (erg s$^{-1}$ cm$^{-2}$)',fontsize=14, fontweight='normal')
    plt.ylabel('Number',fontsize=14, fontweight='normal')
    plt.grid(visible=True)
    plt.xscale('log')
    plt.yscale('log')
    plt.savefig(catalog.replace("main.selected.csv", "ml_flux.png"), bbox_inches='tight')
    plt.close(fig)

if(do_sens_curve==True):
    coeff = 2.4336e-13 / 3.4012e-13 # see below

    areatab="{}_AreaTable_dl10_en{}{}".format(os.path.join(outfile_dir,outkey), eband[index], outfile_post)
    hdul = fits.open(areatab)
    tbdata = hdul[1].data
    """ convert limflux from 0.3-2.3 keV to 0.5-2 keV """
    limflux_dl10 = tbdata['LIMFLUX']*coeff
    area_dl10 = tbdata['SKY_AREA']
    hdul.close()

    areatab="{}_AreaTable_dl6_en{}{}".format(os.path.join(outfile_dir,outkey), eband[index], outfile_post)
    hdul = fits.open(areatab)
    tbdata = hdul[1].data
    """ convert limflux from 0.3-2.3 keV to 0.5-2 keV """
    limflux_dl6 = tbdata['LIMFLUX']*coeff
    area_dl6 = tbdata['SKY_AREA']
    hdul.close()

    fig, ax = plt.subplots()
    for axis in ['top','bottom','left','right']:
        ax.spines[axis].set_linewidth(1)
    ax.tick_params(axis="both", width=1, labelsize=14)
    ax.set_xlim([3e-16,1e-13])
    ax.set_ylim([0.3,160])

    df = pandas.read_csv("../data/surveys/eFEDS.dat",header=None,names=['limflux','area'])
    plt.plot(df['limflux'],df['area'], color="brown", linestyle='solid',label="eFEDS (DL6)")

    plt.plot(limflux_dl10,area_dl10, color="black", linestyle='solid',label="eUDS (DL10)")
    plt.plot(limflux_dl6,area_dl6, color="black", linestyle='dashed', label="eUDS (DL6)")

    df = pandas.read_csv("../data/surveys/cosmos-legacy.dat",header=None,names=['limflux','area'])
    plt.plot(df['limflux'],df['area'], color="blue", linestyle='solid',label="COSMOS Legacy")

    df = pandas.read_csv("../data/surveys/CDWFS.dat",header=None,names=['limflux','area'])
    plt.plot(df['limflux'],df['area'], color="green", linestyle='solid', label="CDWFS")

    df = pandas.read_csv("../data/surveys/XMM-RM.dat",header=None,names=['limflux','area'])
    plt.plot(df['limflux'],df['area'], color="magenta", linestyle='solid',label="XMM-RM")

    df = pandas.read_csv("../data/surveys/XMM-XXL-N.dat",header=None,names=['limflux','area'])
    plt.plot(df['limflux'],df['area'], color="red", linestyle='solid',label="XMM-XXL-N")

    ax.legend()

    #plt.hist(data, bins=logbins, histtype='step', color='blue', linewidth=1, linestyle='solid')
    plt.xlabel('Limiting flux (0.5-2 keV, erg s$^{-1}$ cm$^{-2}$)',fontsize=14, fontweight='normal')
    plt.ylabel('Area (deg$^{2}$)',fontsize=14, fontweight='normal')
    plt.grid(visible=True)
    plt.xscale('log')
    plt.yscale('log')
    png="{}_AreaTable_en{}.png".format(os.path.join(outfile_dir,outkey), eband[index])
    plt.savefig(png, bbox_inches='tight')
    plt.close(fig)

"""
========================================================================
Model TBabs<1>*powerlaw<2> Source No.: 1   Active/On
Model Model Component  Parameter  Unit     Value
 par  comp
   1    1   TBabs      nH         10^22    2.00000E-02  frozen
   2    2   powerlaw   PhoIndex            2.00000      frozen
   3    2   powerlaw   norm                1.14851E-04  +/-  3.83988E-06
________________________________________________________________________

Model Flux 0.00028334 photons (3.4012e-13 ergs/cm^2/s) range (0.30000 - 2.3000 keV)
Model Flux 0.0001619 photons (2.4336e-13 ergs/cm^2/s) range (0.50000 - 2.0000 keV)
"""
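The coefficient used in the do_sens_curve block above follows directly from the two model fluxes quoted in the closing comment; a quick, self-contained check of the arithmetic (the limiting-flux value below is invented purely for illustration):

    # Band conversion for the assumed TBabs*powerlaw model (nH=0.02, Gamma=2):
    # flux(0.5-2 keV) / flux(0.3-2.3 keV)
    coeff = 2.4336e-13 / 3.4012e-13
    print("{:.4f}".format(coeff))           # ~0.7155
    limflux_03_23 = 1.0e-14                 # example 0.3-2.3 keV limiting flux (made up)
    limflux_05_20 = limflux_03_23 * coeff   # ~7.2e-15 erg/s/cm^2 in 0.5-2 keV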
@@ -33,4 +33,8 @@ source <MY PATH>/eSASS4EDR/bin/esass-init.csh

### 04_mosaics.py

Creates combined images (mosaics) in different energy bands.

### 05_srctool.py

Runs srctool for the widest band, 0.2-10 keV, so that the spectra have the fullest energy coverage. The source list is taken from the 0.3-2.3 keV band.
@@ -51,7 +51,25 @@ emax_ev=[2300, 600, 2300, 5000, 10000,8000]

emin_kev=[0.3, 0.3, 0.6, 2.3, 0.2, 0.3]
emax_kev=[2.3, 0.6, 2.3, 5.0, 10.0, 8.0]
ecf = [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]

#ecf = [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
ecf = [9.7817E+11, 3.2982E+12, 1.3903E+12, 2.3322E+12, 5.2022E+11, 5.8453E+11]

"""
*** en0 ecf 9.7817E+11 +/- 2.4606E+10 2.52% N=17
*** en1 ecf 3.2982E+12 +/- 8.2963E+10 2.52% N=17
*** en2 ecf 1.3903E+12 +/- 3.5036E+10 2.52% N=17
*** en3 ecf 2.3322E+12 +/- 5.8717E+10 2.52% N=17
*** en4 ecf 5.2022E+11 +/- 1.3110E+10 2.52% N=17
*** en5 ecf 5.8453E+11 +/- 1.4743E+10 2.52% N=17
"""

outfile_post='.fits'

"""
Pavel Medvedev:
0.3-2.3: 9.135819435325375e-13
0.3-0.6: 9.160477830652834e-13
0.6-2.3: 9.201664167869427e-13
2.3-5.0: 8.721504826794627e-12
"""
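For context: these ECFs carry units of (counts/s) per (erg/s/cm^2), so a count rate from ermldet is converted to flux by dividing by the band's ECF, which is how make_catalog in uds/uds/utils.py applies them (ml_flux_0/ecf). A minimal sketch with a made-up count rate:

    # ECF usage sketch; the en0 value is taken from the table above,
    # the count rate is invented for illustration.
    ecf_en0 = 9.7817e11          # (cts/s) / (erg/s/cm^2), 0.3-2.3 keV
    ml_rate = 0.05               # cts/s, hypothetical source
    ml_flux = ml_rate / ecf_en0  # ~5.1e-14 erg/s/cm^2
    print("{:.3e}".format(ml_flux))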
uds/uds/sherpa.py  (new file, 219 lines)
@@ -0,0 +1,219 @@
import glob
import os
import sys
import numpy as np
from astropy.io import fits
import pickle
from scipy.stats import sem

#import matplotlib.pyplot as plt
#plt.style.use('ggplot')

from sherpa.astro.ui import *

def calc_ecf(path,catprep=None, emin=None, emax=None, eband=None, outfile=None, fitmin=0.2, fitmax=5.0, simnum=100):
    """
    Undo the SPECRESP correction for CORRCOMB=CORRPSF*CORRVIGN, like this:
    SPECRESP=SPECRESP/CORRCOMB, because ermldet does not correct for PSF and vignetting.
    """

    data={}

    if (catprep == None):
        print("catprep file is not provided, exit")
        sys.exit()
    else:
        print("Reading {}".format(catprep))

    catprep_hdul = fits.open(catprep,mode='readonly')
    catprep_data = catprep_hdul[1].data
    #print(catprep_data['detUID'])
    mlrate = {}
    mlrate_err = {}
    dl = {}
    for rec in catprep_data:
        srcid=rec['detUID']

        """ consider only non-extended sources """
        if(rec['EXT'] > 0.0):
            continue

        if(srcid in mlrate.keys()):
            print("Duplicate SrcID! {}".format(srcid))
            sys.exit()
        else:
            mlrate[srcid]=rec['ML_RATE_0']
            mlrate_err[srcid]=rec['ML_RATE_ERR_0']
            dl[srcid]=rec['DET_LIKE_0']

    flist=glob.glob(path)
    for filename in flist:

        print("Reading {}".format(filename))

        fout=filename.replace(".fits", ".uncorr.fits")

        pha=filename.replace("ARF", "SourceSpec")
        pha=pha.replace(".fits", ".fits.grp")
        pha_header = fits.getheader(pha,ext=1)
        object = pha_header['OBJECT']

        """ check a given source (testing...) """
        #if (int(object) > 10):
        #    continue

        srcid="ML{}".format(object)
        if not (srcid in mlrate.keys()):
            print("Skip {}".format(srcid))
            continue

        hdul = fits.open(filename,mode='readonly')
        specresp = np.divide(hdul[1].data['SPECRESP'], hdul[1].data['CORRCOMB'])
        hdul[1].data['SPECRESP'] = specresp
        hdul.writeto(fout, overwrite=True)
        clean() # sherpa
        set_xsxsect("vern")
        set_xsabund('wilm')
        load_pha(pha) # sherpa

        """ load uncorrected ARF """
        # load_arf(fout) # sherpa

        # Define the source model
        #sh.set_source("powlaw1d.p1")
        set_source("xstbabs.abs1 * powlaw1d.p1") # sherpa
        abs1.nH = 0.02
        # Change reference energy of the model
        p1.ref = 1 # 1 keV
        p1.gamma = 2.0
        p1.ampl = 1e-4 # in cm**-2 s**-1 keV**-1
        freeze(abs1.nH, p1.gamma) # sherpa

        ### Define the statistic
        set_stat("wstat") # sherpa

        ignore_bad() # sherpa
        ### Define the fit range
        print("Notice 0.3-5.0 keV:")
        notice(fitmin, fitmax) # sherpa

        """ Check whether the spectrum still has counts after filtering """
        dep = get_dep(filter=True)
        if not (len(dep)):
            print("*** Skip {} due to low counts".format(srcid))
            continue

        ### Do the fit
        fit()
        res_fit=get_fit_results()
        rstat=res_fit.rstat

        set_analysis('energy')

        photon_flx=[]
        energy_flx=[]
        skipme=False
        for ie in range(len(eband)):
            ph_flx=calc_photon_flux(emin[ie], emax[ie])
            en_flx=calc_energy_flux(emin[ie], emax[ie])
            if not (ph_flx>0):
                skipme=True
            photon_flx.append(ph_flx)
            energy_flx.append(en_flx)
            print("en{} Photon flux: {:.4E} Energy flux: {:.4E}".format(eband[ie], ph_flx, en_flx))

        if (skipme == True):
            print("*** Skip zero flux for {}".format(srcid))
            continue

        print("----> Success: {} pow norm: {:.4E} reduced stat: {:.2f}".format(res_fit.succeeded,res_fit.parvals[0], rstat))

        sample_flx=[]
        sample_flx_lo=[]
        sample_flx_hi=[]
        if (res_fit.rstat < 3.0):
            conf()
            res_conf=get_conf_results()
            #print(res_conf)
            component=abs1+p1
            #print(component)
            #covar()
            #errs = get_covar_results().parmaxes
            #ans = sample_flux(correlated=False, scales=errs, num=500)
            for ie in range(len(eband)):
                pars_flux = sample_flux(modelcomponent=component, lo=emin[ie], hi=emax[ie], num=simnum, correlated=True, numcores=10, confidence=68)
                sample_flx.append(pars_flux[0][0])
                sample_flx_lo.append(pars_flux[0][2])
                sample_flx_hi.append(pars_flux[0][1])

        else:
            continue

        print("### SAMPLE FLUX: {}".format(sample_flx))
        data[srcid]={'sample_flx':sample_flx,
                     'sample_flx_lo':sample_flx_lo,
                     'sample_flx_hi':sample_flx_hi,
                     'photon_flx':photon_flx,
                     'energy_flx':energy_flx,
                     'ml_rate':mlrate[srcid],
                     'ml_rate_err':mlrate_err[srcid],
                     'det_like':dl[srcid]}

    with open(outfile, 'wb') as f:
        pickle.dump(data, f)


def print_ecf(infile=None, emin=None, emax=None, eband=None, skipfrac=5.0):

    with open(infile, 'rb') as f:
        data = pickle.load(f)

    for ie in range(len(eband)):
        print()
        ecf=[]

        for key in data.keys():
            ar=data[key]
            ml = ar['ml_rate']
            dml = ar['ml_rate_err']

            flx = ar['sample_flx'][ie]
            dflx1 = ar['sample_flx'][ie] - ar['sample_flx_lo'][ie]
            dflx2 = ar['sample_flx_hi'][ie] - ar['sample_flx'][ie]

            # take maximum error as conservative approach
            dflx = np.maximum(dflx1,dflx2)

            ecf0 = ml/flx
            decf0 = abs(flx*dml - ml*dflx) / np.power(flx,2)
            frac0 = 100 * abs(flx*dml - ml*dflx) / (flx*ml)

            skipme=False
            if((100*dflx/flx) < skipfrac):
                skipme=False
            else:
                skipme=True

            #if not (len(ar['sample_flx'])>0):
            #    continue
            print("en{} {} DL={:.2f} FLUX: {:4E} (-{:.4E} +{:.4E} {:.2f}%) ML_RATE: {:.4f} +/- {:.4f} ({:.2f}%) --> {:.4E} +/- {:.4E} {:.2f}% skip={}".format(eband[ie],key,ar['det_like'],
                flx,dflx1,dflx2,100*dflx/flx,
                ml,dml,100*dml/ml,
                ecf0,decf0,frac0,skipme))
            if(skipme==True):
                continue
            ecf.append(ecf0)

        ecf_mean = np.mean(ecf)
        ecf_err = np.std(ecf, ddof=1)/np.sqrt(np.size(ecf))
        print("\n*** en{} ecf {:.4E} +/- {:.4E} {:.2f}% N={}".format(eband[ie],
            ecf_mean,
            ecf_err,100*ecf_err/ecf_mean,
            np.size(ecf)))
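A small usage sketch of the pickle written by calc_ecf() and read back by print_ecf(): it maps detUID keys to per-source dictionaries holding the sampled fluxes and the ermldet rate. The file path follows the ecfout convention in scripts/05_srctool.py and is an assumption here:

    import pickle

    # Read the SampleFlux pickle and recompute the per-source ECF for band en0,
    # the same ratio print_ecf() reports (path assumed; see 05_srctool.py).
    with open("../products/tm0_SampleFlux.pickle", "rb") as f:
        data = pickle.load(f)

    ie = 0  # band index
    for srcid, rec in data.items():
        ecf0 = rec['ml_rate'] / rec['sample_flx'][ie]   # (cts/s) per (erg/s/cm^2)
        print(srcid, rec['det_like'], ecf0)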
uds/uds/utils.py  (159 changed lines)
@@ -1,5 +1,6 @@
import os
import sys
import csv
import numpy as np
from astropy.io import fits
from astropy import wcs

@@ -9,7 +10,7 @@ from astropy.io.fits import getdata
import glob
from fitsio import FITS
from pathlib import Path

import pandas
from astropy.table import QTable, Table, Column
from astropy import units as u

@@ -983,7 +984,7 @@ def create_sensmap(sensmap=None,expmap=None,backmap=None,detmask=None,areatab=No
        "ext_flag=no",
        "extlike_flag=no",
        "compress_flag=no",
        "area_flag=no",]
        "area_flag=yes",]
    remove_file(sensmap)
    remove_file(areatab)
    os.system((" ").join(cmd))

@@ -1005,3 +1006,157 @@ def group_spectra(path,group_min=25):
        ]
        print((" ").join(cmd))
        os.system((" ").join(cmd))

def check_ermldet_forced(infile):

    if(os.path.isfile(infile)==False):
        print("File not found {}".format(infile))

    print("reading mllist {}".format(infile))

    hdul = fits.open(infile)
    tbdata = hdul[1].data
    hdul.close()

    for rec in tbdata:
        idsrc=rec['ID_SRC']
        boxid = rec['BOX_ID_SRC']
        if(idsrc != boxid):
            print("The ermldet catalog in forced mode should contain only unique sources.")
            print("Use fitpos_flag=no, fitext_flag=no, nmulsou=1")
            print("{} != {}".format(idsrc,boxid))
            sys.exit()

def make_catalog(infile=None, dlmin=6.0,dlmax=10,scale=60*60,ext_like=0.0,outkey='main',emin=None, emax=None, eband=None, ecf=None,
                 infile_en03cat=None,infile_en03sens=None):
    if(os.path.isfile(infile)==False):
        print("File not found {}".format(infile))

    print("reading catprep {}".format(infile))
    fout_selected=infile.replace(".fits", ".{}.selected.csv".format(outkey))
    fout_skip=infile.replace(".fits", ".{}.skip.reg".format(outkey))
    fout_extended=infile.replace(".fits", ".extended.reg")

    """ Read en03 energy band """
    en03cat={}
    hdul = fits.open(infile_en03cat)
    en03tab = hdul[1].data
    hdul.close()

    for rec in en03tab:
        key = rec['detUID']
        en03cat[key]={'ra':rec['RA'], 'dec':rec['DEC'], 'det_like':rec['DET_LIKE_0'],
                      'ml_rate':rec['ML_RATE_0'], 'ml_rate_err':rec['ML_RATE_ERR_0'],
                      'ml_flux':rec['ML_FLUX_0'], 'ml_flux_err':rec['ML_FLUX_ERR_0'],'sens':0.0}

    print("reading {}".format(infile_en03sens))
    hdul = fits.open(infile_en03sens)
    en03sens = hdul[0].data
    en03hdr = hdul[0].header
    hdul.close()
    en03wcs = WCS(en03hdr)
    for key in en03cat.keys():
        ra=en03cat[key]['ra']
        dec=en03cat[key]['dec']
        crd = SkyCoord(ra, dec, frame=FK5(), unit="deg")
        pix = en03wcs.wcs_world2pix([(ra, dec),], 1)
        px=round(pix[0][0])-1
        py=round(pix[0][1])-1
        en03cat[key]['sens']=en03sens[py,px]
        #print(key,ra,dec,px,py,value)
        #(ra0,dec0), = en03wcs.wcs_pix2world(pix, 1)
        #crd0 = SkyCoord(ra0, dec0, frame=FK5(), unit="deg")
        #sep = crd.separation(crd0).arcsec

    hdul = fits.open(infile)
    tbdata = hdul[1].data
    hdul.close()

    catsel=[]
    catskip=[]
    catext=[]
    skip_count=0
    selected_count=0
    keepalive_count=0
    for rec in tbdata:
        src_crd = SkyCoord(rec['ra'], rec['dec'], frame=FK5(), unit="deg")
        key = rec['detUID']
        if not (key in en03cat.keys()):
            print("{} not found in {}".format(rec['detUID'],infile_en03cat))
            sys.exit()
        else:
            """ check coords """
            en03_crd = SkyCoord(en03cat[key]['ra'], en03cat[key]['dec'], frame=FK5(), unit="deg")
            sep = src_crd.separation(en03_crd).arcsec
            if(sep > 1.0):
                print("{} significant offset: {:.4f} arcsec".format(key,sep))
                sys.exit()
            pass

        if (rec['ext_like'] >= ext_like):
            catext.append({'ra':rec['ra'],'dec':rec['dec'],'radec_err':rec['radec_err'],
                           'det_like':rec['det_like_0'],'ext_like':rec['ext_like'],
                           'ext':rec['ext'],'ext_err':rec['ext_err'],
                           'src_id':rec['id_src']})
        if ((rec['det_like_0'] > dlmin and rec['det_like_0'] < dlmax and rec['ext_like'] < ext_like)):

            catsel.append({'ra':rec['ra'],
                           'dec':rec['dec'],
                           'radec_err':rec['radec_err'],
                           'det_like':rec['det_like_0'],
                           'src_id':rec['id_src'],
                           'ml_rate':rec['ml_rate_0'],
                           'ml_rate_err':rec['ml_rate_err_0'],
                           'ml_flux':rec['ml_flux_0']/ecf,
                           'ml_flux_err':rec['ml_flux_err_0']/ecf,
                           'en03_dl':en03cat[key]['det_like'],
                           'en03_flux':en03cat[key]['ml_flux'],
                           'en03_flux_err':en03cat[key]['ml_flux_err'],
                           'en03_sens':en03cat[key]['sens'],
                           },
                          )

            selected_count=selected_count + 1
        else:
            catskip.append({'ra':rec['ra'],'dec':rec['dec'],'radec_err':rec['radec_err'],'det_like':rec['det_like_0'],
                            'src_id':rec['id_src']})
            skip_count = skip_count+1

    print("total={} skip_count={} selected={}".format(len(tbdata),skip_count,selected_count))

    with open(fout_selected, 'w') as csvfile:
        fieldnames = ['src_id', 'ra', 'dec', 'radec_err', 'det_like','ml_rate','ml_rate_err','ml_flux','ml_flux_err','en03_dl','en03_flux','en03_flux_err','en03_sens']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for rec in catsel:
            writer.writerow(rec)
            # writer.writerow({'src_id':rec['src_id'],
            #                  'ra':rec['ra'],
            #                  'dec':rec['dec'],
            #                  'radec_err':rec['radec_err'],
            #                  'det_like':rec['det_like']})
    return
    with open(fout_skip, 'w') as writer:
        for rec in catskip:
            writer.write("fk5;circle({}, {}, {}) # color=red text={{{} {:.2f}}}\n".format(rec['ra'],rec['dec'],rec['radec_err']/scale,
                rec['src_id'],rec['det_like']))

    with open(fout_extended, 'w') as writer:
        for rec in catext:
            writer.write("fk5;circle({}, {}, {}) # color=magenta text={{{} {:.2f} {:.2f}}}\n".format(rec['ra'],
                rec['dec'],
                rec['ext']/scale,
                rec['src_id'],
                rec['det_like'],
                rec['ext_like']))

def get_log_distr(infile=None, index_col='src_id', field=None, minval=10, maxval=100, nbin=50):
    logbins=np.logspace(np.log10(minval),np.log10(maxval), nbin)
    df = pandas.read_csv(infile, index_col=index_col)
    data = df[field]
    #hist, edges = np.histogram(array, bins=logbins)
    return data, logbins