Compare commits
1 commit: code_trimm ... pre_github
SHA1: 0c231202ae
LICENSE.md (21 lines deleted)
@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2022 Andrey Mukhin

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -1,2 +0,0 @@
include nuwavsource/pixpos/*
include nuwavsource/badpix_headers/*
README.md (49 lines deleted, 1 added)
@@ -1,49 +1 @@
# nuwavsource

This package detects sources in NuSTAR observations and generates a mask excluding the signal from sources of any kind.

Additionally, it generates a table containing:

Useful data about the observation:

1. OBS_ID
2. Detector
3. Coordinates in equatorial (ra, dec) and galactic (lon, lat) systems
4. Time of the observation in seconds
5. Exposure

Useful algorithm-related data:

6. Average count rate in the unmasked area
7. Portion of unmasked area
8. Specific statistical metric [1] before and after masking the detected sources
9. Root mean square of counts in the unmasked area

## Installation
This package is to be used with Python 3.x.x

To install the package, run

```bash
pip install nuwavsource
```

## Usage

To use the package in your project, import it by writing

```python
from nuwavsource import nuwavsource
```

You can process the cl.evt file by creating an Observation class object:

```python
obs = nuwavsource.Observation(path_to_evt_file)
```

Additionally, the energy band in keV to get events from can be passed as an argument. The default value is [3, 20].

```python
obs = nuwavsource.Observation(path_to_evt_file, E_borders=[E_min, E_max])
```

The replacement README consists of a single line:

# wavsource_nustar
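The README's usage section stops at constructing an Observation; the new files below show how a source mask is actually derived from one. A minimal sketch of that flow, assuming the Observation API and wavdecomp() method defined in get_region_pack.py later in this diff (the file name and threshold values here are illustrative, not from the repo):

```python
import numpy as np
from get_region_pack import Observation

# Hypothetical event file name; any NuSTAR *_cl.evt file would do.
obs = Observation('nu60001068002A01_cl.evt', E_borders=[3, 20])
# Per-scale wavelet planes; (5, 3) are the detection/growth thresholds.
wav_layers = obs.wavdecomp('gauss', (5, 3), occ_coeff=True)
# Pixels significant on the intermediate scales form the source region...
region = wav_layers[2:-1].sum(0) > 0
# ...and masking them leaves a (nearly) source-free image.
masked = np.ma.masked_array(obs.data, mask=region)
```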
get_region_archive_table.py (new file, 176 lines)
@@ -0,0 +1,176 @@
# %%
from get_region_pack import *
import numpy as np
import pandas as pd
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy import units as u
import multiprocessing
from multiprocessing import get_context
import warnings
import time
import os
warnings.filterwarnings('ignore')
# %%
def ellipse(array):
    grid = np.indices(array.shape)[::-1]
    center = [((curr_axis*array).sum()/array.sum()) for curr_axis in grid]
    y, x = np.where(array)
    return center, np.abs(x-center[0]).max()*2, np.abs(y-center[1]).max()*2

def mask_ellipse(array):
    (center_x, center_y), len_x, len_y = ellipse(array)
    x, y = np.indices(array.shape)[::-1]
    radius = ((x-center_x)/(0.5*len_x))**2 + ((y-center_y)/(0.5*len_y))**2
    return radius <= 1

def poisson_divider(array):
    sub_arrays = np.zeros((4, 180, 180))
    for i in range(2):
        for j in range(2):
            sub_arrays[i+2*j] = array[180*i:180*(i+1), 180*j:180*(j+1)].filled(-1000)
    pix_sum = 0
    for layer in sub_arrays:
        _masked_array = np.ma.masked_less(layer, 0)
        pix_sum += ((_masked_array-_masked_array.mean())**2/_masked_array.mean()).sum()
    pix_sum /= np.logical_not(array.mask).sum()
    return pix_sum

def process(argument):
    idx, obs_name = argument
    bin_num = 6
    try:
        obs = Observation(obs_name)
        sky_coord = SkyCoord(ra=obs.ra*u.deg, dec=obs.dec*u.deg, frame='fk5').transform_to('galactic')
        lon, lat = sky_coord.l.value, sky_coord.b.value
        rem_signal, rem_area, poiss_comp, rms = np.zeros((4, 2**bin_num))
        region = np.zeros(obs.data.shape, dtype=bool)
        rem_region = np.logical_and(region, np.logical_not(obs.data.mask))
        masked_obs = np.ma.masked_array(obs.data, mask=region)
        good_lvl = np.zeros(bin_num, dtype=bool)
        good_idx = 0
        if obs.exposure > 1000:
            wav_obs = obs.wavdecomp('gauss', (5, 3), occ_coeff=True)
            occ_coeff = obs.get_coeff()
            for idx, lvl in enumerate(binary_array(bin_num)):
                try:
                    # region = np.array([layer>0 for layer in wav_obs[2:-1][lvl]]).sum(0).astype(bool)
                    region = wav_obs[2:-1][lvl].sum(0) > 0
                except ValueError:
                    region = np.zeros(obs.data.shape, dtype=bool)
                masked_obs = np.ma.masked_array(obs.data, mask=region)*occ_coeff
                rem_region = np.logical_and(region, np.logical_not(obs.data.mask))
                rem_signal[idx] = 1 - obs.data[region].sum()/obs.data.sum()
                rem_area[idx] = 1 - rem_region.sum()/np.logical_not(obs.data.mask).sum()
                # poiss_comp[idx] = poisson_divider(masked_obs)
                poiss_comp[idx] = np.mean((masked_obs-masked_obs.mean())**2/masked_obs.mean())
                rms[idx] = np.sqrt(((masked_obs-masked_obs.mean())**2).mean())
                parameter = lambda idx: ((poiss_comp[idx])**2 + ((1-rem_area[idx])*0.5)**2)
                if parameter(idx) < parameter(good_idx):
                    good_idx = idx
                    good_lvl = lvl
            try:
                # region = np.array([layer>0 for layer in wav_obs[2:-1][lvl]]).sum(0).astype(bool)
                region = wav_obs[2:-1][good_lvl].sum(0) > 0
            except ValueError:
                region = np.zeros(obs.data.shape, dtype=bool)
            masked_obs = np.ma.masked_array(obs.data, mask=region)
            rem_region = np.logical_and(region, np.logical_not(obs.data.mask))
            to_table = [obs.obs_id,
                        obs.det,
                        obs.ra,
                        obs.dec,
                        lon,
                        lat,
                        obs.time_start,
                        obs.exposure,
                        masked_obs.mean()/obs.exposure,  # count rate
                        1 - rem_region.sum()/np.logical_not(obs.data.mask).sum(),  # rem_area
                        poiss_comp[good_idx],
                        poiss_comp[0],
                        rms[good_idx]
                        ]
        else:
            to_table = [obs.obs_id,
                        obs.det,
                        obs.ra,
                        obs.dec,
                        lon,
                        lat,
                        obs.time_start,
                        obs.exposure,
                        -1,
                        -1,  # rem_signal
                        -1,  # rem_area
                        -1,
                        -1,
                        -1
                        ]
        return to_table, region.astype(int)
    except TypeError:
        return obs_name, np.zeros((360, 360))
# %%
if __name__ == '__main__':
    start = time.perf_counter()
    processing = True
    start_new = True
    group_size = 50
    fits_folder = 'D:\\Programms\\Jupyter\\Science\\Source_mask\\Archive\\Processing_v8'
    region_folder = f'{fits_folder}\\Region'
    if not os.path.exists(fits_folder):
        os.makedirs(fits_folder)
        os.makedirs(region_folder)
    obs_list = get_link_list('E:\\Archive\\0[0-9]\\[0-9]', sort_list=True)[:]
    # FILTERING BY THE FILE SIZE
    print(f'Finished scanning folders. Found {len(obs_list)} observations.')
    table = {
        'obs_id': [], 'detector': [], 'ra': [], 'dec': [], 'lon': [], 'lat': [], 't_start': [], 'exposure': [],
        'count_rate': [], 'remaining_area': [], 'poisson_chi2': [], 'poisson_chi2_full': [], 'rms': []
    }
    if start_new:
        out_table = pd.DataFrame(table)
        out_table.to_csv(f'{fits_folder}\\test.csv')
        # out_table.to_csv(f'{fits_folder}\\test_skipped.csv')
        os.system(f'copy D:\\Programms\\Jupyter\\Science\\Source_mask\\Archive\\Processing_v3\\test_skipped.csv {fits_folder}')
    # REMOVING PROCESSED OBSERVATIONS
    # already_processed = fits.getdata(f'{fits_folder}\\test.fits')['obs_name']
    already_processed_list = pd.read_csv(f'{fits_folder}\\test.csv', index_col=0, dtype={'obs_id': str})
    already_skipped_list = pd.read_csv(f'{fits_folder}\\test_skipped.csv', index_col=0, dtype={'obs_id': str})
    already_processed = (already_processed_list['obs_id'].astype(str)+already_processed_list['detector']).values
    already_skipped = (already_skipped_list['obs_id'].astype(str)+already_skipped_list['detector']).values
    obs_list_names = [curr[curr.index('nu')+2:curr.index('_cl.evt')-2] for curr in obs_list]
    not_processed = np.array([(curr not in already_processed) for curr in obs_list_names])
    not_skipped = np.array([(curr not in already_skipped) for curr in obs_list_names])
    obs_list = obs_list[np.logical_and(not_processed, not_skipped)]
    print(f'Removed already processed observations. {len(obs_list)} observations remain.')
    if processing:
        print('Started processing...')
        num = 0
        for group_idx in range(len(obs_list)//group_size+1):
            print(f'Started group {group_idx}')
            group_list = obs_list[group_size*group_idx:min(group_size*(group_idx+1), len(obs_list))]
            max_size = np.array([stat(file).st_size/2**20 for file in group_list]).max()
            process_num = 10 if max_size < 50 else (5 if max_size < 200 else (2 if max_size < 1000 else 1))
            print(f"Max file size in group is {max_size:.2f}Mb, create {process_num} processes")
            with get_context('spawn').Pool(processes=process_num) as pool:
                for result, region in pool.imap(process, enumerate(group_list)):
                    if type(result) is np.str_:
                        obs_id = result[result.index('nu'):result.index('_cl.evt')]
                        print(f'{num:>3} is skipped. File {obs_id}')
                        num += 1
                        continue
                    for key, value in zip(table.keys(), result):
                        table[key] = [value]
                    if table['exposure'][0] < 1000:
                        print(f'{num:>3} {str(result[0])+result[1]} is skipped. Exposure < 1000')
                        pd.DataFrame(table).to_csv(f'{fits_folder}\\test_skipped.csv', mode='a', header=False)
                        num += 1
                        continue
                    pd.DataFrame(table).to_csv(f'{fits_folder}\\test.csv', mode='a', header=False)
                    fits.writeto(f'{region_folder}\\{str(result[0])+result[1]}_region.fits', region, overwrite=True)
                    print(f'{num:>3} {str(result[0])+result[1]} is written.')
                    num += 1
            print('Converting generated csv to fits file...')
            print(f'Current time in: {time.perf_counter()-start}')
            print(f'Processed {num/len(obs_list)*100:.2f} percent')
            csv_file = pd.read_csv(f'{fits_folder}\\test.csv', index_col=0, dtype={'obs_id': str})
            Table.from_pandas(csv_file).write(f'{fits_folder}\\test.fits', overwrite=True)
        print(f'Finished writing: {time.perf_counter()-start}')
# %%
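The loop in process() above scores every subset of wavelet layers and keeps the one minimizing parameter(idx) = poiss_comp[idx]**2 + ((1 - rem_area[idx])*0.5)**2, trading the residual non-Poissonity of the masked image against the fraction of detector area the mask removes. A standalone restatement of that trade-off (function and variable names here are mine, not from the file):

```python
import numpy as np

def selection_score(poisson_stat: float, remaining_area: float) -> float:
    """Mirror of parameter(idx) above: penalize deviation from Poisson
    statistics and, more weakly, the area removed by the mask."""
    removed_area = 1 - remaining_area
    return poisson_stat**2 + (removed_area * 0.5)**2

# Three hypothetical layer subsets as (poisson_stat, remaining_area) pairs.
candidates = [(1.8, 1.0), (1.05, 0.9), (1.0, 0.2)]
scores = [selection_score(p, a) for p, a in candidates]
best = int(np.argmin(scores))
# best == 1: the second subset wins; the third is slightly more Poissonian
# but pays too much for masking 80% of the usable area.
```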
get_region_pack.py (new file, 269 lines)
@@ -0,0 +1,269 @@
# %%
import import_ipynb
import numpy as np
import pandas as pd
import itertools

from os import listdir, mkdir, stat

from scipy.signal import fftconvolve, convolve2d

import matplotlib.pyplot as plt
from matplotlib.colors import SymLogNorm as lognorm

from astropy.io import fits
from astropy.wcs import WCS

from glob import glob
# %%
def binary_array(num):
    variants = [[0, 1], ]*num
    out = np.zeros((2**num, num), bool)
    for idx, level in enumerate(itertools.product(*variants)):
        out[idx] = np.array(level, dtype=bool)
    return out

def create_array(filename, mode='Sky'):
    temp = fits.getdata(filename, 1)
    if mode == 'Sky':
        return np.histogram2d(temp['Y'], temp['X'], 1000, [[0, 1000], [0, 1000]])[0]
    if mode == 'Det':
        return np.histogram2d(temp['DET1Y'], temp['DET1X'], 360, [[0, 360], [0, 360]])[0]

def get_wcs(file):
    header = file[1].header
    wcs = WCS({
        'CTYPE1': header['TCTYP38'], 'CTYPE2': header['TCTYP39'],
        'CUNIT1': header['TCUNI38'], 'CUNIT2': header['TCUNI39'],
        'CDELT1': header['TCDLT38'], 'CDELT2': header['TCDLT39'],
        'CRPIX1': header['TCRPX38'], 'CRPIX2': header['TCRPX39'],
        'CRVAL1': header['TCRVL38'], 'CRVAL2': header['TCRVL39'],
        'NAXIS1': header['TLMAX38'], 'NAXIS2': header['TLMAX39']
    })
    return wcs

def get_link_list(folder, sort_list=False):
    links = glob(f'{folder}\\**\\*_cl.evt', recursive=True)
    sorted_list = sorted(links, key=lambda x: stat(x).st_size)
    return np.array(sorted_list)

def atrous(level=0, resize=False, max_size=1001):
    base = 1/256*np.array([
        [1, 4, 6, 4, 1],
        [4, 16, 24, 16, 4],
        [6, 24, 36, 24, 6],
        [4, 16, 24, 16, 4],
        [1, 4, 6, 4, 1],
    ])
    size = 2**level * (base.shape[0]-1)+1
    output = np.zeros((size, size))
    output[::2**level, ::2**level] = base
    if resize:
        output = np.pad(output, pad_width=2**(level+1))
    if output.shape[0] > max_size:
        return output[(size-1)//2-(max_size-1)//2:(size-1)//2+(max_size-1)//2+1, (size-1)//2-(max_size-1)//2:(size-1)//2+(max_size-1)//2+1]
    return output

def gauss(level=0, resize=False, max_size=1000):
    size = min(5*2**level+1 if not resize else 5*2**(level+1)+1, max_size)
    sigma = 2**(level)
    if sigma < 1:
        out = np.zeros((size+1, size+1))
        out[int((size-1)/2)+1][int((size-1)/2)+1] = 1
        return out
    A = 1/(2*np.pi*sigma**2)**0.5
    x = A*np.exp((-(np.arange(size)-(size-1)//2)**2)/(2*sigma**2))
    out = np.multiply.outer(x, x)
    return out

def sigma(mode='atrous', level=0):
    if mode == 'atrous':
        sigma = [0.8908, 0.20066, 0.08551, 0.04122, 0.02042, 0.0114]
    elif mode == 'gauss':
        sigma = [0.912579, 0.125101, 0.104892, 4.97810e-02, 2.46556e-02, 1.14364e-02]
    if level < 6:
        return sigma[level]
    else:
        return sigma[5]/2**(level-5)

def adjecent(array):
    grid = np.array([
        [1, 1, 1],
        [1, 0, 1],
        [1, 1, 1]
    ])
    output = fftconvolve(array, grid, mode='same') >= 0.5
    try:
        output = np.logical_and(np.logical_and(output, np.logical_not(array)), np.logical_not(array.mask))
    except AttributeError:
        output = np.logical_and(output, np.logical_not(array))
    output = np.argwhere(output == True)
    return output[:, 0], output[:, 1]

def add_borders(array, middle=True):
    # array_blurred = convolve2d(array,np.ones((5,5)),mode='same')
    mask = np.zeros(array.shape)
    datax, datay = np.any(array > 0, 0), np.any(array > 0, 1)
    # datax, datay = np.any(array_blurred>0,0), np.any(array_blurred>0,1)
    # Add border masks
    x_min, y_min = np.argmax(datax), np.argmax(datay)
    x_max, y_max = len(datax) - np.argmax(datax[::-1]), len(datay) - np.argmax(datay[::-1])
    # x_mid_min, y_mid_min = x_min+10+np.argmin(datax[x_min+10:]), y_min+10+np.argmin(datay[y_min+10:])
    # x_mid_max, y_mid_max = x_max-10-np.argmin(datax[x_max-11::-1]), y_max-10-np.argmin(datay[y_max-11::-1])
    mask[y_min:y_max, x_min:x_max] = True
    if middle is True:
        mask[176:191, :] = False
        mask[:, 176:191] = False
    # mask[y_mid_min:y_mid_max,:] = False
    # mask[:,x_mid_min:x_mid_max] = False
    mask = np.logical_not(mask)
    return mask

def fill_poisson(array, size_input=32):
    if not (isinstance(array, np.ma.MaskedArray)):
        print('No mask found')
        return array
    size = size_input
    output = array.data.copy()
    mask = array.mask.copy()
    while mask.sum() > 1:
        kernel = np.ones((size, size))/size**2
        coeff = fftconvolve(np.logical_not(mask), kernel, mode='same')
        mean = fftconvolve(output, kernel, mode='same')
        idx = np.where(np.logical_and(mask, coeff > 0.1))
        output[idx] = np.random.poisson(np.abs(mean[idx]/coeff[idx]))
        mask[idx] = False
        size *= 2
    return output

def fill_mean(array, size_input=3):
    size = size_input
    if not (isinstance(array, np.ma.MaskedArray)):
        print('No mask found')
        return array
    output = array.filled(0)
    for i, j in zip(*np.where(array.mask)):
        output[i, j] = array[max(0, i-size):min(array.shape[0]-1, i+size+1), max(0, j-size):min(array.shape[1]-1, j+size+1)].mean()
    while np.isnan(output).any():
        size += 5
        for i, j in zip(*np.where(np.isnan(output))):
            output[i, j] = array[max(0, i-size):min(array.shape[0]-1, i+size+1), max(0, j-size):min(array.shape[1]-1, j+size+1)].mean()
    return output

def mirror(array):
    size = array.shape[0]
    output = np.tile(array, (3, 3))
    output[0:size] = np.flipud(output[0:size])
    output[2*size:3*size] = np.flipud(output[2*size:3*size])
    output[:, 0:size] = np.fliplr(output[:, 0:size])
    output[:, 2*size:3*size] = np.fliplr(output[:, 2*size:3*size])
    return output

class Observation:
    def __init__(self, file_name, E_borders=[3, 20]):
        self.filename = file_name
        self.name = file_name[file_name.find('nu'):].replace('_cl.evt', '')
        with fits.open(file_name) as file:
            self.obs_id = file[0].header['OBS_ID']
            self.ra = file[0].header['RA_NOM']
            self.dec = file[0].header['DEC_NOM']
            self.time_start = file[0].header['TSTART']
            self.header = file[0].header
            self.det = self.header['INSTRUME'][-1]
            self.wcs = get_wcs(file)
            self.data = np.ma.masked_array(*self.get_data(file, E_borders))
            self.hard_mask = add_borders(self.data.data, middle=False)
            shift = int((360-64)/2)
            self.model = (fits.getdata(f'D:\\Programms\\Jupyter\\Science\\Source_mask\\Model/det1_fpm{self.det}.cxb.fits', 0)
                          * (180/np.pi/10150)**2)[shift:360+shift, shift:360+shift]
            self.exposure = self.header['EXPOSURE']

    # def get_coeff(self,folder='D:\\Programms\\Jupyter\\Science\\Source_mask\\Archive\\OCC_observations'):
    #     try:
    #         # _occ_list = get_link_list('D:\\Programms\\Jupyter\\Science\\Source_mask\\Archive\\OCC_observations')
    #         _occ_list = get_link_list(folder)
    #         occ_list = dict()
    #         for name in _occ_list:
    #             occ_list[name[name.index('nu'):name.index('02_cl.evt')]] = name
    #         occ = Observation(occ_list[self.name[:-2]], E_borders=[10,20])
    #         output = np.zeros((360,360))
    #         for i in range(2):
    #             for j in range(2):
    #                 _temp = occ.data[180*i:180*(i+1),180*j:180*(j+1)].mean()
    #                 output[180*i:180*(i+1),180*j:180*(j+1)] = _temp if _temp != 0 else 1
    #     except KeyError:
    #         output = np.ones((360,360))
    #     out = output.min()/output
    #     if np.any(out<0.8):
    #         return np.ones((360,360))
    #     else:
    #         return out

    def get_coeff(self):
        # coeff = np.array([0.972,0.895,1.16,1.02]) if self.det=='A' else np.array([0.991,1.047,1.012,0.956])
        coeff = np.array([0.977, 0.861, 1.163, 1.05]) if self.det == 'A' else np.array([1.004, 0.997, 1.025, 0.979])
        test = (coeff).reshape(2, 2).repeat(180, 0).repeat(180, 1)
        return test

    def get_data(self, file, E_borders=[3, 20]):
        PI_min, PI_max = (np.array(E_borders)-1.6)/0.04
        data = file[1].data.copy()
        idx_mask = (data['STATUS'].sum(1) == 0)
        idx_output = np.logical_and(idx_mask, np.logical_and((data['PI'] > PI_min), (data['PI'] < PI_max)))
        data_output = data[idx_output]
        data_mask = data[np.logical_not(idx_mask)]
        build_hist = lambda array: np.histogram2d(array['DET1Y'], array['DET1X'], 360, [[0, 360], [0, 360]])[0]
        output = build_hist(data_output)
        mask = build_hist(data_mask)
        mask = np.logical_or(mask, add_borders(output))
        mask = np.logical_or(mask, self.get_bad_pix(file))
        return output, mask

    def get_bad_pix(self, file):
        output = np.zeros((360, 360))
        kernel = np.ones((5, 5))
        for i in range(4):
            pixpos_file = fits.getdata(f'D:\\Programms\\Jupyter\\Science\\Source_mask\\Pixpos\\nu{self.det}pixpos20100101v007.fits', i+1)
            bad_pix_file = file[3+i].data.copy()
            temp = np.zeros(len(pixpos_file), dtype=bool)
            for x, y in zip(bad_pix_file['rawx'], bad_pix_file['rawy']):
                temp = np.logical_or(temp, np.equal(pixpos_file['rawx'], x)*np.equal(pixpos_file['rawy'], y))
            temp = pixpos_file[temp]
            output += np.histogram2d(temp['REF_DET1Y'], temp['REF_DET1X'], 360, [[0, 360], [0, 360]])[0]
        output = convolve2d(output, kernel, mode='same') > 0
        return output

    def wavdecomp(self, mode='atrous', thresh=False, iteration=1, occ_coeff=False):
        # THRESHOLD (NOTE: callers must pass an int or a tuple; the default
        # thresh=False leaves thresh_max/thresh_add undefined)
        if type(thresh) is int:
            thresh_max, thresh_add = thresh, thresh/2
        elif type(thresh) is tuple:
            thresh_max, thresh_add = thresh
        # INIT BASIC VARIABLES
        wavelet = globals()[mode]
        # max_level = int(np.ceil(np.log2(self.data.shape[0])))+1
        max_level = 8
        conv_out = np.zeros((max_level+1, self.data.shape[0], self.data.shape[1]))
        size = self.data.shape[0]
        # PREPARE ORIGINAL DATA FOR ANALYSIS: FILL THE HOLES + MIRROR + DETECTOR CORRECTION
        data = fill_poisson(self.data)
        if occ_coeff:
            data = data*self.get_coeff()
        data = mirror(data)
        data_bkg = data.copy()
        # ITERATIVELY CONDUCT WAVELET DECOMPOSITION
        for i in range(max_level):
            for num_iter in range(iteration):
                conv = fftconvolve(data, wavelet(i), mode='same')
                temp_out = data-conv
                # ERRORMAP CALCULATION
                if thresh_max != 0:
                    sig = sigma(mode, i)
                    bkg = fftconvolve(data_bkg, wavelet(i), mode='same')
                    bkg[bkg < 0] = 0
                    # err = (1+np.sqrt(bkg/sig**2 + 0.75))*sig**3
                    err = (1+np.sqrt(bkg+0.75))*sig
                    significant = (np.abs(temp_out) > thresh_max*err)[size:2*size, size:2*size]
                    # significant = (temp_out > thresh_max*err)[size:2*size,size:2*size]
                    if thresh_add != 0:
                        add_significant = (np.abs(temp_out) > thresh_add*err)[size:2*size, size:2*size]
                        # add_significant = (temp_out > thresh_add*err)[size:2*size,size:2*size]
                        adj = adjecent(significant)
                        add_condition = np.logical_and(add_significant[adj[0], adj[1]], np.logical_not(significant[adj[0], adj[1]]))
                        while (add_condition).any():
                            to_add = adj[0][add_condition], adj[1][add_condition]
                            significant[to_add[0], to_add[1]] = True
                            adj = adjecent(significant)
                            add_condition = np.logical_and(add_significant[adj[0], adj[1]], np.logical_not(significant[adj[0], adj[1]]))
                            # add_condition = np.logical_and(np.abs(temp_out)[adj[0],adj[1]] >= thresh_add*err[adj[0],adj[1]], np.logical_not(significant)[adj[0],adj[1]])
                    temp_out[size:2*size, size:2*size][np.logical_not(significant)] = 0
            # WRITING THE WAVELET DECOMP LAYER
            if temp_out[size:2*size, size:2*size].sum() == 0:
                break
            conv_out[i] = +temp_out[size:2*size, size:2*size]
            conv_out[i][conv_out[i] < 0] = 0  # leave only positive data to prevent problems while summing layers
            # conv_out[i] = significant
            data = conv
        conv_out[max_level] = conv[size:2*size, size:2*size]
        return conv_out
# %%
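A note on the energy selection in get_data() above: NuSTAR event files store energy as a PI channel with E[keV] = 0.04*PI + 1.6, which is why the band borders are converted as (E - 1.6)/0.04. A quick check of the conversion for the default band (the values follow directly from the formula):

```python
import numpy as np

E_borders = [3, 20]  # keV, the default band used throughout
PI_min, PI_max = (np.array(E_borders) - 1.6) / 0.04
print(PI_min, PI_max)  # 35.0 460.0
# And back: 35*0.04 + 1.6 = 3.0 keV, 460*0.04 + 1.6 = 20.0 keV.
```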
@@ -1 +0,0 @@
name = 'nuwavsource'
Binary file not shown.
Binary file not shown.
@@ -1,46 +0,0 @@
XTENSION= 'BINTABLE' / binary table extension
BITPIX = 8 / 8-bit bytes
NAXIS = 2 / 2-dimensional binary table
NAXIS1 = 12 / width of table in bytes
NAXIS2 = 64 / number of rows in table
PCOUNT = 0 / size of special data area
GCOUNT = 1 / one data group (required keyword)
TFIELDS = 4 / number of fields in each row
TTYPE1 = 'TIME ' / Start Time of Bad Pixel Interval
TFORM1 = '1D ' / data format of field: 8-byte DOUBLE
TUNIT1 = 's ' / physical unit of field
TTYPE2 = 'RAWX ' / X-position in Raw Detector Coordinates
TFORM2 = '1B ' / data format of field: BYTE
TUNIT2 = 'pixel ' / physical unit of field
TTYPE3 = 'RAWY ' / Y-position in Raw Detector Coordinates
TFORM3 = '1B ' / data format of field: BYTE
TUNIT3 = 'pixel ' / physical unit of field
TTYPE4 = 'BADFLAG ' / Bad pixel flag
TFORM4 = '16X ' / data format of field: BIT
EXTNAME = 'BADPIX ' / Name of the binary table extension
EXTVER = 1 / There shall be one instance of this extension f
DETNAM = 'DET0 ' / CZT Detector ID (0,1,2 or 3)
TELESCOP= 'NuSTAR ' / Telescope (mission) name
INSTRUME= 'FPMA ' / Instrument name (FPMA or FPMB)
ORIGIN = 'Caltech ' / Source of FITS file
CREATOR = 'FTOOLS 6.10 ' / Creator
VERSION = 1 / Extension version number
FILENAME= 'nuAuserbadpix20100101v001.fits' / File name
CONTENT = 'NuSTAR User Bad Pixel Table' / File content
CCLS0001= 'BCF ' / Daset is a Basic Calibration File
CDTP0001= 'DATA ' / Calibration file contains data
CCNM0001= 'BADPIX ' / Type of calibration data
CVSD0001= '2010-01-01' / Date when this file should first be used
CVST0001= '00:00:00' / Time of day when this file should first be used
CDES0001= 'NuSTAR User Bad Pixel Table' / Description
COMMENT
COMMENT This extension provides, for Detector #0 of the FPMA, the list of
COMMENT user-defined bad pixels.
COMMENT The BADFLAG column contains the bad pixel flags. These are a
COMMENT combination of the following bit settings:
COMMENT
COMMENT b0000000000000001 Bad pixel from on-ground CALDB Bad Pixel File
COMMENT b0000000000000010 Disabled pixel from on-board software
COMMENT b0000000000000100 Bad pixels in the file provided by the user
COMMENT
END
@@ -1,46 +0,0 @@
XTENSION= 'BINTABLE' / binary table extension
BITPIX = 8 / 8-bit bytes
NAXIS = 2 / 2-dimensional binary table
NAXIS1 = 12 / width of table in bytes
NAXIS2 = 64 / number of rows in table
PCOUNT = 0 / size of special data area
GCOUNT = 1 / one data group (required keyword)
TFIELDS = 4 / number of fields in each row
TTYPE1 = 'TIME ' / Start Time of Bad Pixel Interval
TFORM1 = '1D ' / data format of field: 8-byte DOUBLE
TUNIT1 = 's ' / physical unit of field
TTYPE2 = 'RAWX ' / X-position in Raw Detector Coordinates
TFORM2 = '1B ' / data format of field: BYTE
TUNIT2 = 'pixel ' / physical unit of field
TTYPE3 = 'RAWY ' / Y-position in Raw Detector Coordinates
TFORM3 = '1B ' / data format of field: BYTE
TUNIT3 = 'pixel ' / physical unit of field
TTYPE4 = 'BADFLAG ' / Bad pixel flag
TFORM4 = '16X ' / data format of field: BIT
EXTNAME = 'BADPIX ' / Name of the binary table extension
EXTVER = 2 / There shall be one instance of this extension f
DETNAM = 'DET1 ' / CZT Detector ID (0,1,2 or 3)
TELESCOP= 'NuSTAR ' / Telescope (mission) name
INSTRUME= 'FPMA ' / Instrument name (FPMA or FPMB)
ORIGIN = 'Caltech ' / Source of FITS file
CREATOR = 'FTOOLS 6.10 ' / Creator
VERSION = 1 / Extension version number
FILENAME= 'nuAuserbadpix20100101v001.fits' / File name
CONTENT = 'NuSTAR User Bad Pixel Table' / File content
CCLS0001= 'BCF ' / Daset is a Basic Calibration File
CDTP0001= 'DATA ' / Calibration file contains data
CCNM0001= 'BADPIX ' / Type of calibration data
CVSD0001= '2010-01-01' / Date when this file should first be used
CVST0001= '00:00:00' / Time of day when this file should first be used
CDES0001= 'NuSTAR User Bad Pixel Table' / Description
COMMENT
COMMENT This extension provides, for Detector #1 of the FPMA, the list of
COMMENT user-defined bad pixels.
COMMENT The BADFLAG column contains the bad pixel flags. These are a
COMMENT combination of the following bit settings:
COMMENT
COMMENT b0000000000000001 Bad pixel from on-ground CALDB Bad Pixel File
COMMENT b0000000000000010 Disabled pixel from on-board software
COMMENT b0000000000000100 Bad pixels in the file provided by the user
COMMENT
END
@@ -1,46 +0,0 @@
XTENSION= 'BINTABLE' / binary table extension
BITPIX = 8 / 8-bit bytes
NAXIS = 2 / 2-dimensional binary table
NAXIS1 = 12 / width of table in bytes
NAXIS2 = 64 / number of rows in table
PCOUNT = 0 / size of special data area
GCOUNT = 1 / one data group (required keyword)
TFIELDS = 4 / number of fields in each row
TTYPE1 = 'TIME ' / Start Time of Bad Pixel Interval
TFORM1 = '1D ' / data format of field: 8-byte DOUBLE
TUNIT1 = 's ' / physical unit of field
TTYPE2 = 'RAWX ' / X-position in Raw Detector Coordinates
TFORM2 = '1B ' / data format of field: BYTE
TUNIT2 = 'pixel ' / physical unit of field
TTYPE3 = 'RAWY ' / Y-position in Raw Detector Coordinates
TFORM3 = '1B ' / data format of field: BYTE
TUNIT3 = 'pixel ' / physical unit of field
TTYPE4 = 'BADFLAG ' / Bad pixel flag
TFORM4 = '16X ' / data format of field: BIT
EXTNAME = 'BADPIX ' / Name of the binary table extension
EXTVER = 3 / There shall be one instance of this extension f
DETNAM = 'DET2 ' / CZT Detector ID (0,1,2 or 3)
TELESCOP= 'NuSTAR ' / Telescope (mission) name
INSTRUME= 'FPMA ' / Instrument name (FPMA or FPMB)
ORIGIN = 'Caltech ' / Source of FITS file
CREATOR = 'FTOOLS 6.10 ' / Creator
VERSION = 1 / Extension version number
FILENAME= 'nuAuserbadpix20100101v001.fits' / File name
CONTENT = 'NuSTAR User Bad Pixel Table' / File content
CCLS0001= 'BCF ' / Daset is a Basic Calibration File
CDTP0001= 'DATA ' / Calibration file contains data
CCNM0001= 'BADPIX ' / Type of calibration data
CVSD0001= '2010-01-01' / Date when this file should first be used
CVST0001= '00:00:00' / Time of day when this file should first be used
CDES0001= 'NuSTAR User Bad Pixel Table' / Description
COMMENT
COMMENT This extension provides, for Detector #2 of the FPMA, the list of
COMMENT user-defined bad pixels.
COMMENT The BADFLAG column contains the bad pixel flags. These are a
COMMENT combination of the following bit settings:
COMMENT
COMMENT b0000000000000001 Bad pixel from on-ground CALDB Bad Pixel File
COMMENT b0000000000000010 Disabled pixel from on-board software
COMMENT b0000000000000100 Bad pixels in the file provided by the user
COMMENT
END
@@ -1,46 +0,0 @@
XTENSION= 'BINTABLE' / binary table extension
BITPIX = 8 / 8-bit bytes
NAXIS = 2 / 2-dimensional binary table
NAXIS1 = 12 / width of table in bytes
NAXIS2 = 64 / number of rows in table
PCOUNT = 0 / size of special data area
GCOUNT = 1 / one data group (required keyword)
TFIELDS = 4 / number of fields in each row
TTYPE1 = 'TIME ' / Start Time of Bad Pixel Interval
TFORM1 = '1D ' / data format of field: 8-byte DOUBLE
TUNIT1 = 's ' / physical unit of field
TTYPE2 = 'RAWX ' / X-position in Raw Detector Coordinates
TFORM2 = '1B ' / data format of field: BYTE
TUNIT2 = 'pixel ' / physical unit of field
TTYPE3 = 'RAWY ' / Y-position in Raw Detector Coordinates
TFORM3 = '1B ' / data format of field: BYTE
TUNIT3 = 'pixel ' / physical unit of field
TTYPE4 = 'BADFLAG ' / Bad pixel flag
TFORM4 = '16X ' / data format of field: BIT
EXTNAME = 'BADPIX ' / Name of the binary table extension
EXTVER = 4 / There shall be one instance of this extension f
DETNAM = 'DET3 ' / CZT Detector ID (0,1,2 or 3)
TELESCOP= 'NuSTAR ' / Telescope (mission) name
INSTRUME= 'FPMA ' / Instrument name (FPMA or FPMB)
ORIGIN = 'Caltech ' / Source of FITS file
CREATOR = 'FTOOLS 6.10 ' / Creator
VERSION = 1 / Extension version number
FILENAME= 'nuAuserbadpix20100101v001.fits' / File name
CONTENT = 'NuSTAR User Bad Pixel Table' / File content
CCLS0001= 'BCF ' / Daset is a Basic Calibration File
CDTP0001= 'DATA ' / Calibration file contains data
CCNM0001= 'BADPIX ' / Type of calibration data
CVSD0001= '2010-01-01' / Date when this file should first be used
CVST0001= '00:00:00' / Time of day when this file should first be used
CDES0001= 'NuSTAR User Bad Pixel Table' / Description
COMMENT
COMMENT This extension provides, for Detector #3 of the FPMA, the list of
COMMENT user-defined bad pixels.
COMMENT The BADFLAG column contains the bad pixel flags. These are a
COMMENT combination of the following bit settings:
COMMENT
COMMENT b0000000000000001 Bad pixel from on-ground CALDB Bad Pixel File
COMMENT b0000000000000010 Disabled pixel from on-board software
COMMENT b0000000000000100 Bad pixels in the file provided by the user
COMMENT
END
@@ -1,11 +0,0 @@
SIMPLE = T / file does conform to FITS standard
BITPIX = 16 / number of bits per data pixel
NAXIS = 0 / number of data axes
EXTEND = T / FITS dataset may contain extensions
COMMENT FITS (Flexible Image Transport System) format is defined in 'Astronomy
COMMENT and Astrophysics', volume 376, page 359; bibcode: 2001A&A...376..359H
TELESCOP= 'NuSTAR ' / Telescope (mission) name
INSTRUME= 'FPMA ' / Instrument name (FPMA or FPMB)
ORIGIN = 'Caltech ' / Source of FITS file
CREATOR = 'FTOOLS 6.10 ' / Creator
END
@@ -1,46 +0,0 @@
XTENSION= 'BINTABLE' / binary table extension
BITPIX = 8 / 8-bit bytes
NAXIS = 2 / 2-dimensional binary table
NAXIS1 = 12 / width of table in bytes
NAXIS2 = 64 / number of rows in table
PCOUNT = 0 / size of special data area
GCOUNT = 1 / one data group (required keyword)
TFIELDS = 4 / number of fields in each row
TTYPE1 = 'TIME ' / Start Time of Bad Pixel Interval
TFORM1 = '1D ' / data format of field: 8-byte DOUBLE
TUNIT1 = 's ' / physical unit of field
TTYPE2 = 'RAWX ' / X-position in Raw Detector Coordinates
TFORM2 = '1B ' / data format of field: BYTE
TUNIT2 = 'pixel ' / physical unit of field
TTYPE3 = 'RAWY ' / Y-position in Raw Detector Coordinates
TFORM3 = '1B ' / data format of field: BYTE
TUNIT3 = 'pixel ' / physical unit of field
TTYPE4 = 'BADFLAG ' / Bad pixel flag
TFORM4 = '16X ' / data format of field: BIT
EXTNAME = 'BADPIX ' / Name of the binary table extension
EXTVER = 1 / There shall be one instance of this extension f
DETNAM = 'DET0 ' / CZT Detector ID (0,1,2 or 3)
TELESCOP= 'NuSTAR ' / Telescope (mission) name
INSTRUME= 'FPMB ' / Instrument name (FPMA or FPMB)
ORIGIN = 'Caltech ' / Source of FITS file
CREATOR = 'FTOOLS 6.10 ' / Creator
VERSION = 1 / Extension version number
FILENAME= 'nuBuserbadpix20100101v001.fits' / File name
CONTENT = 'NuSTAR User Bad Pixel Table' / File content
CCLS0001= 'BCF ' / Daset is a Basic Calibration File
CDTP0001= 'DATA ' / Calibration file contains data
CCNM0001= 'BADPIX ' / Type of calibration data
CVSD0001= '2010-01-01' / Date when this file should first be used
CVST0001= '00:00:00' / Time of day when this file should first be used
CDES0001= 'NuSTAR User Bad Pixel Table' / Description
COMMENT
COMMENT This extension provides, for Detector #0 of the FPMB, the list of
COMMENT user-defined bad pixels.
COMMENT The BADFLAG column contains the bad pixel flags. These are a
COMMENT combination of the following bit settings:
COMMENT
COMMENT b0000000000000001 Bad pixel from on-ground CALDB Bad Pixel File
COMMENT b0000000000000010 Disabled pixel from on-board software
COMMENT b0000000000000100 Bad pixels in the file provided by the user
COMMENT
END
@@ -1,46 +0,0 @@
XTENSION= 'BINTABLE' / binary table extension
BITPIX = 8 / 8-bit bytes
NAXIS = 2 / 2-dimensional binary table
NAXIS1 = 12 / width of table in bytes
NAXIS2 = 64 / number of rows in table
PCOUNT = 0 / size of special data area
GCOUNT = 1 / one data group (required keyword)
TFIELDS = 4 / number of fields in each row
TTYPE1 = 'TIME ' / Start Time of Bad Pixel Interval
TFORM1 = '1D ' / data format of field: 8-byte DOUBLE
TUNIT1 = 's ' / physical unit of field
TTYPE2 = 'RAWX ' / X-position in Raw Detector Coordinates
TFORM2 = '1B ' / data format of field: BYTE
TUNIT2 = 'pixel ' / physical unit of field
TTYPE3 = 'RAWY ' / Y-position in Raw Detector Coordinates
TFORM3 = '1B ' / data format of field: BYTE
TUNIT3 = 'pixel ' / physical unit of field
TTYPE4 = 'BADFLAG ' / Bad pixel flag
TFORM4 = '16X ' / data format of field: BIT
EXTNAME = 'BADPIX ' / Name of the binary table extension
EXTVER = 2 / There shall be one instance of this extension f
DETNAM = 'DET1 ' / CZT Detector ID (0,1,2 or 3)
TELESCOP= 'NuSTAR ' / Telescope (mission) name
INSTRUME= 'FPMB ' / Instrument name (FPMA or FPMB)
ORIGIN = 'Caltech ' / Source of FITS file
CREATOR = 'FTOOLS 6.10 ' / Creator
VERSION = 1 / Extension version number
FILENAME= 'nuBuserbadpix20100101v001.fits' / File name
CONTENT = 'NuSTAR User Bad Pixel Table' / File content
CCLS0001= 'BCF ' / Daset is a Basic Calibration File
CDTP0001= 'DATA ' / Calibration file contains data
CCNM0001= 'BADPIX ' / Type of calibration data
CVSD0001= '2010-01-01' / Date when this file should first be used
CVST0001= '00:00:00' / Time of day when this file should first be used
CDES0001= 'NuSTAR User Bad Pixel Table' / Description
COMMENT
COMMENT This extension provides, for Detector #1 of the FPMA, the list of
COMMENT user-defined bad pixels.
COMMENT The BADFLAG column contains the bad pixel flags. These are a
COMMENT combination of the following bit settings:
COMMENT
COMMENT b0000000000000001 Bad pixel from on-ground CALDB Bad Pixel File
COMMENT b0000000000000010 Disabled pixel from on-board software
COMMENT b0000000000000100 Bad pixels in the file provided by the user
COMMENT
END
@@ -1,46 +0,0 @@
XTENSION= 'BINTABLE' / binary table extension
BITPIX = 8 / 8-bit bytes
NAXIS = 2 / 2-dimensional binary table
NAXIS1 = 12 / width of table in bytes
NAXIS2 = 64 / number of rows in table
PCOUNT = 0 / size of special data area
GCOUNT = 1 / one data group (required keyword)
TFIELDS = 4 / number of fields in each row
TTYPE1 = 'TIME ' / Start Time of Bad Pixel Interval
TFORM1 = '1D ' / data format of field: 8-byte DOUBLE
TUNIT1 = 's ' / physical unit of field
TTYPE2 = 'RAWX ' / X-position in Raw Detector Coordinates
TFORM2 = '1B ' / data format of field: BYTE
TUNIT2 = 'pixel ' / physical unit of field
TTYPE3 = 'RAWY ' / Y-position in Raw Detector Coordinates
TFORM3 = '1B ' / data format of field: BYTE
TUNIT3 = 'pixel ' / physical unit of field
TTYPE4 = 'BADFLAG ' / Bad pixel flag
TFORM4 = '16X ' / data format of field: BIT
EXTNAME = 'BADPIX ' / Name of the binary table extension
EXTVER = 3 / There shall be one instance of this extension f
DETNAM = 'DET2 ' / CZT Detector ID (0,1,2 or 3)
TELESCOP= 'NuSTAR ' / Telescope (mission) name
INSTRUME= 'FPMB ' / Instrument name (FPMA or FPMB)
ORIGIN = 'Caltech ' / Source of FITS file
CREATOR = 'FTOOLS 6.10 ' / Creator
VERSION = 1 / Extension version number
FILENAME= 'nuBuserbadpix20100101v001.fits' / File name
CONTENT = 'NuSTAR User Bad Pixel Table' / File content
CCLS0001= 'BCF ' / Daset is a Basic Calibration File
CDTP0001= 'DATA ' / Calibration file contains data
CCNM0001= 'BADPIX ' / Type of calibration data
CVSD0001= '2010-01-01' / Date when this file should first be used
CVST0001= '00:00:00' / Time of day when this file should first be used
CDES0001= 'NuSTAR User Bad Pixel Table' / Description
COMMENT
COMMENT This extension provides, for Detector #2 of the FPMA, the list of
COMMENT user-defined bad pixels.
COMMENT The BADFLAG column contains the bad pixel flags. These are a
COMMENT combination of the following bit settings:
COMMENT
COMMENT b0000000000000001 Bad pixel from on-ground CALDB Bad Pixel File
COMMENT b0000000000000010 Disabled pixel from on-board software
COMMENT b0000000000000100 Bad pixels in the file provided by the user
COMMENT
END
@@ -1,46 +0,0 @@
XTENSION= 'BINTABLE' / binary table extension
BITPIX = 8 / 8-bit bytes
NAXIS = 2 / 2-dimensional binary table
NAXIS1 = 12 / width of table in bytes
NAXIS2 = 64 / number of rows in table
PCOUNT = 0 / size of special data area
GCOUNT = 1 / one data group (required keyword)
TFIELDS = 4 / number of fields in each row
TTYPE1 = 'TIME ' / Start Time of Bad Pixel Interval
TFORM1 = '1D ' / data format of field: 8-byte DOUBLE
TUNIT1 = 's ' / physical unit of field
TTYPE2 = 'RAWX ' / X-position in Raw Detector Coordinates
TFORM2 = '1B ' / data format of field: BYTE
TUNIT2 = 'pixel ' / physical unit of field
TTYPE3 = 'RAWY ' / Y-position in Raw Detector Coordinates
TFORM3 = '1B ' / data format of field: BYTE
TUNIT3 = 'pixel ' / physical unit of field
TTYPE4 = 'BADFLAG ' / Bad pixel flag
TFORM4 = '16X ' / data format of field: BIT
EXTNAME = 'BADPIX ' / Name of the binary table extension
EXTVER = 4 / There shall be one instance of this extension f
DETNAM = 'DET3 ' / CZT Detector ID (0,1,2 or 3)
TELESCOP= 'NuSTAR ' / Telescope (mission) name
INSTRUME= 'FPMB ' / Instrument name (FPMA or FPMB)
ORIGIN = 'Caltech ' / Source of FITS file
CREATOR = 'FTOOLS 6.10 ' / Creator
VERSION = 1 / Extension version number
FILENAME= 'nuBuserbadpix20100101v001.fits' / File name
CONTENT = 'NuSTAR User Bad Pixel Table' / File content
CCLS0001= 'BCF ' / Daset is a Basic Calibration File
CDTP0001= 'DATA ' / Calibration file contains data
CCNM0001= 'BADPIX ' / Type of calibration data
CVSD0001= '2010-01-01' / Date when this file should first be used
CVST0001= '00:00:00' / Time of day when this file should first be used
CDES0001= 'NuSTAR User Bad Pixel Table' / Description
COMMENT
COMMENT This extension provides, for Detector #3 of the FPMA, the list of
COMMENT user-defined bad pixels.
COMMENT The BADFLAG column contains the bad pixel flags. These are a
COMMENT combination of the following bit settings:
COMMENT
COMMENT b0000000000000001 Bad pixel from on-ground CALDB Bad Pixel File
COMMENT b0000000000000010 Disabled pixel from on-board software
COMMENT b0000000000000100 Bad pixels in the file provided by the user
COMMENT
END
@@ -1,11 +0,0 @@
SIMPLE = T / file does conform to FITS standard
BITPIX = 16 / number of bits per data pixel
NAXIS = 0 / number of data axes
EXTEND = T / FITS dataset may contain extensions
COMMENT FITS (Flexible Image Transport System) format is defined in 'Astronomy
COMMENT and Astrophysics', volume 376, page 359; bibcode: 2001A&A...376..359H
TELESCOP= 'NuSTAR ' / Telescope (mission) name
INSTRUME= 'FPMA ' / Instrument name (FPMA or FPMB)
ORIGIN = 'Caltech ' / Source of FITS file
CREATOR = 'FTOOLS 6.10 ' / Creator
END
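The deleted text files above are header templates for user bad-pixel FITS extensions; the module whose deletion follows (presumably the package's main source file) turns them back into real FITS headers via astropy. A minimal sketch of that round trip, assuming a local copy of one template (the file name is illustrative):

```python
from astropy.io import fits

# Parse a header template like the ones above into a FITS Header object;
# region_to_raw() below does this before patching in real NAXIS values.
header = fits.Header.fromtextfile('nuAuserbadpixDET0.txt')
print(header['DETNAM'], header['INSTRUME'])  # DET0 FPMA
```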
@@ -1,584 +0,0 @@
|
||||
# %%
|
||||
import numpy as np
|
||||
import itertools
|
||||
from pandas import DataFrame, read_csv
|
||||
from astropy.table import Table
|
||||
from astropy.coordinates import SkyCoord
|
||||
from astropy import units as u
|
||||
from multiprocessing import get_context, cpu_count
|
||||
from time import perf_counter
|
||||
from os import stat, makedirs
|
||||
from os.path import dirname
|
||||
from scipy.signal import fftconvolve, convolve2d
|
||||
from astropy.io import fits
|
||||
from astropy.wcs import WCS
|
||||
from glob import glob
|
||||
from warnings import filterwarnings
|
||||
filterwarnings('ignore')
|
||||
|
||||
|
||||
def get_link_list(folder: str, sort_list: bool = True) -> list[str]:
|
||||
"""
|
||||
Returns array of paths to all *_cl.evt files in the directory recursively.
|
||||
"""
|
||||
links = glob(f'{folder}\\**\\*_cl.evt', recursive=True)
|
||||
if sort_list:
|
||||
sorted_list = sorted(links, key=lambda x: stat(x).st_size)
|
||||
return np.array(sorted_list)
|
||||
else:
|
||||
return np.array(links)
|
||||
|
||||
|
||||
def binary_array(num: int) -> list[list[bool]]:
|
||||
"""
|
||||
Returns list of all possible combinations of num of bool values.
|
||||
"""
|
||||
variants = [[0, 1], ]*num
|
||||
out = np.zeros((2**num, num), bool)
|
||||
for idx, level in enumerate(itertools.product(*variants)):
|
||||
out[idx] = np.array(level, dtype=bool)
|
||||
return out
|
||||
|
||||
|
||||
def create_array(file_path: str, mode: str = 'Sky') -> list[int]:
|
||||
"""
|
||||
Returns a 2d array of counts for given observation file.
|
||||
Modes 'Sky' and 'Det' return arrays in (X,Y) and DET1 respectively.
|
||||
"""
|
||||
temp = fits.getdata(file_path, 1)
|
||||
if mode == 'Sky':
|
||||
return np.histogram2d(temp['Y'],
|
||||
temp['X'],
|
||||
1000,
|
||||
[[0, 1000], [0, 1000]])[0]
|
||||
if mode == 'Det':
|
||||
return np.histogram2d(temp['DET1Y'],
|
||||
temp['DET1X'],
|
||||
360,
|
||||
[[0, 360], [0, 360]])[0]
|
||||
|
||||
|
||||
def get_wcs(file):
|
||||
"""
|
||||
Returns WCS for given observation.
|
||||
Note that argument here is an opened fits file, not a path.
|
||||
"""
|
||||
header = file[1].header
|
||||
wcs = WCS({
|
||||
'CTYPE1': header['TCTYP38'], 'CTYPE2': header['TCTYP39'],
|
||||
'CUNIT1': header['TCUNI38'], 'CUNIT2': header['TCUNI39'],
|
||||
'CDELT1': header['TCDLT38'], 'CDELT2': header['TCDLT39'],
|
||||
'CRPIX1': header['TCRPX38'], 'CRPIX2': header['TCRPX39'],
|
||||
'CRVAL1': header['TCRVL38'], 'CRVAL2': header['TCRVL39'],
|
||||
'NAXIS1': header['TLMAX38'], 'NAXIS2': header['TLMAX39']
|
||||
})
|
||||
return wcs
|
||||
|
||||
|
||||
def atrous(level: int = 0, max_size: int = 1001) -> list[list[float]]:
|
||||
"""
|
||||
Returns a trou kernel with the size 2**level and corresponding shape.
|
||||
"""
|
||||
base = 1/256*np.array([
|
||||
[1, 4, 6, 4, 1],
|
||||
[4, 16, 24, 16, 4],
|
||||
[6, 24, 36, 24, 6],
|
||||
[4, 16, 24, 16, 4],
|
||||
[1, 4, 6, 4, 1],
|
||||
])
|
||||
size = 2**level * (base.shape[0]-1)+1
|
||||
output = np.zeros((size, size))
|
||||
output[::2**level, ::2**level] = base
|
||||
if output.shape[0] > max_size:
|
||||
return output[(size-1)//2-(max_size-1)//2:(size-1)//2+(max_size-1)//2+1,
|
||||
(size-1)//2-(max_size-1)//2:(size-1)//2+(max_size-1)//2+1]
|
||||
return output
|
||||
|
||||
|
||||
def atrous_sig(level: int = 0) -> float:
|
||||
# sig_values = [0.8908, 0.20066, 0.08551, 0.04122, 0.02042]
|
||||
sig_values = [0.8725, 0.1893, 0.0946, 0.0473, 0.0237]
|
||||
if level < 5:
|
||||
return sig_values[level]
|
||||
else:
|
||||
return sig_values[4]/2**(level-4)
|
||||
|
||||
|
||||
def gauss(level: int = 0, max_size: int = 1000) -> list[list[float]]:
|
||||
"""
|
||||
Returns gaussian kernel with sigma = 2**level
|
||||
"""
|
||||
size = min(5*2**(level+1)+1, max_size)
|
||||
sigma = 2**(level)
|
||||
A = 1/(2*np.pi*sigma**2)**0.5
|
||||
x = A*np.exp((-(np.arange(size)-(size-1)//2)**2)/(2*sigma**2))
|
||||
out = np.multiply.outer(x, x)
|
||||
return out
|
||||
|
||||
|
||||
def adjecent(array):
|
||||
"""
|
||||
Returns two lists of indices of cells adjecent or diagonal to non-zero cells of given array
|
||||
"""
|
||||
grid = np.array([
|
||||
[1, 1, 1],
|
||||
[1, 0, 1],
|
||||
[1, 1, 1]
|
||||
])
|
||||
output = fftconvolve(array, grid, mode='same') >= 0.5
|
||||
try:
|
||||
output = np.logical_and(np.logical_and(output, np.logical_not(array)),
|
||||
np.logical_not(array.mask))
|
||||
except AttributeError:
|
||||
output = np.logical_and(output, np.logical_not(array))
|
||||
return output
|
||||
|
||||
|
||||
def add_borders(array, middle=True):
|
||||
"""
|
||||
Returns border mask for an DET1 observation array
|
||||
"""
|
||||
mask = np.zeros(array.shape)
|
||||
datax, datay = np.any(array > 0, 0), np.any(array > 0, 1)
|
||||
# Add border masks
|
||||
x_min, y_min = np.argmax(datax), np.argmax(datay)
|
||||
x_max = len(datax) - np.argmax(datax[::-1])
|
||||
y_max = len(datay) - np.argmax(datay[::-1])
|
||||
mask[y_min:y_max, x_min:x_max] = True
|
||||
if middle is True:
|
||||
mask[176:191, :] = False
|
||||
mask[:, 176:191] = False
|
||||
mask = np.logical_not(mask)
|
||||
return mask
|
||||
|
||||
|
||||
def fill_poisson(array, size_input=32):
|
||||
"""
|
||||
Fills all masked elements of an array with poisson signal with local expected value.
|
||||
"""
|
||||
if not (isinstance(array, np.ma.MaskedArray)):
|
||||
print('No mask found')
|
||||
return array
|
||||
size = size_input
|
||||
output = array.data.copy()
|
||||
mask = array.mask.copy()
|
||||
while mask.sum() > 1:
|
||||
kernel = np.ones((size, size))/size**2
|
||||
coeff = fftconvolve(np.logical_not(mask), kernel, mode='same')
|
||||
mean = fftconvolve(output, kernel, mode='same')
|
||||
idx = np.where(np.logical_and(mask, coeff > 0.1))
|
||||
output[idx] = np.random.poisson(np.abs(mean[idx]/coeff[idx]))
|
||||
mask[idx] = False
|
||||
size *= 2
|
||||
return output
|
||||
|
||||
|
||||
def count_binning(array, count_per_bin: int = 2):
|
||||
_array = (array[array.mask == False]
|
||||
if hasattr(array, 'mask') else array)
|
||||
_array = (_array.flatten()
|
||||
if array.ndim != 1 else _array)
|
||||
|
||||
bin_sum, bin_size = [], []
|
||||
_sum, _count = 0, 0
|
||||
|
||||
for el in _array:
|
||||
_sum += el
|
||||
_count += 1
|
||||
if _sum >= count_per_bin:
|
||||
bin_sum.append(_sum)
|
||||
bin_size.append(_count)
|
||||
_sum, _count = 0, 0
|
||||
|
||||
return np.array(bin_sum), np.array(bin_size)
|
||||
|
||||
|
||||
def cstat(expected, data: list, count_per_bin: int = 2) -> float:
|
||||
_data = data.flatten()
|
||||
_data = _data[_data.mask == False]
|
||||
_expected = expected
|
||||
c_stat = 0
|
||||
bin_sum_array, bin_count_array = count_binning(_data, count_per_bin)
|
||||
bin_exp_array = bin_count_array * _expected
|
||||
c_stat = 2 * (bin_exp_array - bin_sum_array + bin_sum_array * (np.log(bin_sum_array / bin_exp_array))).mean()
|
||||
return c_stat
|
||||
|
||||
|
||||
class Observation:
|
||||
"""
|
||||
Main class, contains information about the observation given.
|
||||
"""
|
||||
def __init__(self, file_name, E_borders=[3, 20]):
|
||||
self.filename = file_name
|
||||
self.name = file_name[file_name.find('nu'):].replace('_cl.evt', '')
|
||||
with fits.open(file_name) as file:
|
||||
self.obs_id = file[0].header['OBS_ID']
|
||||
self.ra = file[0].header['RA_NOM']
|
||||
self.dec = file[0].header['DEC_NOM']
|
||||
self.time_start = file[0].header['TSTART']
|
||||
self.header = file[0].header
|
||||
self.exposure = self.header['EXPOSURE']
|
||||
self.det = self.header['INSTRUME'][-1]
|
||||
self.wcs = get_wcs(file)
|
||||
self.data = np.ma.masked_array(*self.get_data(file, E_borders))
|
||||
|
||||
def get_coeff(self):
|
||||
"""
|
||||
Returns normalalizing coefficients for different chips of the observation detector.
|
||||
Coefficients are obtained from stacked observations in OCC mode.
|
||||
"""
|
||||
coeff = np.array([0.977, 0.861, 1.163, 1.05]) if self.det == 'A' else np.array([1.004, 0.997, 1.025, 0.979])
|
||||
resized_coeff = (coeff).reshape(2, 2).repeat(180, 0).repeat(180, 1)
|
||||
return resized_coeff
|
||||
|
||||
def get_data(self, file, E_borders=[3, 20]):
|
||||
"""
|
||||
Returns masked array with DET1 image data for given energy band.
|
||||
Mask is created from observations badpix tables and to mask the border and gaps.
|
||||
"""
|
||||
PI_min, PI_max = (np.array(E_borders)-1.6)/0.04
|
||||
data = file[1].data.copy()
|
||||
idx_mask = (data['STATUS'].sum(1) == 0)
|
||||
idx_output = np.logical_and(idx_mask, np.logical_and((data['PI'] > PI_min), (data['PI'] < PI_max)))
|
||||
data_output = data[idx_output]
|
||||
data_mask = data[np.logical_not(idx_mask)]
|
||||
build_hist = lambda array: np.histogram2d(array['DET1Y'], array['DET1X'], 360, [[0, 360], [0, 360]])[0]
|
||||
output = build_hist(data_output)
|
||||
mask = build_hist(data_mask)
|
||||
mask = np.logical_or(mask, add_borders(output))
|
||||
mask = np.logical_or(mask, self.get_bad_pix(file))
|
||||
return output, mask
|
||||
|
||||
def get_bad_pix(self, file, threshold=0.9):
|
||||
"""
|
||||
Creates a mask for observation based on badpix tables.
|
||||
"""
|
||||
current_dir = dirname(__file__)
|
||||
output = np.ones((360, 360))
|
||||
for det_id in range(4):
|
||||
badpix = file[3 + det_id].data
|
||||
badpix_exp = (badpix['TIME_STOP'] - badpix['TIME'])/self.exposure
|
||||
pixpos = np.load(f'{current_dir}\\pixpos\\ref_pix{self.det}{det_id}.npy', allow_pickle=True).item()
|
||||
for raw_x, raw_y, exp in zip(badpix['RAWX'], badpix['RAWY'], badpix_exp):
|
||||
y, x = pixpos[(raw_x, raw_y)]
|
||||
output[x-3:x+11, y-3:y+11] -= exp
|
||||
output = np.clip(output, a_min=0, a_max=None)
|
||||
self.norm_exp_map = output
|
||||
return output < threshold
|
||||
|
||||
def exposure_corr(self, array):
|
||||
corr = 1 - self.norm_exp_map
|
||||
corr[corr > 0.1] = 0.
|
||||
correction_poiss = np.random.poisson(corr*array, corr.shape)
|
||||
return array + correction_poiss
|
||||

    def wavdecomp(self, mode='gauss', thresh=False, occ_coeff=False):
        """
        Performs a wavelet decomposition of the image.
        """
        # THRESHOLD
        if type(thresh) is int:
            thresh_max, thresh_add = thresh, thresh/2
        elif type(thresh) is tuple:
            thresh_max, thresh_add = thresh
        else:
            thresh_max, thresh_add = 0, 0
        # INIT NEEDED VARIABLES
        wavelet = globals()[mode]
        max_level = 8
        conv_out = np.zeros(
            (max_level+1, self.data.shape[0], self.data.shape[1])
        )
        size = self.data.shape[0]
        # PREPARE ORIGINAL DATA FOR ANALYSIS: FILL THE HOLES + MIRROR + DETECTOR CORRECTION
        data = self.exposure_corr(self.data)
        data = fill_poisson(data)
        if occ_coeff:
            data = data*self.get_coeff()
        data = np.pad(data, data.shape[0], mode='reflect')
        # ITERATIVELY CONDUCT WAVELET DECOMPOSITION
        for i in range(max_level):
            conv = fftconvolve(data, wavelet(i), mode='same')
            conv[conv < 0] = 0
            temp_out = data-conv
            # ERRORMAP CALCULATION
            if thresh_max != 0:
                if mode == 'gauss':
                    sig = ((wavelet(i)**2).sum())**0.5
                if mode == 'atrous':
                    sig = atrous_sig(i)
                bkg = conv  # smoothed background, already computed and clipped above
                err = (1+np.sqrt(bkg+0.75))*sig
                significant = (temp_out > thresh_max*err)[size:2*size, size:2*size]
                if thresh_add != 0:
                    # Grow regions that pass the main threshold into adjacent
                    # pixels that pass the lower additional threshold
                    add_significant = (temp_out > thresh_add*err)[size:2*size, size:2*size]
                    adj = adjecent(significant)
                    add_condition = np.logical_and(adj, add_significant)
                    while (add_condition).any():
                        significant = np.logical_or(significant, add_condition)
                        adj = adjecent(significant)
                        add_condition = np.logical_and(adj, add_significant)
                significant = np.pad(significant, significant.shape[0], mode='reflect')
                temp_out[np.logical_not(significant)] = 0
            # WRITING THE WAVELET DECOMP LAYER
            conv_out[i] = temp_out[size:2*size, size:2*size]
            data = conv
        conv_out[max_level] = conv[size:2*size, size:2*size]
        return conv_out
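    # Usage sketch, mirroring process() below: decompose with the 'atrous'
    # kernel, then build a source region from the mid-scale layers (the first
    # two layers and the final smooth-background layer are skipped there,
    # presumably as noise- and background-dominated):
    #
    #     wav_obs = obs.wavdecomp('atrous', (5, 2), occ_coeff=True)
    #     region = wav_obs[2:-1].sum(0) > 0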

    def region_to_raw(self, region):
        """
        Returns an hdu_list with positions of masked pixels in RAW coordinates.
        """
        x_region, y_region = np.where(region)
        hdus = []
        for i in range(4):
            current_dir = dirname(__file__)
            pixpos = Table(fits.getdata(f'{current_dir}/pixpos/nu{self.det}pixpos20100101v007.fits', i+1))
            pixpos = pixpos[pixpos['REF_DET1X'] != -1]

            # Mark RAW pixels whose DET1 reference position falls inside the region
            test = np.zeros(len(pixpos['REF_DET1X']), dtype=bool)
            for idx, (x, y) in enumerate(zip(pixpos['REF_DET1X'], pixpos['REF_DET1Y'])):
                test[idx] = np.logical_and(np.equal(x, x_region), np.equal(y, y_region)).any()

            positions = np.array((pixpos['RAWX'][test], pixpos['RAWY'][test]))
            if sum(test) != 0:
                positions = np.unique(positions, axis=1)
            rawx, rawy = positions[0], positions[1]

            time_start = float(78913712)  # fixed TIME value used for all rows
            bad_flag = np.zeros(16, dtype=bool)
            bad_flag[13] = 1  # only flag bit 13 is set

            columns = []
            columns.append(fits.Column('TIME', '1D', 's', array=len(rawx) * [time_start]))
            columns.append(fits.Column('RAWX', '1B', 'pixel', array=rawx))
            columns.append(fits.Column('RAWY', '1B', 'pixel', array=rawy))
            columns.append(fits.Column('BADFLAG', '16X', array=len(rawx) * [bad_flag]))

            hdu = fits.BinTableHDU.from_columns(columns)
            naxis1, naxis2 = hdu.header['NAXIS1'], hdu.header['NAXIS2']
            hdu.header = fits.Header.fromtextfile(f'{current_dir}/badpix_headers/nu{self.det}userbadpixDET{i}.txt')
            hdu.header['NAXIS1'] = naxis1
            hdu.header['NAXIS2'] = naxis2
            hdus.append(hdu)

        primary_hdu = fits.PrimaryHDU()
        primary_hdu.header = fits.Header.fromtextfile(f'{current_dir}/badpix_headers/nu{self.det}userbadpix_main.txt')
        hdu_list = fits.HDUList([
            primary_hdu,
            *hdus
        ])
        return hdu_list
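    # Usage sketch, mirroring process_folder() below: the returned HDUList is
    # written out as a user badpix file for the observation, e.g.
    #
    #     region_raw = obs.region_to_raw(region.astype(int))
    #     region_raw.writeto('nu<obs_id><det>_reg_raw.fits', overwrite=True)
    #
    # (the output name here is illustrative; process_folder() builds it from
    # the results table instead)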


def process(args):
    """
    Creates a mask using wavelet decomposition and produces statistics and
    metadata for the passed observation.
    args must contain two arguments: path to the file of interest and
    threshold, e.g. ('D:/Data/obs_cl.evt', (5, 2))
    """
    obs_path, thresh = args
    bin_num = 6
    try:
        obs = Observation(obs_path)
        sky_coord = SkyCoord(ra=obs.ra*u.deg, dec=obs.dec*u.deg, frame='fk5').transform_to('galactic')
        lon, lat = sky_coord.l.value, sky_coord.b.value
        rem_signal, rem_area, poiss_comp, rms = np.zeros((4, 2**bin_num))
        region = np.zeros(obs.data.shape, dtype=bool)
        region_raw = -1
        rem_region = np.logical_and(region, np.logical_not(obs.data.mask))
        masked_obs = np.ma.masked_array(obs.data, mask=region)
        good_lvl = np.zeros(bin_num, dtype=bool)
        good_idx = 0
        if obs.exposure > 1000:
            wav_obs = obs.wavdecomp('atrous', thresh, occ_coeff=True)
            occ_coeff = obs.get_coeff()
            binary_arr = binary_array(bin_num)

            # Score every combination of the six mid-scale wavelet layers by
            # the statistics of the image that remains after masking
            for idx, lvl in enumerate(binary_arr):
                try:
                    region = wav_obs[2:-1][lvl].sum(0) > 0
                except ValueError:
                    region = np.zeros(obs.data.shape, dtype=bool)
                masked_obs = np.ma.masked_array(obs.data, mask=region)*occ_coeff
                rem_region = np.logical_and(region, np.logical_not(obs.data.mask))
                rem_signal[idx] = 1-obs.data[region].sum()/obs.data.sum()
                rem_area[idx] = 1 - rem_region.sum()/np.logical_not(obs.data.mask).sum()
                poiss_comp[idx] = cstat(masked_obs.mean(), masked_obs)
                rms[idx] = np.sqrt(((masked_obs-masked_obs.mean())**2).mean())

            # Pick the combination with the lowest C-statistic that stays close
            # to the all-layers value while masking less area
            for idx in range(len(poiss_comp)):
                if ((poiss_comp[idx] < poiss_comp[good_idx]) and
                        (poiss_comp[idx] < poiss_comp[-1] + 0.05) and
                        (rem_area[idx] > rem_area[-1])):
                    good_idx = idx
            if good_idx == 0:
                good_idx = len(binary_arr) - 1
            good_lvl = binary_arr[good_idx]

            try:
                region = wav_obs[2:-1][good_lvl].sum(0) > 0
                if region.sum() > 0:
                    region_raw = obs.region_to_raw(region.astype(int))
            except ValueError:
                region = np.zeros(obs.data.shape, dtype=bool)
            masked_obs = np.ma.masked_array(obs.data, mask=region)
            rem_region = np.logical_and(region, np.logical_not(obs.data.mask))
            to_table = [obs.obs_id,
                        obs.det,
                        obs.ra,
                        obs.dec,
                        lon,
                        lat,
                        obs.time_start,
                        obs.exposure,
                        masked_obs.mean()/obs.exposure,  # count rate
                        1 - rem_region.sum()/np.logical_not(obs.data.mask).sum(),  # rem_area
                        poiss_comp[good_idx],
                        poiss_comp[0],
                        rms[good_idx]
                        ]
        else:
            to_table = [obs.obs_id,
                        obs.det,
                        obs.ra,
                        obs.dec,
                        lon,
                        lat,
                        obs.time_start,
                        obs.exposure,
                        -1,  # count rate
                        -1,  # rem_area
                        -1,  # poisson_stat
                        -1,  # poisson_stat_full
                        -1   # rms
                        ]
        return to_table, region.astype(int), region_raw
    except TypeError:
        return obs_path, -1, -1
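# Hedged usage example (the path is illustrative):
#
#     result, region, region_raw = process(('D:/Data/nu60001068002A01_cl.evt', (5, 2)))
#     if region_raw != -1:
#         region_raw.writeto('badpix.fits', overwrite=True)
#
# If the observation cannot be parsed, process() instead returns the path and
# two -1 sentinels.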


def process_folder(input_folder=None, start_new_file=None, fits_folder=None, thresh=None):
    """
    Generates a fits-table of parameters, a folder with mask images in DET1
    coordinates and BADPIX tables in RAW coordinates for all observations in
    the given folder.
    Note that observations with exposure < 1000 sec are skipped.
    start_new_file can be either 'y' or 'n'.
    thresh must be a tuple, e.g. (5,2).
    """
    # DIALOGUE
    if not input_folder:
        print('Enter path to the input folder')
        input_folder = input()
    if not start_new_file:
        print('Create new file for this processing? y/n')
        start_new_file = input()
    if start_new_file == 'y':
        start_new = True
    elif start_new_file == 'n':
        start_new = False
    else:
        print('Cannot interpret input, closing script')
        raise SystemExit(0)
    if not fits_folder:
        print('Enter path to the output folder')
        fits_folder = input()
    region_folder = f'{fits_folder}/Region'
    region_raw_folder = f'{fits_folder}/Region_raw'
    if not thresh:
        print('Enter threshold values for wavelet decomposition:')
        print('General threshold:')
        _thresh_max = float(input())
        print('Additional threshold:')
        _thresh_add = float(input())
        thresh = (_thresh_max, _thresh_add)
    # CREATE ALL NECESSARY FILES AND VARIABLES
    obs_list = get_link_list(input_folder, sort_list=True)
    start = perf_counter()
    group_size = 50
    makedirs(region_folder, exist_ok=True)
    makedirs(region_raw_folder, exist_ok=True)
    # FILTERING BY THE FILE SIZE
    print(f'Finished scanning folders. Found {len(obs_list)} observations.')
    table = {
        'obs_id': [], 'detector': [], 'ra': [], 'dec': [],
        'lon': [], 'lat': [], 't_start': [], 'exposure': [],
        'count_rate': [], 'remaining_area': [], 'poisson_stat': [],
        'poisson_stat_full': [], 'rms': []
    }
    if start_new:
        out_table = DataFrame(table)
        out_table.to_csv(f'{fits_folder}/test.csv')
        out_table.to_csv(f'{fits_folder}/test_skipped.csv')
    # FILTERING OUT PROCESSED OBSERVATIONS
    already_processed_list = read_csv(
        f'{fits_folder}/test.csv',
        index_col=0,
        dtype={'obs_id': str}
    )
    already_skipped_list = read_csv(
        f'{fits_folder}/test_skipped.csv',
        index_col=0,
        dtype={'obs_id': str}
    )
    already_processed = (
        already_processed_list['obs_id'].astype(str) +
        already_processed_list['detector']
    ).values
    already_skipped = (
        already_skipped_list['obs_id'].astype(str) +
        already_skipped_list['detector']
    ).values
    obs_list_names = [
        curr[curr.index('nu')+2:curr.index('_cl.evt')-2]
        for curr in obs_list
    ]
    not_processed = np.array([
        (curr not in already_processed)
        for curr in obs_list_names
    ])
    not_skipped = np.array([
        (curr not in already_skipped)
        for curr in obs_list_names
    ])
    obs_list = obs_list[np.logical_and(not_processed, not_skipped)]
    print(f'Removed already processed observations. {len(obs_list)} observations remain.')
    # START PROCESSING
    print('Started processing...')
    num = 0
    for group_idx in range(len(obs_list)//group_size+1):
        print(f'Started group {group_idx}')
        group_list = obs_list[group_size*group_idx:min(group_size*(group_idx+1), len(obs_list))]
        max_size = np.array([
            stat(file).st_size/2**20
            for file in group_list
        ]).max()
        # Fewer parallel processes for larger event files to keep memory in check
        if max_size < 50:
            process_num = cpu_count()
        elif max_size < 200:
            process_num = cpu_count()//2
        elif max_size < 1000:
            process_num = cpu_count()//4
        else:
            process_num = 1
        print(f'Max file size in group is {max_size:.2f}Mb, creating {process_num} processes')
        with get_context('spawn').Pool(processes=process_num) as pool:
            packed_args = map(lambda _: (_, thresh), group_list)
            for result, region, region_raw in pool.imap(process, packed_args):
                if type(result) is np.str_:
                    obs_id = result[result.index('nu'):result.index('_cl.evt')]
                    print(f'{num:>3} is skipped. File {obs_id}')
                    num += 1
                    continue
                for key, value in zip(table.keys(), result):
                    table[key] = [value]
                if table['exposure'][0] < 1000:
                    print(f'{num:>3} {str(result[0])+result[1]} is skipped. Exposure < 1000')
                    DataFrame(table).to_csv(f'{fits_folder}/test_skipped.csv', mode='a', header=False)
                    num += 1
                    continue
                DataFrame(table).to_csv(f'{fits_folder}/test.csv', mode='a', header=False)
                fits.writeto(f'{region_folder}/{str(result[0])+result[1]}_region.fits', region, overwrite=True)
                if region_raw != -1:
                    region_raw.writeto(f'{region_raw_folder}/{str(result[0])+result[1]}_reg_raw.fits', overwrite=True)
                print(f'{num:>3} {str(result[0])+result[1]} is written.')
                num += 1
    print('Converting generated csv to fits file...')
    print(f'Current time in: {(perf_counter()-start):.2f}')
    print(f'Processed {num/len(obs_list)*100:.2f} percent')
    csv_file = read_csv(f'{fits_folder}/test.csv', index_col=0, dtype={'obs_id': str})
    Table.from_pandas(csv_file).write(f'{fits_folder}/test.fits', overwrite=True)
    print(f'Finished writing: {perf_counter()-start}')
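# Hedged usage example (paths are illustrative):
#
#     process_folder(input_folder='D:/Data/nustar_obs',
#                    start_new_file='y',
#                    fits_folder='D:/Data/nuwav_out',
#                    thresh=(5, 2))
#
# Any argument left as None is requested interactively, as in the dialogue
# section above.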
@@ -1,5 +0,0 @@
astropy==5.1
numpy==1.23.2
pandas==1.4.4
scipy==1.9.1
setuptools==57.4.0
29
setup.py
@@ -1,29 +0,0 @@
import setuptools

with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="nuwavsource",
    version="0.0.8",
    author="Andrey Mukhin",
    author_email="amukhin@phystech.edu",
    description="A package for source exclusion in NuStar observation data using wavelet decomposition",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Andreyousan/nuwavsource",
    packages=setuptools.find_packages(),
    include_package_data=True,
    classifiers=(
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ),
    install_requires=[
        'astropy==5.1',
        'numpy==1.23.2',
        'pandas==1.4.4',
        'scipy==1.9.1',
        'setuptools==57.4.0',
    ]
)