Compare commits


No commits in common. "master" and "code_trimming" have entirely different histories.

32 changed files with 625 additions and 852 deletions


@ -1,3 +1,2 @@
include nuwavdet/pixpos/*
include nuwavdet/badpix_headers/*
include nuwavsource/pixpos/*
include nuwavsource/badpix_headers/*

README.md

@ -1,67 +1,10 @@
# nuwavdet
# nuwavsource
This package is used to generate region masks separating any focused X-ray flux from background signal in NuSTAR observations.
This package is intended to detect sources in NuSTAR observations and to generate a mask excluding the signal from sources of any kind.
## Installation
This package is to be used with Python 3.x.x
```bash
pip install git+http://heagit.cosmos.ru/nustar/nuwavdet.git
```
Additionally, it generates a table containing:
To update the package to the current version, first uninstall the previous version:
```bash
pip uninstall nuwavdet
```
Then simply repeat the installation procedure from the repository.
## Installation verification
If the installation was successful the package can be used with the following import:
```python
from nuwavdet import nuwavdet as nw
```
To verify the installation, we suggest running a simple script:
```python
from nuwavdet import nuwavdet as nw
print(nw.binary_array(2))
```
The output of the script should be
```bash
[[False False]
 [False  True]
 [ True False]
 [ True  True]]
```
## Main use
The main functionality of the package is provided by a single function:
```python
nw.process(obs_path, thresh)
```
Inputs are a string with the path to the _cl.evt file and a tuple of thresholds, e.g.
```python
nw.process('D:\\Data\\obs_cl.evt', (3, 2))
```
A detailed description of the data extraction script is presented in the examples folder of the repository.
The function nw.process returns several Python objects (see the sketch after this list):
1. a Python dictionary with metadata and properties of the observation after the mask generation procedure.
2. a region array with the mask in the DET1 coordinate frame. Note that this mask follows the numpy masked-array convention, so True (1) marks a masked pixel and False (0) an unmasked one.
3. a custom bad-pixel table with flagged pixels in RAW coordinates. It can be exported as a FITS file and passed to nupipeline as fpma_userbpfile or fpmb_userbpfile.
4. an array with the sum of wavelet planes for potential alternative applications.
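As a rough illustration (the paths and output names below are placeholders), these outputs can be unpacked and saved in the same way as in the example scripts shipped with the repository:
```python
from nuwavdet import nuwavdet as nw

# Placeholder path to a cleaned NuSTAR event file.
OBS_PATH = './path_to_obs/nu<obsid><DET>01_cl.evt'

# Process the observation with the chosen thresholds.
info, region, region_raw, wav_sum = nw.process(OBS_PATH, thresh=(3, 2))

# Save the bad-pixel table in RAW coordinates (a FITS HDU list when sources
# were detected) in a nupipeline-compatible format.
region_raw.writeto('./out/badpix.fits')

# Save the region mask in DET1 coordinates as a FITS image.
nw.save_region(region, './out/region.fits', overwrite=False)

# Save the sum of wavelet planes as a FITS image.
nw.fits.writeto('./out/wav_sum.fits', wav_sum, overwrite=False)
```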
Metadata about the observation returned by nw.process (a short usage sketch follows this list):
Observation metadata:
Useful data about the observation:
1. OBS_ID
2. Detector
@ -71,14 +14,36 @@ Observation metadata:
Useful algorithm-related data:
6. Average count rate of unmasked area
7. Fraction of unmasked area
8. Modified Cash-statistic per bin before and after masking the detected sources
6. Average count rate on unmasked area
7. Portion of unmasked area
8. Specific statistical metric[1] before and after masking the detected sources
9. Root-mean-square of counts in unmasked area
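As a further sketch (again with placeholder paths), the metadata dictionary can be inspected by key and appended to a CSV table, mirroring the example scripts; the key names below are taken from the overview table used in this package:
```python
from nuwavdet import nuwavdet as nw

# Placeholder path to a cleaned NuSTAR event file.
info, _, _, _ = nw.process('./path_to_obs/nu<obsid><DET>01_cl.evt', thresh=(3, 2))

# Keys of the metadata dictionary: 'obs_id', 'detector', 'ra', 'dec', 'lon',
# 'lat', 't_start', 'exposure', 'count_rate', 'remaining_area', 'cash_stat',
# 'cash_stat_full'.
print(info['obs_id'], info['detector'], info['exposure'])

# Append the metadata as a single row to an existing CSV overview table
# (the example scripts create this CSV with matching columns beforehand).
nw.DataFrame(info, index=[0]).to_csv('./out/metadata.csv', mode='a', header=False)
```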
## Other uses
## Installation
This package is to be used with Python 3.x.x
Other possible use cases are shown in the examples folder.
To install the package, run
## Contact information
```bash
pip install nuwavsource
```
If you have any questions or issues with the code, feel free to contact Andrey Mukhin: amukhin@cosmos.ru
## Usage
To use the package in your project, import it by writing:
```python
from nuwavsource import nuwavsource
```
You can process the cl.evt file by creating an Observation class object:
```python
obs = nuwavsource.Observation(path_to_evt_file)
```
Additionally, the energy band in keV from which to select events can be passed as an argument. The default value is [3, 20].
```python
obs = nuwavsource.Observation(path_to_evt_file, E_borders=[E_min, E_max])
```


@ -1,54 +0,0 @@
from nuwavdet import nuwavdet as nw
OBS_PATH = r'.//path_to_obs//nu<obsid><DET>01_cl.evt'
THRESH = (3, 2)
SAVE_BADPIX_PATH = r'.//out//badpix.fits'
SAVE_REGION_PATH = r'.//out//region.fits'
SAVE_WAVSUM_PATH = r'.//out//wavsum.fits'
METADATA_PATH = r'.//out//metadata.csv'
METADATA_FITS_PATH = r'.//out//metadata.fits'
if __name__ == '__main__':
# PROCESS THE OBSERVATION WITH GIVEN THRESHOLD
result, region, region_raw, wav_sum = nw.process(OBS_PATH, thresh=THRESH)
# SAVE THE BAD PIXEL FILE IN RAW COORDINATES TO A FITS FILE WITH
# NUPIPELINE-COMPATIBLE FORMAT AND HEADERS.
region_raw.writeto(SAVE_BADPIX_PATH)
# SAVE REGION MASK AS A FITS IMAGE
nw.save_region(region, SAVE_REGION_PATH, overwrite=False)
# Note that the Python script uses a numpy masked array with
# True (1) for masked and False (0) for unmasked pixels.
# nw.save_region converts the numpy masked array to the
# conventional format with 1 for unmasked and 0 for masked pixels.
# However, if the mask is to be used in Python again, convert it
# back with numpy.logical_not(mask).
# SAVE WAVSUM ARRAY AS A FITS IMAGE
nw.fits.writeto(SAVE_WAVSUM_PATH, wav_sum, overwrite=False)
# SAVE METADATA
# WE SUGGEST SAVING ALL THE METADATA FOR SEVERAL OBSERVATIONS
# IN ONE FILE.
# CREATE CSV FILE TO SAVE DATA
# IF FILE ALREADY EXISTS YOU SHOULD REMOVE THIS BLOCK FROM YOUR CODE
table = {
'obs_id': [], 'detector': [], 'ra': [], 'dec': [],
'lon': [], 'lat': [], 't_start': [], 'exposure': [],
'count_rate': [], 'remaining_area': [], 'cash_stat': [],
'cash_stat_full': []
}
out_table = nw.DataFrame(table)
out_table.to_csv(METADATA_PATH)
# SAVE DATA TO CREATED CSV
nw.DataFrame(result, index=[0]).to_csv(METADATA_PATH, mode='a', header=False)
# TRANSFORM THE CSV TO FITS-TABLE
nw.csv_to_table(METADATA_PATH, METADATA_FITS_PATH)


@ -1,33 +0,0 @@
from nuwavdet import nuwavdet as nw
INPUT_FOLDER = r'path_to_directory'
OUTPUT_FOLDER = r'.//Output'
if __name__ == '__main__':
# BEGIN PROCESSING ALL THE OBSERVATIONS INSIDE THE FOLDER
nw.process_folder(input_folder=INPUT_FOLDER,
start_new_file='y',
fits_folder=OUTPUT_FOLDER,
thresh=(3, 2),
cpu_num=10
)
# IF THE PROCESSING WAS INTERRUPTED YOU CAN CONTINUE IT WITH THE SAME CODE
# BY CHANGING THE start_new_file TO 'n'.
# THE STRUCTURE OF THE FOLDER IS
# OUTPUT_FOLDER
# __overview.csv csv-table with observation metadata
# __overview.fits fits-table with the same metadata
# __overview_skipped.csv csv-table with the skipped observations
# __Region folder for region mask images
# ____<obsid><DET>_region.fits
# __Region_raw folder for region masks in RAW coordinates
# ____<obsid><DET>_reg_raw.fits
# __Wav_sum folder for sum of wavelet layers
# ____<obsid><DET>_wav_sum.fits
# Note nw.process_folder uses multiprocessing with cpu_num cores.
# The number of cores can be manually chosen or automatically
# detected if cpu_num = 0.


@ -1,23 +0,0 @@
from nuwavdet import nuwavdet as nw
OBS_PATH = r'.//path_to_obs//nu<obsid><DET>01_cl.evt'
THRESH = (3, 2)
if __name__ == '__main__':
# CREATE THE OBSERVATION CLASS OBJECT
obs = nw.Observation(OBS_PATH)
# CALCULATE THE WAVELET LAYERS WITH THE GIVEN THRESHOLD
wav_layers = obs.wavdecomp(mode='atrous', occ_coeff=True, thresh=THRESH)
# ALL THE LAYERS CAN BE ACCESSED AS AN ELEMENT OF wav_layers VARIABLE
# wav_layers[0] for the 1st wavelet layer
# wav_layers[4] for 5th wavelet layer
# wav_layers[-1] for the last wavelet layer
# wav_layers[2:5] for the list of the layers from 3 to 5
# wav_layers[[1, 3, 5]] for the list of layers 2, 4 and 6
# To calculate the sum of wavelet layers, use the sum() method
# wav_layers[2:7].sum(0) returns a sum of layers from 3 to 7
# wav_layers[[1, 3, 5]].sum(0) returns a sum of layers 2, 4 and 6.


@ -1,23 +0,0 @@
from nuwavdet import nuwavdet as nw
import numpy as np
OBS_PATH = r'.//path_to_obs//nu<obsid><DET>01_cl.evt'
MASK_PATH = r'.//path_to_mask//<obsid><DET>.fits'
if __name__ == '__main__':
# CREATE THE OBSERVATION CLASS OBJECT
obs = nw.Observation(OBS_PATH)
# READ THE REGION MASK FILE
region = nw.fits.getdata(MASK_PATH)
# TRANSFORM REGION MASK DATA TO NUMPY MASK DATA (SEE 1_save_results.py).
region = np.logical_not(region.astype(bool))
# CREATE MASKED ARRAY CLASS OBJECT
masked_data = np.ma.masked_array(obs.data, mask=region)
# CALCULATE THE CSTAT ON THE MASKED DATA
print(nw.cstat(masked_data.mean(), masked_data))


@ -1 +0,0 @@
name = 'nuwavdet'

nuwavsource/__init__.py Normal file

@ -0,0 +1 @@
name = 'nuwavsource'

Binary file not shown.

Binary file not shown.


@ -1,18 +1,17 @@
import itertools
# %%
import numpy as np
import os
import itertools
from pandas import DataFrame, read_csv
from scipy.signal import fftconvolve
from astropy import units as u
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy import units as u
from multiprocessing import get_context, cpu_count
from time import perf_counter
from os import stat, makedirs
from os.path import dirname
from scipy.signal import fftconvolve, convolve2d
from astropy.io import fits
from astropy.wcs import WCS
from time import perf_counter
from multiprocessing import get_context, cpu_count
from glob import glob
from warnings import filterwarnings
filterwarnings('ignore')
@ -22,22 +21,14 @@ def get_link_list(folder: str, sort_list: bool = True) -> list[str]:
"""
Returns array of paths to all *_cl.evt files in the directory recursively.
"""
links = glob(os.path.join(folder, '**', '*_cl.evt'), recursive=True)
links = glob(f'{folder}\\**\\*_cl.evt', recursive=True)
if sort_list:
sorted_list = sorted(links, key=lambda x: os.stat(x).st_size)
sorted_list = sorted(links, key=lambda x: stat(x).st_size)
return np.array(sorted_list)
else:
return np.array(links)
def csv_to_table(csv_path, fits_path):
"""
Transform the csv table to fits table with astropy.
"""
csv_file = read_csv(csv_path, index_col=0, dtype={'obs_id': str})
Table.from_pandas(csv_file).write(fits_path, overwrite=True)
def binary_array(num: int) -> list[list[bool]]:
"""
Returns a list of all possible combinations of num bool values.
@ -161,7 +152,7 @@ def add_borders(array, middle=True):
return mask
def fill_poisson(array, size_input=15):
def fill_poisson(array, size_input=32):
"""
Fills all masked elements of an array with poisson signal with local expected value.
"""
@ -171,17 +162,14 @@ def fill_poisson(array, size_input=15):
size = size_input
output = array.data.copy()
mask = array.mask.copy()
mask_full = np.ones(mask.shape)
while mask.sum() > 1:
kernel = np.ones((size, size))/size**2
coeff_full = fftconvolve(mask_full, kernel, mode='same')
coeff = fftconvolve(np.logical_not(mask), kernel, mode='same') / coeff_full
coeff = fftconvolve(np.logical_not(mask), kernel, mode='same')
mean = fftconvolve(output, kernel, mode='same')
idx = np.where(np.logical_and(mask, coeff > 0.7))
idx = np.where(np.logical_and(mask, coeff > 0.1))
output[idx] = np.random.poisson(np.abs(mean[idx]/coeff[idx]))
mask[idx] = False
size += size_input
size += (1 - size % 2)
size *= 2
return output
@ -207,7 +195,6 @@ def count_binning(array, count_per_bin: int = 2):
def cstat(expected, data: list, count_per_bin: int = 2) -> float:
_data = data.flatten()
if type(data) is np.ma.masked_array:
_data = _data[_data.mask == False]
_expected = expected
c_stat = 0
@ -244,7 +231,7 @@ class Observation:
resized_coeff = (coeff).reshape(2, 2).repeat(180, 0).repeat(180, 1)
return resized_coeff
def get_data(self, file, E_borders=[3, 20], generate_mask=True):
def get_data(self, file, E_borders=[3, 20]):
"""
Returns masked array with DET1 image data for given energy band.
The mask is created from the observation's badpix tables and also covers the detector borders and gaps.
@ -257,23 +244,21 @@ class Observation:
data_mask = data[np.logical_not(idx_mask)]
build_hist = lambda array: np.histogram2d(array['DET1Y'], array['DET1X'], 360, [[0, 360], [0, 360]])[0]
output = build_hist(data_output)
if generate_mask:
mask = build_hist(data_mask)
mask = np.logical_or(mask, add_borders(output))
mask = np.logical_or(mask, self.get_bad_pix(file))
return output, mask
return output
def get_bad_pix(self, file, threshold=0.9):
"""
Creates a mask for observation based on badpix tables.
"""
current_dir = os.path.dirname(__file__)
current_dir = dirname(__file__)
output = np.ones((360, 360))
for det_id in range(4):
badpix = file[3 + det_id].data
badpix_exp = (badpix['TIME_STOP'] - badpix['TIME'])/self.exposure
pixpos = np.load(os.path.join(current_dir, 'pixpos', f'ref_pix{self.det}{det_id}.npy'), allow_pickle=True).item()
pixpos = np.load(f'{current_dir}\\pixpos\\ref_pix{self.det}{det_id}.npy', allow_pickle=True).item()
for raw_x, raw_y, exp in zip(badpix['RAWX'], badpix['RAWY'], badpix_exp):
y, x = pixpos[(raw_x, raw_y)]
output[x-3:x+11, y-3:y+11] -= exp
@ -287,7 +272,7 @@ class Observation:
correction_poiss = np.random.poisson(corr*array, corr.shape)
return array + correction_poiss
def wavdecomp(self, mode='gauss', thresh=0, occ_coeff=False):
def wavdecomp(self, mode='gauss', thresh=False, occ_coeff=False):
"""
Performs a wavelet decomposition of image.
"""
@ -316,6 +301,9 @@ class Observation:
temp_out = data-conv
# ERRORMAP CALCULATION
if thresh_max != 0:
if mode == 'gauss':
sig = ((wavelet(i)**2).sum())**0.5
if mode == 'atrous':
sig = atrous_sig(i)
bkg = fftconvolve(data, wavelet(i), mode='same')
bkg[bkg < 0] = 0
@ -341,19 +329,19 @@ class Observation:
"""
Returns an hdu_list with positions of masked pixels in RAW coordinates.
"""
y_region, x_region = np.where(region)
x_region, y_region = np.where(region)
hdus = []
for i in range(4):
current_dir = os.path.dirname(__file__)
pixpos = Table(fits.getdata(os.path.join(current_dir, 'pixpos', f'nu{self.det}pixpos20100101v007.fits'), i+1))
current_dir = dirname(__file__)
pixpos = Table(fits.getdata(f'{current_dir}\\pixpos\\nu{self.det}pixpos20100101v007.fits', i+1))
pixpos = pixpos[pixpos['REF_DET1X'] != -1]
ref_condition = np.zeros(len(pixpos['REF_DET1X']), dtype=bool)
test = np.zeros(len(pixpos['REF_DET1X']), dtype=bool)
for idx, (x, y) in enumerate(zip(pixpos['REF_DET1X'], pixpos['REF_DET1Y'])):
ref_condition[idx] = np.logical_and(np.equal(x, x_region), np.equal(y, y_region)).any()
test[idx] = np.logical_and(np.equal(x, x_region), np.equal(y, y_region)).any()
positions = np.array((pixpos['RAWX'][ref_condition], pixpos['RAWY'][ref_condition]))
if sum(ref_condition) != 0:
positions = np.array((pixpos['RAWX'][test], pixpos['RAWY'][test]))
if sum(test) != 0:
positions = np.unique(positions, axis=1)
rawx, rawy = positions[0], positions[1]
@ -369,13 +357,13 @@ class Observation:
hdu = fits.BinTableHDU.from_columns(columns)
naxis1, naxis2 = hdu.header['NAXIS1'], hdu.header['NAXIS2']
hdu.header = fits.Header.fromtextfile(os.path.join(current_dir, 'badpix_headers', f'nu{self.det}userbadpixDET{i}.txt'))
hdu.header = fits.Header.fromtextfile(f'{current_dir}\\badpix_headers\\nu{self.det}userbadpixDET{i}.txt')
hdu.header['NAXIS1'] = naxis1
hdu.header['NAXIS2'] = naxis2
hdus.append(hdu)
primary_hdu = fits.PrimaryHDU()
primary_hdu.header = fits.Header.fromtextfile(os.path.join(current_dir, 'badpix_headers', f'nu{self.det}userbadpix_main.txt'))
primary_hdu.header = fits.Header.fromtextfile(f'{current_dir}\\badpix_headers\\nu{self.det}userbadpix_main.txt')
hdu_list = fits.HDUList([
primary_hdu,
*hdus
@ -383,51 +371,28 @@ class Observation:
return hdu_list
def save_region(region, path, overwrite=False):
def process(args):
"""
Converts region from numpy mask notation (1 for masked, 0 otherwise)
to standard notation (0 for masked, 1 otherwise).
Saves the region as a FITS file at the given path.
Creates a mask using wavelet decomposition and produces some statistics and metadata about the passed observation.
args must contain two arguments: path to the file of interest and threshold, e.g. ('D:\\Data\\obs_cl.evt', (5, 2))
"""
fits.writeto(f'{path}',
np.logical_not(region).astype(int),
overwrite=overwrite)
def process(obs_path, thresh):
"""
Creates a mask using wavelet decomposition and produces some stats
and metadata about the passed observation.
Arguments: path to the file of interest and threshold,
e.g. process('D:\\Data\\obs_cl.evt', (3, 2))
"""
table = {
'obs_id': [], 'detector': [], 'ra': [], 'dec': [],
'lon': [], 'lat': [], 't_start': [], 'exposure': [],
'count_rate': [], 'remaining_area': [], 'cash_stat': [],
'cash_stat_full': []
}
obs_path, thresh = args
bin_num = 6
try:
obs = Observation(obs_path)
sky_coord = SkyCoord(ra=obs.ra*u.deg,
dec=obs.dec*u.deg,
frame='fk5').transform_to('galactic')
sky_coord = SkyCoord(ra=obs.ra*u.deg, dec=obs.dec*u.deg, frame='fk5').transform_to('galactic')
lon, lat = sky_coord.l.value, sky_coord.b.value
useful_bin_num = 6
rem_signal, rem_area, poiss_comp, rms = np.zeros((4, 2**useful_bin_num))
rem_signal, rem_area, poiss_comp, rms = np.zeros((4, 2**bin_num))
region = np.zeros(obs.data.shape, dtype=bool)
region_raw = -1
rem_region = np.logical_and(region, np.logical_not(obs.data.mask))
masked_obs = np.ma.masked_array(obs.data, mask=region)
good_lvl = np.zeros(useful_bin_num, dtype=bool)
good_lvl = np.zeros(bin_num, dtype=bool)
good_idx = 0
if obs.exposure > 1000:
wav_obs = obs.wavdecomp('atrous', thresh, occ_coeff=True)
wav_sum = wav_obs[2:-1].sum(0)
occ_coeff = obs.get_coeff()
binary_arr = binary_array(useful_bin_num)
good_idx = len(binary_arr) - 1
binary_arr = binary_array(bin_num)
for idx, lvl in enumerate(binary_arr):
try:
@ -435,30 +400,30 @@ def process(obs_path, thresh):
except ValueError:
region = np.zeros(obs.data.shape, dtype=bool)
masked_obs = np.ma.masked_array(obs.data,
mask=region) * occ_coeff
rem_region = np.logical_and(region,
np.logical_not(obs.data.mask))
masked_obs = np.ma.masked_array(obs.data, mask=region)*occ_coeff
rem_region = np.logical_and(region, np.logical_not(obs.data.mask))
rem_signal[idx] = 1-obs.data[region].sum()/obs.data.sum()
rem_area[idx] = 1 - rem_region.sum()/np.logical_not(obs.data.mask).sum()
poiss_comp[idx] = cstat(masked_obs.mean(), masked_obs)
rms[idx] = np.sqrt(((masked_obs-masked_obs.mean())**2).mean())
for idx in range(len(poiss_comp)):
if ((poiss_comp[idx] < poiss_comp[-1] + 0.05) and
(rem_area[idx] > rem_area[good_idx])):
if ((poiss_comp[idx] < poiss_comp[good_idx]) and
(poiss_comp[idx] < poiss_comp[-1] + 0.05) and
(rem_area[idx] > rem_area[-1])):
good_idx = idx
if good_idx == 0:
good_idx = len(binary_arr) - 1
good_lvl = binary_arr[good_idx]
try:
region = wav_obs[2:-1][good_lvl].sum(0) > 0
if region.sum() > 0:
region_raw = obs.region_to_raw(region.astype(int))
except ValueError:
region = np.zeros(obs.data.shape, dtype=bool)
region_raw = obs.region_to_raw(region.astype(int))
masked_obs = np.ma.masked_array(obs.data, mask=region)
rem_region = np.logical_and(region, np.logical_not(obs.data.mask))
to_table = [obs.obs_id,
obs.det,
obs.ra,
@ -471,10 +436,9 @@ def process(obs_path, thresh):
1 - rem_region.sum()/np.logical_not(obs.data.mask).sum(), # rem_area
poiss_comp[good_idx],
poiss_comp[0],
rms[good_idx]
]
else:
wav_sum = np.zeros((360, 360))
to_table = [obs.obs_id,
obs.det,
obs.ra,
@ -488,24 +452,16 @@ def process(obs_path, thresh):
-1, # rem_area
-1,
-1,
-1
]
for key, value in zip(table.keys(), to_table):
table[key] = value
return table, region.astype(int), region_raw, wav_sum
return to_table, region.astype(int), region_raw
except TypeError:
return obs_path, -1, -1, -1
return obs_path, -1, -1
def _process_multi(args):
return process(*args)
def process_folder(input_folder=None, start_new_file=None, fits_folder=None,
thresh=None, cpu_num=0):
def process_folder(input_folder=None, start_new_file=None, fits_folder=None, thresh=None):
"""
Generates a fits-table of parameters, folder with mask images in DET1 and
BADPIX tables in RAW for all observations in given folder.
Generates a fits-table of parameters, folder with mask images in DET1 and BADPIX tables in RAW for all observations in given folder.
Note that observations with exposure < 1000 sec are skipped.
start_new_file can be either 'y' or 'n'.
thresh must be a tuple, e.g. (5,2).
@ -525,13 +481,10 @@ def process_folder(input_folder=None, start_new_file=None, fits_folder=None,
print('Cannot interpret input, closing script')
raise SystemExit(0)
if not (fits_folder):
print('Enter path to the output folder')
print(f'Enter path to the output folder')
fits_folder = input()
region_folder = os.path.join(fits_folder, 'Region')
region_raw_folder = os.path.join(fits_folder, 'Region_raw')
wav_sum_folder = os.path.join(fits_folder, 'Wav_sum')
region_folder = f'{fits_folder}\\Region'
region_raw_folder = f'{fits_folder}\\Region_raw'
if not thresh:
print('Enter threshold values for wavelet decomposition:')
print('General threshold:')
@ -543,29 +496,31 @@ def process_folder(input_folder=None, start_new_file=None, fits_folder=None,
obs_list = get_link_list(input_folder, sort_list=True)
start = perf_counter()
group_size = 50
os.makedirs(region_folder, exist_ok=True)
os.makedirs(region_raw_folder, exist_ok=True)
os.makedirs(wav_sum_folder, exist_ok=True)
makedirs(region_folder, exist_ok=True)
makedirs(region_raw_folder, exist_ok=True)
# FILTERING BY THE FILE SIZE
print(f'Finished scanning folders. Found {len(obs_list)} observations.')
table = {
'obs_id': [], 'detector': [], 'ra': [], 'dec': [],
'lon': [], 'lat': [], 't_start': [], 'exposure': [],
'count_rate': [], 'remaining_area': [], 'cash_stat': [],
'cash_stat_full': []
'count_rate': [], 'remaining_area': [], 'poisson_stat': [],
'poisson_stat_full': [], 'rms': []
}
if start_new:
out_table = DataFrame(table)
out_table.to_csv(os.path.join(fits_folder, 'overview.csv'))
out_table.to_csv(os.path.join(fits_folder, 'overview_skipped.csv'))
out_table.to_csv(f'{fits_folder}\\test.csv')
out_table.to_csv(f'{fits_folder}\\test_skipped.csv')
# FILTERING OUT PROCESSED OBSERVATIONS
already_processed_list = read_csv(
os.path.join(fits_folder, 'overview.csv'), index_col=0, dtype={'obs_id': str}
f'{fits_folder}\\test.csv',
index_col=0,
dtype={'obs_id': str}
)
already_skipped_list = read_csv(
os.path.join(fits_folder, 'overview_skipped.csv'), index_col=0, dtype={'obs_id': str}
f'{fits_folder}\\test_skipped.csv',
index_col=0,
dtype={'obs_id': str}
)
already_processed = (
already_processed_list['obs_id'].astype(str) +
already_processed_list['detector']
@ -574,7 +529,6 @@ def process_folder(input_folder=None, start_new_file=None, fits_folder=None,
already_skipped_list['obs_id'].astype(str) +
already_skipped_list['detector']
).values
obs_list_names = [
curr[curr.index('nu')+2:curr.index('_cl.evt')-2]
for curr in obs_list
@ -587,56 +541,44 @@ def process_folder(input_folder=None, start_new_file=None, fits_folder=None,
(curr not in already_skipped)
for curr in obs_list_names
])
obs_list = obs_list[np.logical_and(not_processed, not_skipped)]
print('Removed already processed observations.',
f'{len(obs_list)} observations remain.')
print(f'Removed already processed observations. {len(obs_list)} observations remain.')
# START PROCESSING
print('Started processing...')
num = 0
if cpu_num == 0:
cpu_num = cpu_count()
elif cpu_num < 0:
raise ValueError('cpu_num must be a positive integer')
elif cpu_num > cpu_count():
print('Chosen cpu_num exceeds the number of CPU cores. Using cpu_count() instead.')
cpu_num = cpu_count()
for group_idx in range(len(obs_list)//group_size+1):
print(f'Started group {group_idx}')
group_list = obs_list[group_size*group_idx:min(group_size*(group_idx+1), len(obs_list))]
max_size = np.array([
os.stat(file).st_size/2**20
stat(file).st_size/2**20
for file in group_list
]).max()
process_num = (cpu_num if max_size < 50 else (cpu_num//2 if max_size < 200 else cpu_num//4 if max_size < 1000 else cpu_num//8))
process_num = (cpu_count() if max_size < 50 else (cpu_count()//2 if max_size < 200 else (cpu_count()//4 if max_size < 1000 else 1)))
print(f"Max file size in group is {max_size:.2f}Mb, create {process_num} processes")
with get_context('spawn').Pool(processes=process_num) as pool:
packed_args = map(lambda _: (_, thresh), group_list)
for result, region, region_raw, wav_sum in pool.imap(_process_multi, packed_args):
for result, region, region_raw in pool.imap(process, packed_args):
if type(result) is np.str_:
obs_id = result[result.index('nu'):result.index('_cl.evt')]
print(f'{num:>3} is skipped. File {obs_id}')
num += 1
continue
obs_name = str(result['obs_id'])+result['detector']
if result['exposure'] < 1000:
print(f'{num:>3} {obs_name} is skipped. Exposure < 1000')
DataFrame(result, index=[0]).to_csv(os.path.join(fits_folder, 'overview_skipped.csv'), mode='a', header=False)
for key, value in zip(table.keys(), result):
table[key] = [value]
if table['exposure'][0] < 1000:
print(f'{num:>3} {str(result[0])+result[1]} is skipped. Exposure < 1000')
DataFrame(table).to_csv(f'{fits_folder}\\test_skipped.csv', mode='a', header=False)
num +=1
continue
DataFrame(result, index=[0]).to_csv(os.path.join(fits_folder, 'overview.csv'), mode='a', header=False)
save_region(region, os.path.join(region_folder, f'{obs_name}_region.fits'), overwrite=True)
region_raw.writeto(os.path.join(region_raw_folder, f'{obs_name}_reg_raw.fits'), overwrite=True)
fits.writeto(os.path.join(wav_sum_folder, f'{obs_name}_wav_sum.fits'), wav_sum, overwrite=True)
print(f'{num:>3} {obs_name} is written.')
DataFrame(table).to_csv(f'{fits_folder}\\test.csv', mode='a', header=False)
fits.writeto(f'{region_folder}\\{str(result[0])+result[1]}_region.fits', region, overwrite=True)
if region_raw != -1:
region_raw.writeto(f'{region_raw_folder}\\{str(result[0])+result[1]}_reg_raw.fits', overwrite=True)
print(f'{num:>3} {str(result[0])+result[1]} is written.')
num +=1
print('Converting generated csv to fits file...')
print(f'Current time in: {(perf_counter()-start):.2f}')
print(f'Processed {num/len(obs_list)*100:.2f} percent')
csv_file = read_csv(os.path.join(fits_folder, 'overview.csv'), index_col=0, dtype={'obs_id': str})
Table.from_pandas(csv_file).write(os.path.join(fits_folder, 'overview.fits'), overwrite=True)
csv_file = read_csv(f'{fits_folder}\\test.csv', index_col=0, dtype={'obs_id': str})
Table.from_pandas(csv_file).write(f'{fits_folder}\\test.fits', overwrite=True)
print(f'Finished writing: {perf_counter()-start}')


@ -4,14 +4,14 @@ with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="nuwavdet",
version="0.1.1",
name="nuwavsource",
version="0.0.8",
author="Andrey Mukhin",
author_email="amukhin@cosmos.ru",
description="A package for source exclusion in NuSTAR observation data using wavelet decomposition",
author_email="amukhin@phystech.edu",
description="A package for source exclusion in NuStar observation data using wavelet decomposition",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/andrey-rrousan/nuwavdet",
url="https://github.com/Andreyousan/nuwavsource",
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=(