more apps
srglib/__init__.py (new file, 0 lines)
srglib/admin.py (new file, 3 lines)
@@ -0,0 +1,3 @@
from django.contrib import admin

# Register your models here.
srglib/apps.py (new file, 5 lines)
@@ -0,0 +1,5 @@
from django.apps import AppConfig


class SrglibConfig(AppConfig):
    name = 'srglib'
srglib/check_transient.py (new file, 92 lines)
@@ -0,0 +1,92 @@
import srglib.correlate_utils as correlate_utils
from heasarc.models import HeasarcTable, TableColumn, HeasarcObjectClass, NSIDE_SOURCES, ORDER, \
        HeasarcXrayMaster, HeasarcXMMSSC, HeasarcBase, HeasarcCSC, HeasarcRASS2RXS, Heasarc2SXPS
from django.contrib.contenttypes.models import ContentType
import numpy as np

polctypes = {cat: ContentType.objects.get_for_model(model) for cat, model in
             zip(["rxs2", "sxps2", "3xmm", "csc2"], [HeasarcRASS2RXS, Heasarc2SXPS, HeasarcXMMSSC, HeasarcCSC])}

def check_rass_transient(erositasource):
    """
    For the provided source, check whether it has ROSAT counterparts in the 2RXS catalogue.
    If it has, check whether the source is transient relative to all counterparts; if it does not,
    check whether the source is expected to be 10 times brighter than the detection limit for the RASS exposure.

    As a result of the check, the rosat_transient field of the source is switched.

    Parameters:
    --------
    erositasource: ErositaSource
        a single erosita source (django model instance)
    """
    correlate_utils.load_rassemap()
    rasssources = erositasource.heasarc.filter(polymorphic_ctype=polctypes["rxs2"])
    if rasssources.count():
        rassfluxes = np.array([rass.count_rate for rass in rasssources])
        if np.any(erositasource.ML_FLUX_0 < rassfluxes*correlate_utils.FSCALES["2rxs"]/10.) or \
           np.any(erositasource.ML_FLUX_0 > rassfluxes*correlate_utils.FSCALES["2rxs"]*10.):
            erositasource.rosat_transient = True
        erositasource.has_rosat = True
    else:
        erositasource.rosat_transient = correlate_utils.check_rss_cts_individual(erositasource)

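The factor-of-10 brightness test used above (and in the checks that follow) can be restated with concrete numbers; the helper below is an illustration only and is not part of this module.

# Illustration only: restates the factor-of-10 brightness criterion with invented numbers.
import numpy as np

def flux_ratio_outlier(eflux, counterpart_fluxes, fscale):
    """True if eflux lies >10x above or >10x below any scaled counterpart flux."""
    scaled = np.asarray(counterpart_fluxes, dtype=float) * fscale
    return bool(np.any(eflux > scaled * 10.) or np.any(eflux < scaled / 10.))

# e.g. an eROSITA ML_FLUX_0 of 2.0 against a 2RXS count rate of 0.01 cts/s:
# flux_ratio_outlier(2.0, [0.01], 5.74) -> True  (2.0 / 0.0574 ~ 35)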
def check_sxps_transients(erositasource):
    """
    For the provided source, check whether it has Swift counterparts in the 2SXPS catalogue;
    if it has, check whether the source is transient relative to all counterparts.

    As a result of the check, the transient field of the source is switched to False
    if the erosita source is not a transient relative to at least one counterpart.

    Parameters:
    --------
    erositasource: ErositaSource
        a single erosita source (django model instance)
    """
    sxpscounterparts = erositasource.heasarc.filter(polymorphic_ctype=polctypes["sxps2"])
    if sxpscounterparts.exists():
        flux = np.array([s.PeakRate_band1 + s.PeakRate_band2 for s in sxpscounterparts])
        transient = np.all(flux*10*correlate_utils.FSCALES["sxps2"] < erositasource.ML_FLUX_0)
        print("sxps flux", flux, "eflux", erositasource.ML_FLUX_0*0.9e-12, "sxps transient", transient)
        erositasource.transient = erositasource.transient & transient


def check_xmm3_transients(erositasource):
    """
    For the provided source, check whether it has XMM-Newton counterparts in the 3XMM catalogue;
    if it has, check whether the source is transient relative to all counterparts.

    As a result of the check, the transient field of the source is switched to False
    if the erosita source is not a transient relative to at least one counterpart.

    Parameters:
    --------
    erositasource: ErositaSource
        a single erosita source (django model instance)
    """
    xmmcounterparts = erositasource.heasarc.filter(polymorphic_ctype=polctypes["3xmm"])
    if xmmcounterparts.exists():
        flux = np.array([s.ep_1_flux + s.ep_3_flux + s.ep_2_flux for s in xmmcounterparts])
        transient = np.all(flux*10*correlate_utils.FSCALES["3XMM"] < erositasource.ML_FLUX_0)
        print("3xmm flux", flux, "eflux", erositasource.ML_FLUX_0*0.9e-12, "3xmm transient", transient)
        erositasource.transient = erositasource.transient & transient


def check_csc_transients(erositasource):
    """
    For the provided source, check whether it has Chandra counterparts in the CSC2 catalogue;
    if it has, check whether the source is transient relative to all counterparts.

    As a result of the check, the transient field of the source is switched to False
    if the erosita source is not a transient relative to at least one counterpart.

    Parameters:
    --------
    erositasource: ErositaSource
        a single erosita source (django model instance)
    """
    chancounterparts = erositasource.heasarc.filter(polymorphic_ctype=polctypes["csc2"])
    if chancounterparts.exists():
        flux = np.array([s.s_photflux_ap + s.m_photflux_ap for s in chancounterparts])
        transient = np.all(flux*10*correlate_utils.FSCALES["csc2"] < erositasource.ML_FLUX_0)
        print("csc2 flux", flux, "eflux", erositasource.ML_FLUX_0*0.9e-12, "csc2 transient", transient)
        erositasource.transient = erositasource.transient & transient


search_transients = {"sxps2": check_sxps_transients, "xmm3": check_xmm3_transients, "csc2": check_csc_transients}
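A minimal driver for these per-catalogue checks might look as follows; this is a sketch only (such a driver is not part of this commit), assuming the caller is responsible for saving the updated flags.

# Sketch only: run every per-catalogue check on one source and persist the flags.
def run_transient_checks(erositasource):
    check_rass_transient(erositasource)
    for check in search_transients.values():
        check(erositasource)
    erositasource.save()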
srglib/correlate_utils.py (new file, 536 lines)
@@ -0,0 +1,536 @@
import pandas
from scipy.spatial import cKDTree
import numpy as np
import pickle
from astropy.io import fits
from astropy.table import Table
from math import sin, cos, pi, sqrt, log
from scipy.sparse import coo_matrix
from scipy.stats import norm as normdist
import healpy
import sys, os


"""
A note on how the ROSAT-eROSITA scaling was obtained.

A significant fraction of the ROSAT sources in the 2RXS catalogue have fewer than 10 accumulated counts,
therefore Poisson statistics has to be applied for the fitting.
The first 5000 eROSITA sources were cross-matched with 2RXS; 1365 counterparts were found,
of which 744 were one-to-one counterparts.
We cut that to 596 sources by excluding the 0-0.1 and 0.9-1.0 quantiles of the rxs2.flux/erosita.flux distribution
(we assume that those quantiles contain a significant fraction of transients and a small fraction of statistical outliers).

For the rest of the sources we minimized the likelihood function
L = -ln \prod_i \int N(x, eflux_i, esigma_i) * Poisson(rasscounts_i, x*rassexposure_i) dx,
which yields the coefficient erosita.flux = 5.73*rxs2.countrate.
"""

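The fit described in the note above is roughly the following; a minimal sketch and my own restatement — the integration grid, variable names and bounds are assumptions, not the original fitting script.

import numpy as np
from scipy.stats import norm, poisson
from scipy.optimize import minimize_scalar

def neg_log_like(scale, eflux, esigma, counts, texp, xmax=10., ngrid=2001):
    # x is the (unknown) true ROSAT count rate of each pair; the eROSITA flux divided by the
    # trial scale acts as a Gaussian constraint on x, the observed RASS counts as a Poisson one.
    x = np.linspace(1e-4, xmax, ngrid)
    nll = 0.
    for f, s, k, t in zip(eflux, esigma, counts, texp):
        integrand = norm.pdf(x, loc=f/scale, scale=s/scale) * poisson.pmf(k, x*t)
        nll -= np.log(np.trapz(integrand, x) + 1e-300)
    return nll

# best_scale = minimize_scalar(neg_log_like, bounds=(1., 20.), method="bounded",
#                              args=(eflux, esigma, counts, texp))   # expected to land near 5.73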
FSCALES = {"3XMM": 1.24e+12, "2rxs": 5.74, "sxps2": 16.73, "csc2": 2.3e3}
CFLUX = {"3XMM": lambda x: (x.EP_1_FLUX + x.EP_2_FLUX + x.EP_3_FLUX).values, "2rxs": lambda x: x.RATE.values, "sxps2": lambda x: (x.Rate_band0 + x.Rate_band1 + x.Rate_band2).values}
EFLUX = {"3XMM": lambda x: np.sqrt(x.EP_1_FLUX_ERR**2. + x.EP_2_FLUX_ERR**2. + x.EP_3_FLUX_ERR**2.).values, "2rxs": lambda x: x.ERATE.values, "sxps2": lambda x: (x.Rate_band0_pos + x.Rate_band1_pos + x.Rate_band2_pos).values}
CATALOGUES = {"3XMM": "xmm3_2.pickle", "2rxs": "rxs2table.pickle", "sxps2": "sxps2.pickle", }
CATCOLORS = {"3XMM": "r", "2rxs": "g", "sxps2": "b", "csc2": "m"}
RASSEMAP = None

def load_rassemap():
    global RASSEMAP
    if RASSEMAP is None:
        RASSEMAP = healpy.read_map("/data/rosat/RASS_EXP_1_2048.fits")

def vectopol(cat1):
    """
    From a catalogue in the form of a pandas table with RA and DEC columns in degrees,
    produces unit-length vectors in cartesian space.

    Parameters:
        catalogue with RA and DEC columns in degrees

    Returns:
        array of shape Nx3, where N is the length of the catalogue; the columns contain the X, Y and Z cartesian coordinates
    """
    vec = np.empty((cat1.index.size, 3), np.double)
    vec[:, 0] = np.cos(cat1.dec.values*pi/180.)*np.sin(cat1.ra.values*pi/180.)
    vec[:, 1] = np.cos(cat1.dec.values*pi/180.)*np.cos(cat1.ra.values*pi/180.)
    vec[:, 2] = np.sin(cat1.dec.values*pi/180.)
    return vec


def prepare_ecat(ecat):
    ecat["error_radius"] = ecat.RADEC_ERR
    ecat["flux"] = ecat.ML_FLUX_0
    return makehealpixindex(ecat)


def pairwise_separation(cat1, cat2):
    """
    With two pandas catalogues provided, produces one-to-one distances in arcseconds based on the RA, DEC coordinates of each entry.
    """
    vec1 = vectopol(cat1)
    vec2 = vectopol(cat2)
    return np.arccos(np.minimum(np.sum(vec1*vec2, axis=1), 1))*180.*3600./pi


def makedistmat3d(cat1, cat2, angularseparation=10.):
    """
    Produces a distance matrix between two catalogues; each must have ra and dec columns.

    Parameters:
        cat1 and cat2: two catalogues in the form of pandas tables, which have RA and DEC columns in degrees
        angularseparation: maximum accepted angular separation (arcseconds) between objects in the catalogues

    Returns:
        sparse distance matrix of the separations lower than angularseparation between the two input catalogues
    """

    vec1 = vectopol(cat1)
    vec2 = vectopol(cat2)
    dd = 2.*sin(angularseparation*pi/180./3600./2.)
    c1 = cKDTree(vec1)
    c2 = cKDTree(vec2)
    dmat = c1.sparse_distance_matrix(c2, dd).tocoo()
    dmat.data = 2.*np.arcsin(dmat.data/2.)*180./pi*3600. #distance in arcseconds
    sidx = np.argsort(dmat.row)
    return coo_matrix((dmat.data[sidx], (dmat.row[sidx], dmat.col[sidx])), dmat.shape)
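A toy call to makedistmat3d, with invented coordinates, to show the shape of the result:

import pandas
cat_a = pandas.DataFrame({"ra": [10.0, 40.0], "dec": [20.0, -5.0]})
cat_b = pandas.DataFrame({"ra": [10.001, 120.0], "dec": [20.001, 30.0]})
dmat = makedistmat3d(cat_a, cat_b, angularseparation=10.)
# only the close pair survives: dmat.row -> [0], dmat.col -> [0], dmat.data -> [~5 arcsec]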

def get_closest_unique(cat1, cat2, angularseparation=5):
    dmat = makedistmat3d(cat1, cat2, angularseparation)

    idxd = np.argsort(dmat.data)
    row = np.copy(dmat.row[idxd])
    col = np.copy(dmat.col[idxd])
    urow, uidx = np.unique(row, return_index=True)
    ucol, uuidx, ucts = np.unique(col[uidx], return_counts=True, return_index=True)
    masku = np.zeros(urow.size, np.bool)
    masku[uuidx] = True
    urow, ucol = urow[masku], col[uidx][masku] #one to one closest neighbours
    return urow, ucol


def select_closest_pairs(dist, row, col):
    sidx = np.argsort(dist)
    dist = dist[sidx]
    row = row[sidx]
    col = col[sidx]
    ur, uc = [], []
    while True:
        urow, uidx = np.unique(row, return_index=True)
        ucol, uuidx, ucts = np.unique(col[uidx], return_counts=True, return_index=True)
        masku = np.zeros(urow.size, np.bool)
        masku[uuidx] = True
        urow, ucol = urow[masku], col[uidx][masku] #one to one closest neighbours
        ur.append(urow)
        uc.append(ucol)
        maskrow = np.isin(row, urow, invert=True)
        maskcol = np.isin(col, ucol, invert=True)
        mask = maskrow & maskcol
        if not np.any(mask):
            break
        row, col, dist = row[mask], col[mask], dist[mask]
    return np.concatenate(ur).astype(np.int), np.concatenate(uc).astype(np.int)


def get_all_closest_unique(cat1, cat2, asep):
    dmat = makedistmat3d(cat1, cat2, asep)
    return select_closest_pairs(dmat.data, dmat.row, dmat.col)


def get_closest(scat, ccat):
    ccat.error_radius.fillna(0., inplace=True)
    dmat = makedistmat3d(scat, ccat, 1200)
    sidx = np.argsort(dmat.data)
    urow, uidx = np.unique(dmat.row[sidx], return_index=True)
    ucol = dmat.col[sidx][uidx]
    return urow, ucol


def cutfaraway(cat1, cat2, dmat, minrad=30, exclrad=0.):
    """
    Given a distance matrix and two catalogues, cut all elements of the matrix which are
    greater than three times the combined positional error of the corresponding catalogue entries
    (but not closer than minrad). The catalogues must have an error_radius column for this function to work.
    """
    mask = (dmat.data < np.maximum(np.sqrt(cat1.iloc[dmat.row].error_radius.values**2. + cat2.iloc[dmat.col].error_radius.values**2.)*3, minrad)) & (dmat.data > exclrad)
    dmat = coo_matrix((dmat.data[mask], (dmat.row[mask], dmat.col[mask])), dmat.shape)
    return dmat

def makeposerrdist(cat1, cat2, maxdist=None, minrad=30, **kwargs):
    """
    Assuming a 2D Gaussian probability distribution of the true source position around the catalogued positions,
    builds a distance matrix restricted to pairs within three times their combined positional error (see cutfaraway).
    """
    if maxdist is None:
        maxdist = max(min(sqrt(cat1.error_radius.dropna().values.max()**2. + cat2.error_radius.dropna().values.max()**2.)*3, 1200), minrad) #search radius not more than 20'
        print("used maxdist", maxdist)
    dmat = makedistmat3d(cat1, cat2, maxdist)
    return cutfaraway(cat1, cat2, dmat, minrad, **kwargs)

def dmattodistprobmat(cat1, cat2, dmat):
    pmat = coo_matrix((normdist.pdf(dmat.data/np.sqrt(cat1.iloc[dmat.row].error_radius.values**2. + cat2.iloc[dmat.col].error_radius.values**2.)), (dmat.row, dmat.col)), dmat.shape)
    return pmat

def makehealpixindex(cat, nside=1024, overwrite=False):
    """
    Produces a healpix index for the catalogue.

    Parameters:
        cat [mandatory]: a catalogue containing RA and DEC columns in degrees
        nside: nside for healpix
        overwrite [False]: overwrite the healpix indexation if it already exists
    """
    if not overwrite and isinstance(cat.index, pandas.MultiIndex) and "healpix" in cat.index.names:
        print("already has healpix indexing")
        return cat
    cat.reset_index(drop=True, inplace=True)
    hpix = healpy.ang2pix(nside, cat.RA.values, cat.DEC.values, lonlat=True)
    cat.set_index(hpix, inplace=True)
    cat.sort_index(inplace=True)
    hpix = np.copy(cat.index.values)
    cat.set_index(pandas.MultiIndex.from_arrays(
        [hpix, np.arange(cat.index.size)],
        sortorder=0, names=["healpix", "Num"]), inplace=True)
    cat.sort_index(inplace=True)
    return cat

def merge_coo_matrixes(dmatlist):
    """
    Assuming the inputs are several fragments of a sparse matrix, produces the combined sparse distance matrix.
    """
    rows = np.concatenate([d.row for d in dmatlist])
    cols = np.concatenate([d.col for d in dmatlist])
    data = np.concatenate([d.data for d in dmatlist])
    return coo_matrix((data, (rows, cols)), shape=np.max([d.shape for d in dmatlist], axis=0))

def distwithhealpix(cat1, cat2, nside=1024, maxarea=20, maxcsize=100000, **kwargs):
    """
    Find counterparts of cat1 relative to cat2 using healpix indexing.
    """
    print("kwargs", kwargs)
    hpix = np.unique(cat1.index.get_level_values("healpix").values)
    hpix2 = np.unique(cat2.index.get_level_values("healpix").values)
    invert = False

    if hpix2.size < hpix.size:
        cat1, cat2 = cat2, cat1
        invert = True
        hpix = hpix2

    print("run distwithhealpix, unique pix size in first catalogue", hpix.size)
    catarea = hpix.size*healpy.nside2pixarea(nside)
    print("catalogue surface area", catarea)
    if catarea > maxarea or hpix.size > maxcsize:
        print("catalogue area is greater than the maximum allowed")
        chank = int(max(catarea//maxarea + 1, hpix.size//maxcsize + 1))
        print("split into %d parts" % chank)
        step = (hpix.size + (chank - 1))//chank
        dmats = []
        for i in range(chank):
            #process the i-th chunk of healpix pixels
            dmats.append(distwithhealpix(cat1.loc[hpix[i*step:min((i + 1)*step, hpix.size)]], cat2, nside, maxarea + 1, **kwargs))
        return merge_coo_matrixes(dmats)
    hpix = np.unique(np.concatenate([hpix, healpy.get_all_neighbours(nside, hpix).ravel()])) #include neighbouring pixels at the same nside
    catsm = cat2.loc[hpix]
    dmat = makeposerrdist(cat1, catsm, **kwargs)
    rows = cat1.index.get_level_values("Num").values[dmat.row]
    cols = catsm.index.get_level_values("Num").values[dmat.col]

    if invert:
        rows, cols = cols, rows

    sidx = np.argsort(rows)

    dmat = coo_matrix((dmat.data[sidx], (rows[sidx], cols[sidx])), shape = (rows.max() + 1, cols.max() + 1))
    return dmat

def get_brightest_counterpart(cat1, cat2, dmat, axis=0):
    if axis == 0:
        fluxcat2 = coo_matrix((cat2.flux.values[dmat.col], (dmat.row, dmat.col)), dmat.shape)
        urow, idx1 = np.unique(dmat.row, return_index=True)
        fmcol = np.asarray(np.argmax(fluxcat2, axis=1)[urow]).reshape(-1)
        return urow, fmcol
    if axis == 1:
        fluxcat1 = coo_matrix((cat1.flux.values[dmat.row], (dmat.row, dmat.col)), dmat.shape)
        ucol = np.unique(dmat.col)
        fmrow = np.asarray(np.argmax(fluxcat1, axis=0)[:, ucol]).reshape(-1)
        return ucol, fmrow

def get_brighter_counterpart(cat1, cat2, dmat):
    fluxcat1 = coo_matrix((cat1.flux.values[dmat.row], (dmat.row, dmat.col)), dmat.shape)
    fluxcat2 = coo_matrix((cat2.flux.values[dmat.col], (dmat.row, dmat.col)), dmat.shape)
    urow = np.unique(dmat.row)
    fmcol = np.unique(np.asarray(np.argmax(fluxcat1, axis=1)[urow]).reshape(-1))
    fmrow = np.asarray(np.argmax(fluxcat2, axis=0)[:, fmcol]).reshape(-1)
    return fmrow, fmcol

def get_closest_counterpart(dmat, axis=0):
    dmati = coo_matrix((1./np.maximum(dmat.data, 1e-9), (dmat.row, dmat.col)), dmat.shape)
    if axis==0:
        fmrow = np.unique(dmat.row)
        fmcol = np.asarray(np.argmax(dmati, axis=1)[fmrow]).reshape(-1)
    if axis==1:
        fmcol = np.unique(dmat.col)
        fmrow = np.asarray(np.argmax(dmati, axis=0)[:, fmcol]).reshape(-1)
    return fmrow, fmcol

def coo_mat_argmax(coomat, axis=0):
    if axis==0:
        fmrow = np.unique(coomat.row)
        fmcol = np.asarray(np.argmax(coomat, axis=1)[fmrow]).reshape(-1)
        return fmcol
    if axis==1:
        fmcol = np.unique(coomat.col)
        fmrow = np.asarray(np.argmax(coomat, axis=0)[:, fmcol]).reshape(-1)
        return fmrow


def argmax_plane_sorted_row_sparse_mat(smat):
    """
    For a sparse matrix filled with some values, produces a 1d array of indexes into the data
    structure stored in smat, giving the argmax position for each row of the matrix.
    """
    urow, uiidx, ucts = np.unique(smat.row, return_counts=True, return_inverse=True)
    ii = np.arange(uiidx.size)
    uicts = np.empty(uiidx.size, np.int)
    uicts[:] = ucts[uiidx]
    ipos = np.empty(urow.size, np.int)
    for i in range(1, ucts.max() + 1):
        mask = ucts == i
        if not np.any(mask):
            continue
        maskall = uicts == i
        ms = mask.sum()
        ipos[mask] = ii[maskall].reshape((-1, i))[np.arange(ms), smat.data[maskall].reshape((-1, i)).argmax(axis=1)]
    return ipos


def get_all_neighbours_fluxes(fluxmat, axis=0):
    #left unfinished in this commit
    ucol, uidx = np.unique(fluxmat.row, return_counts=True)
    sidx = np.argsort(fluxmat.row)

def onetoonedistance(cat1, cat2):
    vec1 = vectopol(cat1)
    vec2 = vectopol(cat2)
    return np.arccos(np.sum(vec1*vec2, axis=1))*180./pi*3600.


class CrossCorr(object):
    def __init__(self, cat1, cat2):
        self.cat1 = cat1
        self.cat2 = cat2
        self.dmat = distwithhealpix(cat1, cat2)
        self.flux2 = self.cat2.flux.values[self.dmat.col]
        self.flux2err = self.cat2.fluxerr.values[self.dmat.col]
        self.flux1 = self.cat1.flux.values[self.dmat.row]

        self.pval = normdist.pdf(self.dmat.data/np.sqrt(self.cat1.iloc[self.dmat.row].error_radius.values**2. + \
                                                        self.cat2.iloc[self.dmat.col].error_radius.values**2.))

        self.urow, self.idx, self.iidx, self.ucts = np.unique(self.dmat.row,
                                                              return_index=True, return_inverse=True, return_counts=True)

        self.pidx = np.empty(self.ucts.size + 1, np.int)
        self.pidx[1:] = np.cumsum(self.ucts)
        self.pidx[0] = 0
        self.eidx = np.lib.stride_tricks.as_strided(self.pidx,
                                                    (self.pidx.size - 1, 2),
                                                    self.pidx.strides*2)
        self.mflux = np.empty(self.urow.size, np.double)
        self.maxflux = np.zeros(self.urow.size, np.double)
        np.maximum.at(self.maxflux, self.iidx, self.flux2)
        self.minflux = np.full(self.urow.size, np.inf, np.double)
        np.minimum.at(self.minflux, self.iidx, self.flux2)
        self.medflux = np.array([np.median(self.flux2[s:e]) for s, e in self.eidx])

        self.uflux = self.cat1["flux"].iloc[self.urow] #flux1.data[self.idx]

        self.newmask = np.isin(self.cat1.index.get_level_values("Num").values, self.urow, assume_unique=True, invert=True)
        self.hascandidates = np.logical_not(self.newmask)
        self.new = self.cat1[self.newmask]
        self.newidx = self.new.index.get_level_values("Num").values
        self.analyse_fluxes("")
        self.bestdmat = get_closest_counterpart(self.dmat)[1]
        self.bestpmat = coo_mat_argmax(self.pmat)
        self.bestdist = pairwise_separation(self.cat1.iloc[self.urow], self.cat2.iloc[self.bestdmat])
        self.bestdistprob = normdist.pdf(self.bestdist/np.sqrt(cat1.iloc[self.urow].error_radius.values**2. + cat2.iloc[self.bestdmat].error_radius.values**2.))
        self.bestdflux = self.cat2.iloc[self.bestdmat].flux.values

    def analyse_fluxes(self, IndexSlice):
        for i in range(2, self.ucts.max() + 1):
            mask = self.ucts == i
            fl = np.array([self.flux2.data[self.pidx[mask] + j] for j in range(i)])
            flmax = np.max(fl, axis=0)
            flmin = np.min(fl, axis=0)
            flmed = np.median(fl, axis=0)
            self.mflux[mask] = flmed
            self.maxflux[mask] = flmax
            self.minflux[mask] = flmin
            #for k, uidx in enumerate(self.urow[mask]):
            #    print(uidx, flmax[k], flmin[k], flmed[k], i)

    def get_all_counterparts(self, idx):
        idx = np.sort(np.asarray(idx))
        idxmask = self.hascandidates[idx]
        urmask = np.isin(self.dmat.row, idx)
        catidx = pandas.DataFrame.copy(self.cat2.iloc[self.dmat.col[urmask]])
        catidx["counterpart"] = self.dmat.row[urmask]
        return catidx

    def get_flux_outliers_mask(self, fscale):
        #print("uflux size", self.uflux.size)
        mask = np.logical_or(self.uflux > self.maxflux*fscale*10., self.uflux < self.minflux*fscale/10.)
        maskout = np.zeros(self.cat1.index.size, np.bool)
        maskout[self.hascandidates] = mask
        return maskout


#RASSEMAP = healpy.read_map("RASS_EXP_1_2048.fits")
#rets, rcval = pickle.load(open("rxsmediandetection.pickle", "rb"))
#rcval = np.array(rcval)

def rxs_detection_limit(exposure):
    """
    Returns the number of counts required for a source to be detected, as a function of exposure.
    """
    return 4.*(1. + (exposure/425)**4.)**(0.6/4.)

def check_rss_cts(ecat):
    expp = RASSEMAP[healpy.ang2pix(2048, ecat.RA.values, ecat.DEC.values, lonlat=True)]
    #idx = np.searchsorted(rets, expp)  #rets is only defined in the commented-out pickle load above
    alert = ecat.flux.values > rxs_detection_limit(expp)*FSCALES["2rxs"]*10./expp
    return ecat.index.get_level_values("Num").values[alert]

def check_rss_cts_individual(esrc):
    expp = RASSEMAP[healpy.ang2pix(2048, esrc.lii, esrc.bii, lonlat=True)]
    alert = False if expp == 0 else esrc.ML_FLUX_0 > rxs_detection_limit(expp)*FSCALES["2rxs"]*10/expp
    return alert

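For orientation, the numbers this limit produces (exposure value invented):

limit_cts = rxs_detection_limit(400.)                       # ~4.4 counts for a 400 s RASS exposure
flux_threshold = limit_cts * FSCALES["2rxs"] * 10. / 400.   # ~0.63 in ML_FLUX_0 units: sources above this are flagged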

#========================================================================================
#to use with django HeasarcBase models

def check_rass_transient(erositasource, rasssources):
    rassfluxes = np.array([rass.count_rate for rass in rasssources])
    if np.any(erositasource.ML_FLUX_0 < rassfluxes*FSCALES["2rxs"]/10.) or \
       np.any(erositasource.ML_FLUX_0 > rassfluxes*FSCALES["2rxs"]*10.):
        erositasource.rosat_transient = True

def check_sxps_transients(erositasource, sxpscounterparts, transientstatus):
    flux = np.array([s.PeakRate_band1 + s.PeakRate_band2 for s in sxpscounterparts])
    transient = np.all(flux*10*FSCALES["sxps2"] < erositasource.ML_FLUX_0) or np.all(flux*0.1*FSCALES["sxps2"] > erositasource.ML_FLUX_0)
    return transientstatus & transient

def check_xmm3_transients(erositasource, xmmcounterparts, transientstatus):
    flux = np.array([s.ep_1_flux + s.ep_3_flux + s.ep_2_flux for s in xmmcounterparts])
    transient = np.all(flux*10*FSCALES["3XMM"] < erositasource.ML_FLUX_0)
    return transientstatus & transient

#======================================================================================


def run_all(ecat):
    newsources = np.ones(ecat.index.size, np.bool)
    transients = None #no information on which of the new sources are transients yet
    transient_catalogues = {}
    crosscat = {}
    crclist = {}
    rosat_outliers = {}

    #note: 2rxs has to be the last one run
    for catalog in ["3XMM", "sxps2", "2rxs"]:
        cattable = pickle.load(open(CATALOGUES[catalog], "rb"))
        crc = CrossCorr(ecat, cattable)
        """
        if catalog == "sxps2":
            midx = dmat.col[np.argsort(dmat.row)]
            mflx = (cattable.iloc[midx].PeakRate_band0.values + cattable.iloc[midx].PeakRate_band1.values)/1.9
            crc.maxflux[crc.dmat.row] = (cattable.iloc[crc.dmat.col].PeakRate_band0.values + cattable.iloc[crc.dmat.col].PeakRate_band1.values)/1.9
        """
        crclist[catalog] = crc
        outliers = crc.get_flux_outliers_mask(FSCALES[catalog])
        transient_catalogues[catalog] = crc.get_all_counterparts(ecat.index.get_level_values("Num")[outliers])
        if transients is None:
            """first candidates to be transients"""
            transients = outliers
        print("outliers indices", ecat.index.get_level_values("Num").values[outliers])
        print("%s catalog outliers" % catalog, outliers.sum())
        print("%s catalog newsources" % catalog, crc.newmask.sum())
        print("transient and new", np.sum(transients & crc.newmask))
        print("transient and outlier", np.sum(transients & outliers))
        transients = np.logical_or(transients & crc.newmask, transients & outliers)
        newsources = np.logical_and(newsources, crc.newmask)
        print("transients:", transients.sum())


    nonrxs2_outliers = check_rss_cts(ecat[crc.newmask])
    haslcs = np.logical_not(newsources[nonrxs2_outliers])
    transients = transients & outliers
    transients[nonrxs2_outliers] = True
    #transients = transients & nonrxs2_outliers
    return transient_catalogues, transients, newsources, nonrxs2_outliers, crclist


def make_report(ecat, transients):
    ntrans = transients.sum()
    ndetect = newsources.size - newsources.sum() - transients.sum()
    nnew = newsources.sum()
    report = \
f"""
total sources in erosita catalogue {ecat.index.size},
number of detections: {ndetect},
new sources: {nnew},
"""




if __name__ == "__main__":
    """
    The idea implemented here: generally speaking, we want to find potential transients in the daily
    erosita source catalogue; therefore, in main, counterparts to the input erosita sources are
    searched for in several X-ray catalogues.
    """

    #fname = sys.argv[1]
    #data = pickle.load(open("eruds2.pickle", "rb")) #fits.getdata(fname)
    #data = pickle.load(open("/srg/work/medvedev/for_andrey/catalog_10000000101.pkl", "rb"))
    print("\n\n\n\nstart %s analysis" % sys.argv[1])
    data = pickle.load(open(sys.argv[1], "rb"))
    try:
        ecat = pandas.DataFrame(data)
        ecat[:10]
    except ValueError:
        dswp = data.newbyteorder("l")
        dswp.byteswap(inplace=True)
        data = dswp
        ecat = pandas.DataFrame(data)

    ecat["error_radius"] = ecat.RADEC_ERR
    ecat["flux"] = ecat.ML_FLUX_0
    #ecat.DEC += 0.001
    ecat = makehealpixindex(ecat)

    transient_catalogues, transients, newsources, nonrxs2_outliers, crclist = run_all(ecat)
    ecat_transients = ecat[transients]


    print("overall statistics, number of sources in catalogue", ecat.index.size, "newsources:", newsources.sum(), "transients:", transients.sum())
    #for row in ecat.transients:
    #    print(row[["RA", "DEC", "flux", "DET_LIKE_0"]])
    #for transient_catalogues

    #rssoutl = check_rss_cts(ecat)
    #rssoutl = rssout[newsources[rssoutl]

    if transients.sum():
        erastranients = ecat[transients]
        erastranients["Num"] = erastranients.index.get_level_values("Num").values
        hdulist = [fits.PrimaryHDU(), fits.BinTableHDU(Table.from_pandas(erastranients), name="SRGE"),]
        for catalogue in transient_catalogues:
            cat = transient_catalogues[catalogue]
            tcat = cat[transients[cat.counterpart.values]]
            if tcat.index.size > 0:
                hdulist.append(fits.BinTableHDU(Table.from_pandas(tcat), name=catalogue))
                hdulist[-1].header.update({"CATALOG": catalogue})
        obsid = os.path.basename(sys.argv[1])
        fits.HDUList(hdulist).writeto("transients_%s.fits" % obsid[8:-4], overwrite=True)
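As the commented-out paths above suggest, the module is meant to be run on a pickled daily catalogue; a typical invocation would be something like the following (the file name is an assumption taken from those comments):

# python srglib/correlate_utils.py catalog_10000000101.pkl
# -> writes the flagged sources and their per-catalogue counterparts to transients_*.fits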
srglib/genericsourceutils.py (new file, 894 lines)
@@ -0,0 +1,894 @@
from astropy.coordinates import SkyCoord # High-level coordinates
from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames
from astropy.coordinates import Angle, Latitude, Longitude # Angles
from ErositaDailySourceCatalogues.models import ErositaDailyCatalog, ErositaSource
from genericsource.models import GenericSource, GenericConnection, GenericCatalog, SrcAuxDATA
import pandas as pd
from astropy.io import fits
from astropy.time import Time, TimeDelta
from astropy_healpix import HEALPix, neighbours
from astropy import units as au
from django_pandas.io import read_frame
from srglib import correlate_utils, check_transient
from srglib.utils import make_source_name
import numpy as np
import re
import glob
import pickle

from math import pi, cos, sin
from scipy.sparse import coo_matrix
from astropy.time import Time, TimeDelta, TimezoneInfo, TimeFromEpoch

from erosurvey.models import NSIDE_PLATES, ORDER_PLATES
from heasarc.models import NSIDE_SOURCES, ORDER, HeasarcBase, GreenSNR, RosatExpMap
from auxcatalogues.models import SimbadSource, RXS2, SXPS2, CSC2, \
    XMM3DR8, ZTFSource, SDSSSpec, GAIASource, ZTFOid, ZTFlc, AllWISEgs, \
    NVSSgs
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.functions.math import Sqrt, Cos, Power, Abs
from django.db.models import F, Q, Max, Subquery, OuterRef, Min, Count, Value
from django.contrib.contenttypes.models import ContentType

import datetime
from time import ctime
import pytz
import subprocess
#xspec = {}
"""
import xspec
from gdpyc import GasMap
from plotly import graph_objects as go
"""


#==============================================================
import os
os.environ["HOME"] = "/tmp/djangoastropy/"
if not os.path.exists("/tmp/djangoastropy/.astropy"): os.makedirs("/tmp/djangoastropy/.astropy")
from astroquery.simbad import Simbad as aSimbad
from astroquery.vizier import Vizier

import logging
import requests
import json
import pprint
import pandas

MJDREF = 51543.875
TZ_UTC = TimezoneInfo(utc_offset=0*au.hour)
TZ_MSK = TimezoneInfo(utc_offset=3*au.hour)

TIMESTR_FORMAT = '%Y.%m.%d %H:%M:%S'

xraycats = [ContentType.objects.get_for_model(m) for m in [RXS2, CSC2, XMM3DR8, SXPS2]] #, ErositaSource]]

hp = HEALPix(nside=NSIDE_SOURCES, order=ORDER, frame=FK5())
hp_plates = HEALPix(nside=NSIDE_PLATES,
                    order=ORDER_PLATES, frame=FK5())

def mission2date(timesec):
    mjdref = Time(MJDREF, format='mjd')
    delta = TimeDelta(timesec, format='sec')
    dtime = mjdref + delta + 3*au.hour
    return dtime.to_datetime(timezone=TZ_MSK)

def str2date(timestr, tzone):
    time_raw = datetime.datetime.strptime(timestr, TIMESTR_FORMAT)
    time_zoned = time_raw.replace(tzinfo=tzone)
    return time_zoned

def date2mission(dtime):
    mjdref = Time(MJDREF, format='mjd')
    dtime = Time(dtime , format='datetime')
    return (dtime - mjdref).sec

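A quick check of the epoch used by these helpers (illustration only):

from astropy.time import Time
print(Time(MJDREF, format="mjd").isot)   # 1999-12-31T21:00:00.000, i.e. 2000-01-01 00:00 Moscow time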

def check_catsources_transient(cat):
    check_sources_transient(cat.erotranssource_set.all())
    cat.ntransients = cat.erotranssource_set.filter(Q(transient=True) | Q(rosat_transient=True)).count()
    cat.save()


from astrobasis.models import GAIADR2 #container of ALL gaia DR2 sources

def find_from_old_gaia_db(srcquery, neighsearch=1, **kwargs):
    """
    For all sources in srcquery, which is assumed to have a many-to-many relationship to the counterparts catalogue,
    performs a search of sources in the counterpart catalogue within max(minrad[=30], 3*sqrt(src.poserr**2 + counterpart.poserr**2)).


    Parameters:
    ----------
    srcquery: django queryset - iterable providing access to the catalogue under consideration

    counterpartscat: catalogue to search counterparts in
    counterpartname: name of the many-to-many relationship field in srcquery
    **kwargs: additional arguments passed to the makeposerrdist function (see doc within)
    """
    scat = read_frame(srcquery)

    uhealpix = np.unique(scat.healpix.values)
    uhealpix = np.unique(np.concatenate([uhealpix, neighbours(uhealpix, NSIDE_SOURCES, order=ORDER).ravel()]))

    ccat = read_frame(GAIADR2.objects.filter(healpix__in=uhealpix))
    ccat.drop(columns=["filename", "id", "ra_error", "dec_error"], inplace=True)
    if scat.index.size == 0 or ccat.index.size == 0:
        return 0

    gexist = GAIASource.objects.filter(healpix__in=uhealpix)
    find_counterparts(srcquery, gexist, neighsearch, **kwargs)
    mask = np.isin(ccat.source_id.values, gexist.values_list("source_id"), invert=True)
    ccat = ccat[mask]

    ccat.error_radius.fillna(0., inplace=True)
    dmat = correlate_utils.makeposerrdist(scat, ccat, **kwargs)
    print(dmat.data)
    uidx, ucts = np.unique(dmat.row, return_counts=True)
    cs = np.empty(ucts.size + 1, np.int)
    cs[0] = 0
    cs[1:] = ucts.cumsum()
    ce = np.lib.stride_tricks.as_strided(cs, (cs.size - 1, 2), cs.strides*2)
    glist = []
    gnewlist = []
    gaiacat = GenericCatalog.objects.get(name="GAIA DR2")
    for sid, cid, d in zip(scat.iloc[dmat.row].id.values, dmat.col, dmat.data):
        src1 = srcquery.get(id=sid)
        src2 = GAIASource.objects.create(**ccat.iloc[cid])
        gaiacat.genericsource_set.add(src2)
        glist.append(GenericConnection(connected=src1, connectto=src2, separation=d))
        glist.append(GenericConnection(connected=src2, connectto=src1, separation=d))
    GenericConnection.objects.bulk_create(glist, ignore_conflicts=True)

def update_catalog_connections(newsrc):
    find_counterparts(newsrc, GenericSource.objects.filter(instance_of=CSC2), minrad=3., maxdist=20.)
    find_counterparts(newsrc, GenericSource.objects.filter(instance_of=SXPS2), minrad=3., maxdist=20.)
    find_counterparts(newsrc, GenericSource.objects.filter(instance_of=XMM3DR8), minrad=3., maxdist=20.)
    find_counterparts(newsrc, GenericSource.objects.filter(instance_of=RXS2), minrad=20., maxdist=80.)
    find_counterparts(newsrc, GenericSource.objects.filter(instance_of=SDSSSpec), minrad=3., maxdist=20.)
    find_counterparts(newsrc, GenericSource.objects.filter(Q(instance_of=ErositaSource) & Q(erositasource__obsdate__lt=cat.stop - datetime.timedelta(days=10))), minrad=3, maxdist=20)
    load_simbad_auxcat(newsrc)
    load_ztf_auxcat(newsrc, maxdist=10, minrad=3)
    load_allwise_auxcat(newsrc, maxdist=10, minrad=3)
    load_NVSS_auxcat(newsrc, maxdist=10, minrad=3)
    newsrc = newsrc.annotate(xct=Count("connections", filter=Q(connections__polymorphic_ctype__in=xraycats)))
    hasxcounterparts = newsrc.filter(xct__gt=0)
    hasxcounterparts.annotate(mcxflux=Max("connections__sxflux")).filter(sxflux__gt=F("mcxflux")*Value(10.)).update(transient=True)
    GenericCatalog.objects.get(name="Erosita flux transients").genericsource_set.add(*hasxcounterparts)
    hasnoxcounterparts = newsrc.filter(xct=0).annotate(rassexp=Subquery(RosatExpMap.objects.filter(healpix=OuterRef("healpix")).values("exposure")[:1]))
    hasnoxcounterparts = hasnoxcounterparts.annotate(rasslim=Value(4.*5.76*0.89211e-12)*Power(Value(1.) + Power(F("rassexp")/Value(425), Value(4.)), Value(0.15))/F("rassexp"))
    hasnoxcounterparts.filter(rassexp__gt=0, sxflux__gt=F("rasslim")*Value(10.)).update(rosat_transient=True)
    load_simbad_auxcat(newsrc)


def read_trans_pickle(filename):
    data = pandas.read_pickle(filename)
    data.rename(columns={"RA":"ra", "DEC":"dec", "L":"lii", "B":"bii", "CTS":"ML_CTS_0", "LIKE": "DET_LIKE_0",}, inplace=True)
    sc = SkyCoord(data.ra.values, data.dec.values, unit="deg", frame="fk5")
    hp = HEALPix(nside=NSIDE_SOURCES, order=ORDER, frame=FK5())
    hp_plate = HEALPix(nside=NSIDE_PLATES,
                       order=ORDER_PLATES,
                       frame=FK5())
    data["healpix"] = hp.skycoord_to_healpix(sc)
    data["healpix_plate"] = hp_plate.skycoord_to_healpix(sc)
    data["name"] = [make_source_name("SRGe", r, d) for r, d in zip(data.ra.values, data.dec.values)]

    return data.drop(columns=["LIKE_REF", "REF_FLUX", "D2D", "LIKE_REF", "ID_REF", "EXT_REF", \
                              "CTS_REF", "EXP", "EXP_REF", "FLAG", "G_NH", "g_d2d", "g_s", \
                              "g_id", "g_ra", "g_dec", "s_d2d", "s_id", "s_z", "s_otype", \
                              "sdss_d2d", "sdss_id", "sdss_z", "FLUX", "ID_SRC", "RATIO"])

def read_esrc_pack(filename):
    ftime = datetime.datetime.strptime(ctime(os.path.getctime(filename)), "%a %b %d %H:%M:%S %Y")
    df = pandas.read_pickle(filename)
    df.rename(columns={"RADEC_ERR": "error_radius",
                       "srcID": "srcid", "ML_EXP_1":'APE_EXP_1'}, inplace=True)

    if "RA_fin" in df.columns.values:
        df.rename(columns={"RA_fin":"ra", "DEC_fin":"dec"}, inplace=True)
    elif "RA_corr" in df.columns.values:
        df.rename(columns={"RA_corr":"ra", "DEC_corr":"dec"}, inplace=True)
    else:
        df.rename(columns={"RA":"ra", "DEC":"dec"}, inplace=True)

    sc = SkyCoord(df.ra.values, df.dec.values, unit="deg", frame="fk5")
    hp = HEALPix(nside=NSIDE_SOURCES, order=ORDER, frame=FK5())
    hp_plate = HEALPix(nside=NSIDE_PLATES,
                       order=ORDER_PLATES,
                       frame=FK5())
    df["lii"] = sc.galactic.l.value
    df["bii"] = sc.galactic.b.value
    df["healpix"] = hp.skycoord_to_healpix(sc)
    df["healpix_plate"] = hp_plate.skycoord_to_healpix(sc)
    df["name"] = [make_source_name("SRGe", r, d) for r, d in zip(df.ra.values, df.dec.values)]
    df["id"] = df.index.values
    df["sxflux"] = df.ML_FLUX_0.values
    df["obsdate"] = (Time(51543.87, format="mjd") + TimeDelta((df.TSTART.values + df.TSTOP.values)/2., format="sec")).to_datetime(timezone=pytz.UTC) #datetime.timezone.utc)
    df["tstart"] = (Time(51543.87, format="mjd") + TimeDelta(df.TSTART.values, format="sec")).to_datetime(timezone=pytz.UTC)
    df["tstop"] = (Time(51543.87, format="mjd") + TimeDelta(df.TSTOP.values, format="sec")).to_datetime(timezone=pytz.UTC)
    df["detected"] = True
    return df.drop(columns=df.columns.difference([f.name for f in ErositaSource._meta.get_fields()] + ["id",])), ftime

def setup_xspec(xspec):
    """Setup xspec parameters."""
    xspec.Fit.statMethod = 'cstat'
    xspec.Fit.query = 'yes'
    xspec.Fit.nIterations = 100
    xspec.Xset.abund = 'wilm'
    xspec.Plot.area = True
    xspec.Plot.xLog = True
    xspec.Plot.yLog = True
    xspec.Plot.xAxis = 'keV'
    xspec.Plot.setRebin(1.5, 20)

def fit_powerlaw_v2(fname, nH=0.006, G=2.0, norm=1e-4, fit_nH=True, pdfname=None):
    # Initialization
    dirpath = os.path.dirname(fname)
    cwd = os.getcwd()
    os.chdir(dirpath if dirpath else "./")

    #xspec.Xset.openLog(fname[:-4] + "log")
    xspec.Xset.chatter = 0
    xspec.AllData.clear()
    xspec.AllModels.clear()
    xspec.AllChains.clear()
    # Fitting
    sp = xspec.AllData(fname)
    xspec.AllData.ignore('**-0.3 8.-**')
    xspec.AllData.ignore('bad')
    model = xspec.Model('tbabs*pow')
    model.TBabs.nH.values = [nH, 0.001, nH, nH, 20, 20]
    model.powerlaw.PhoIndex.values = [G, 0.01, 0.1, 0.1, 10, 10]
    model.powerlaw.norm.values = [norm, 1e-6, 1e-10, 1e-10, 100, 100]

    # first, fit the normalization
    model.TBabs.nH.frozen = True
    model.powerlaw.PhoIndex.frozen = True
    xspec.Fit.perform()
    model.TBabs.nH.frozen = False
    model.powerlaw.PhoIndex.frozen = False
    if not fit_nH:
        model.TBabs.nH.frozen = True
    # then make a full fit
    xspec.Fit.perform()
    xspec.Fit.error('stop 1000,1.,2')
    xspec.Fit.perform()
    xspec.Fit.error('stop 1000,1.,2')
    xspec.Fit.perform()
    if fit_nH:
        xspec.Fit.error('stop 1000,1.,1')
        xspec.Fit.perform()
    #xspec.Fit.error('1. 1-3')
    #bestfit = XContainer(srcID)
    #bestfit.get_session()
    #xspec.Xset.closeLog()
    xspec.Plot.device = "/null"
    xspec.Plot("eeuf")
    x, y, xe, ye, mo = xspec.Plot.x(), xspec.Plot.y(), xspec.Plot.xErr(), xspec.Plot.yErr(), xspec.Plot.model()
    return model, np.array([x, y, xe, ye, mo])

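A hedged usage sketch (file name invented; requires PyXspec and a grouped spectrum whose header points at the background, RMF and ARF), mirroring how add_aux_data below drives these two helpers:

# import xspec
# setup_xspec(xspec)                                   # once per session
# model, pdata = fit_powerlaw_v2("src_42_020_SourceSpec_00001_e1_r5.fits")
# print(model.powerlaw.PhoIndex.values[0], model.TBabs.nH.values[0]*1e21)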
def add_aux_data(srcid, survey, coord=None, newsourceid=None, xspec=None):
    if xspec is None:
        import xspec
        from gdpyc import GasMap
        #from plotly import graph_objects as go

    specfile = glob.glob("/data/erosita/transients/products/src_%d_020_SourceSpec_00001_e%d.fits*" % (srcid, survey))
    bkgfile = glob.glob("/data/erosita/transients/products/src_%d_020_BackgrSpec_00001_e%d.fits*" % (srcid, survey))
    arffile = glob.glob("/data/erosita/transients/products/src_%d_020_ARF_00001_e%d.fits*" % (srcid, survey))
    rmffile = glob.glob("/data/erosita/transients/products/src_%d_020_RMF_00001_e%d.fits*" % (srcid, survey))
    specimg = glob.glob("/data/erosita/transients/products/src_%d_020_SourceSpec_00001_e%d.pdf" % (srcid, survey))
    phimg = glob.glob("/data/erosita/transients/products/trans_%d_e%d.png" % (srcid, survey))
    specmo = glob.glob("/data/erosita/transients/products/src_%d_020_SourceSpec_00001_e%d.pdf" % (srcid, survey))
    lcpdf = glob.glob("/data/erosita/transients/products/lc*_%d_e%d.pdf" % (srcid, survey))

    res = {}
    if specfile: res["spec"] = os.path.basename(specfile[0])
    if bkgfile: res["bkg"] = os.path.basename(bkgfile[0])
    if arffile: res["arf"] = os.path.basename(arffile[0])
    if rmffile: res["rmf"] = os.path.basename(rmffile[0])
    if specimg: res["specimg"] = os.path.basename(specimg[0])
    if phimg: res["img"] = os.path.basename(phimg[0])
    if specmo: res["xcm"] = os.path.basename(specmo[0])
    if lcpdf: res["lcpdf"] = os.path.basename(lcpdf[0])
    print(res, newsourceid)
    cwd = os.getcwd()
    os.chdir("/data/erosita/transients/products")
    rname = "" if not "spec" in res else res["spec"]
    if newsourceid:
        dpath = "/data/products/erotrans/%d" % newsourceid
        ppath = "/data/erosita/transients/products/"
        if not os.path.exists(dpath):
            os.mkdir(dpath)
        if "spec" in res: subprocess.run(["cp", os.path.join(ppath, res["spec"]), os.path.join(dpath, res["spec"])])
        if "bkg" in res: subprocess.run(["cp", os.path.join(ppath, res["bkg"]), os.path.join(dpath, res["bkg"])])
        if "arf" in res: subprocess.run(["cp", os.path.join(ppath, res["arf"]), os.path.join(dpath, res["arf"])])
        if "rmf" in res: subprocess.run(["cp", os.path.join(ppath, res["rmf"]), os.path.join(dpath, res["rmf"])])
        if "specimg" in res: subprocess.run(["cp", os.path.join(ppath, res["specimg"]), os.path.join(dpath, res["specimg"])])
        if "img" in res: subprocess.run(["cp", os.path.join(ppath, res["img"]), os.path.join(dpath, res["img"])])
        if "xcm" in res: subprocess.run(["cp", os.path.join(ppath, res["xcm"]), os.path.join(dpath, res["xcm"])])
        if "lcpdf" in res: subprocess.run(["cp", os.path.join(ppath, res["lcpdf"]), os.path.join(dpath, res["lcpdf"])])
        os.chdir(dpath)
        p = subprocess.Popen(["echo", "group min 5 & exit"], stdout=subprocess.PIPE)
        if "spec" in res:
            rname = res["spec"][:-5] + "_r5.fits"
            subprocess.run(["grppha", res["spec"], rname], stdin=p.stdout)

    cts = 0 if not "spec" in res else fits.getheader(res["spec"], 1)["CTS"]
    try:
        if cts > 20 or coord is None:
            model, pdata = fit_powerlaw_v2(rname)
        else:
            model, pdata = fit_powerlaw_v2(rname, fit_nH=False, nH=GasMap(coord, nhmap="HI4PI",hires=True).value/1e22)
    except:
        print("fail to fit")
    else:
        res["gamma"] = model.powerlaw.PhoIndex.values[0]
        res["nh"] = model.TBabs.nH.values[0]*1e21
        res["norm"] = model.powerlaw.norm.values[0]
        res["gammael"] = model.powerlaw.PhoIndex.error[0]
        res["gammaeu"] = model.powerlaw.PhoIndex.error[1]
        pickle.dump(pdata, open(os.path.join(dpath, 'pdata.pickle'), 'wb'))
        x, y, xe, ye, mo = pdata
        fig = go.Figure(data=[go.Scatter(x=x, y=y, error_x={"type":"data", "array":xe}, error_y={"type":"data", "array":ye}, mode="markers"),
                              go.Scatter(x=x, y=mo, mode="lines")], layout=dict(width=1000))
        fig.update_xaxes(type="log")
        fig.update_yaxes(type="log")
        open(os.path.join(dpath, "spec.html"), "w").write(fig.to_html(full_html=False))
    return res

def load_erosita_trans_daily(filename, survey, apps=5, load_cat="Erosita surveys transients"):
|
||||
ncat, ftime = read_esrc_pack(filename)
|
||||
ncat["survey"] = survey
|
||||
return load_erosita_trans_daily_cat(ncat, ftime, apps, load_cat)
|
||||
|
||||
|
||||
def load_erosita_trans_daily_cat(ncat, ftime, apps=5, load_cat="Erosita surveys transients"):
|
||||
import xspec
|
||||
from gdpyc import GasMap
|
||||
from plotly import graph_objects as go
|
||||
"""
|
||||
there is a new type of file, which contains only new transients between first and second survey
|
||||
"""
|
||||
cat = GenericCatalog.objects.get(name = load_cat)
|
||||
if cat.stop == ftime:
|
||||
print("latest version already loaded")
|
||||
return None
|
||||
catsrc = ErositaSource.objects.filter(id__in=cat.genericsource_set.all())
|
||||
ecat = read_frame(catsrc)
|
||||
urow = np.empty(0, np.int)
|
||||
ucol = np.empty(0, np.int)
|
||||
if ecat.index.size:
|
||||
maske = np.ones(ecat.index.size, np.bool)
|
||||
maskn = np.ones(ncat.index.size, np.bool)
|
||||
eidx = np.arange(ecat.index.size)
|
||||
nidx = np.arange(ncat.index.size)
|
||||
for i in range(21):
|
||||
if not (np.any(maske) and np.any(maskn)):
|
||||
break
|
||||
row, col = correlate_utils.get_closest_unique(ecat[maske], ncat[maskn], apps)
|
||||
if row.size == 0 or np.all(~maske) or np.all(~maskn):
|
||||
break
|
||||
urow = np.concatenate([urow, eidx[maske][row]])
|
||||
ucol = np.concatenate([ucol, nidx[maskn][col]])
|
||||
maske[urow] = False
|
||||
maskn[ucol] = False
|
||||
|
||||
if urow.size:
|
||||
mask = np.array([abs((ecat.iloc[i].obsdate - ncat.iloc[j].obsdate).days) for i, j in zip(urow, ucol)]) < 3
|
||||
urow, ucol = urow[mask], ucol[mask]
|
||||
|
||||
setup_xspec(xspec)
|
||||
|
||||
ncatidd = ncat.drop(columns=["id",])
|
||||
print(urow.size, ncat.index.size)
|
||||
print("list of existing sources", urow, ucol)
|
||||
for i, j in zip(urow, ucol):
|
||||
src = catsrc.filter(id__in=[ecat.iloc[i].id,])
|
||||
src.update(**ncatidd.iloc[j])
|
||||
src = src.first()
|
||||
sc = SkyCoord(src.ra, src.dec, unit="deg", frame="fk5")
|
||||
print(src.name, ncat.iloc[j].name)
|
||||
try:
|
||||
auxdata = add_aux_data(ncat.iloc[j].name, src.survey, sc, src.id, xspec)
|
||||
except Exception:
|
||||
print("warn: no spectum info for %d found" % ncat.iloc[j].id)
|
||||
else:
|
||||
print("auxdata", auxdata)
|
||||
try:
|
||||
adata = src.srcauxdata
|
||||
print(adata)
|
||||
except:
|
||||
print(src)
|
||||
print(src.name, src.id)
|
||||
adata = SrcAuxDATA.objects.create(src=src)
|
||||
|
||||
print("update source", auxdata)
|
||||
for key, val in auxdata.items():
|
||||
adata.__setattr__(key, val)
|
||||
print(key, val, getattr(adata, key))
|
||||
adata.save()
|
||||
|
||||
"""
|
||||
if urow.size != ecat.index.size:
|
||||
mask = np.ones(ecat.index.size, np.bool)
|
||||
mask[urow] = False
|
||||
catr = catsrc.filter(id__in=ecat[mask].id.values)
|
||||
cat.genericsource_set.remove(*catr)
|
||||
GenericCatalog.objects.get(name="Erosita surveys transients, removed").genericsource_set.add(*catr)
|
||||
"""
|
||||
if ucol.size < ncat.index.size:
|
||||
catsrc.filter(srcstatus="new").update(srcstatus="")
|
||||
idx = np.arange(ncat.index.size)
|
||||
idx = idx[np.isin(idx, ucol, invert=True, assume_unique=True)]
|
||||
newsrc = []
|
||||
for i in idx:
|
||||
src = ErositaSource.objects.create(**ncatidd.iloc[i], srcstatus="new")
|
||||
sc = SkyCoord(src.ra, src.dec, unit="deg", frame="fk5")
|
||||
print("processing", ncat.iloc[i].name, src.id)
|
||||
try:
|
||||
auxdata = add_aux_data(ncat.iloc[i].name, src.survey, sc, src.id, xspec)
|
||||
except Exception:
|
||||
print("warn: no spectum info found")
|
||||
else:
|
||||
pass
|
||||
print('create new source', auxdata)
|
||||
SrcAuxDATA.objects.create(src=src, **auxdata)
|
||||
#SrcAuxDATA.objects.create(src=src, **auxdata)
|
||||
newsrc.append(src)
|
||||
|
||||
cat.genericsource_set.add(*newsrc)
|
||||
newsrc = cat.genericsource_set.filter(id__in=[src.id for src in newsrc])
|
||||
find_counterparts(newsrc, GenericSource.objects.filter(instance_of=CSC2), minrad=3., maxdist=20.)
|
||||
find_counterparts(newsrc, GenericSource.objects.filter(instance_of=SXPS2), minrad=3., maxdist=20.)
|
||||
find_counterparts(newsrc, GenericSource.objects.filter(instance_of=XMM3DR8), minrad=3., maxdist=20.)
|
||||
find_counterparts(newsrc, GenericSource.objects.filter(instance_of=RXS2), minrad=20., maxdist=80.)
|
||||
find_counterparts(newsrc, GenericSource.objects.filter(instance_of=SDSSSpec), minrad=3., maxdist=20.)
|
||||
#find_from_old_gaia_db(newsrc, minrad=5, maxdist=20)
|
||||
#load_allwise_auxcat(newsrc, maxdist=10, minrad=3)
|
||||
#find_counterparts(newsrc, GenericSource.objects.filter(instance_of=AllWISEgs), minrad=3., maxdist=20.)
|
||||
#load_NVSS_auxcat(newsrc, maxdist=10, minrad=3)
|
||||
#find_counterparts(newsrc, GenericSource.objects.filter(instance_of=NVSSgs), minrad=3., maxdist=20.)
|
||||
#find_counterparts(newsrc, GenericSource.objects.filter(Q(instance_of=ErositaSource) & Q(erositasource__obsdate__lt=cat.stop - datetime.timedelta(days=10))), minrad=3, maxdist=20)
|
||||
#load_simbad_auxcat(newsrc)
|
||||
#find_counterparts(newsrc, GenericSource.objects.filter(instance_of=SimbadSource), minrad=3., maxdist=20.)
|
||||
#load_ztf_auxcat(newsrc, maxdist=10, minrad=3)
|
||||
#find_counterparts(newsrc, GenericSource.objects.filter(instance_of=ZTFOid), minrad=3., maxdist=20.)
|
||||
|
||||
cat.stop = ftime
|
||||
cat.save()
|
||||
return newsrc
|
||||
|
||||
|
||||
def load_erosita_daily_source(filename):
|
||||
""" Loads catalog of eRosita sources to database.
|
||||
|
||||
This procedure accepts filename in format 'catalog_OBSID.pkl',
|
||||
where OBSID is 11-digit identificator. In case this OBSID is
|
||||
already loaded to DB, the procedure exits. Otherwise,
|
||||
catalog is loaded to DB.
|
||||
Parameters
|
||||
----------
|
||||
filename : str
|
||||
Absolute path to the input catalog file
|
||||
|
||||
Returns
|
||||
-------
|
||||
status : int
|
||||
Status code {0:'Already in DB', ID:'Object ID or Primary key in DB'}
|
||||
|
||||
|
||||
:Author:
|
||||
Andrey Semena <san@cosmos.ru>
|
||||
|
||||
"""
|
||||
#metadata = json.load(open(filename.split(".")[0] + ".json"))
|
||||
|
||||
obsid = re.findall("\d+", filename)[0]
|
||||
|
||||
logging.debug("Loading %s, ObsID: %s" % (filename,obsid,))
|
||||
|
||||
|
||||
check_repeat = ErositaDailyCatalog.objects.filter(eroday_start=obsid[:5]).first()
|
||||
if check_repeat and check_repeat.obsid >= obsid:
|
||||
print("obsid exists")
|
||||
logging.info("ObsID %s is already loaded, skip" % obsid)
|
||||
return 0
|
||||
|
||||
logging.info("Loading ObsID %s" % obsid)
|
||||
|
||||
hp = HEALPix(nside=NSIDE_SOURCES,
|
||||
order=ORDER, frame=FK5())
|
||||
|
||||
hp_plate = HEALPix(nside=NSIDE_PLATES,
|
||||
order=ORDER_PLATES,
|
||||
frame=FK5())
|
||||
|
||||
datafile=filename
|
||||
|
||||
eroday_start = int(obsid[0:5])
|
||||
eroday_stop = int(obsid[5:10])
|
||||
ts_start = eroday_start*14400
|
||||
ts_stop = eroday_stop*14400
|
||||
start = mission2date(ts_start)
|
||||
stop = mission2date(ts_stop)
|
||||
dtime_start = Time(start , format='datetime')
|
||||
dtime_stop = Time(stop , format='datetime')
|
||||
|
||||
survey = ErositaDailyCatalog(name="ErositaDaily",
|
||||
obsid=obsid,
|
||||
eroday_start=eroday_start,
|
||||
eroday_stop=eroday_stop,
|
||||
start=start,
|
||||
stop=stop,
|
||||
mjd_start=dtime_start.mjd,
|
||||
mjd_stop=dtime_stop.mjd)
|
||||
|
||||
survey.save()
|
||||
|
||||
if check_repeat and check_repeat.obsid < obsid:
|
||||
print("removing older version of the catalog")
|
||||
logging.info("remove older version of the catalog: %s" % obsid)
|
||||
check_repeat.delete()
|
||||
|
||||
df=pd.read_pickle(datafile)
|
||||
df.drop(columns=["tilenum", 'srcname', 'hpidx', "TSTART", "ID_SRC",
|
||||
"TSTOP",], inplace=True, errors="ignore")
|
||||
df.rename(columns={"RA":"ra", "DEC":"dec", "RADEC_ERR": "error_radius",
|
||||
"srcID": "srcid", "ML_EXP_1":'APE_EXP_1'}, inplace=True)
|
||||
df["flux"] = df.ML_FLUX_0.values*0.8911e-12
|
||||
df["sxflux"] = df.flux
|
||||
sc = SkyCoord(df.ra.values, df.dec.values, unit="deg", frame="fk5")
|
||||
df["lii"] = sc.galactic.l.value
|
||||
df["bii"] = sc.galactic.b.value
|
||||
sc = sc[df.lii.values < 180.]
|
||||
df = df[df.lii.values < 180.]
|
||||
df["healpix"] = hp.skycoord_to_healpix(sc)
|
||||
df["name"] = [make_source_name("SRGe", row.ra, row.dec) for row in df[["ra", "dec"]].itertuples()]
|
||||
df["healpix_plate"] = hp_plate.skycoord_to_healpix(sc)
|
||||
|
||||
enew = []
|
||||
for idx, src in df.iterrows():
|
||||
enew.append(ErositaSource.objects.create(obsdate=survey.stop, **src))
|
||||
survey.genericsource_set.add(*enew)
|
||||
#newsrc = ErositaSource.objects.filter(catalog=survey) #survey.genericsource_set.all()
|
||||
#newsrc = survey.genericsource_set.all()
|
||||
newsrc = ErositaSource.objects.filter(id__in=survey.genericsource_set.all())
|
||||
find_counterparts(newsrc, GenericSource.objects.filter(instance_of=CSC2), minrad=3., maxdist=20.)
|
||||
find_counterparts(newsrc, GenericSource.objects.filter(instance_of=SXPS2), minrad=3., maxdist=20.)
|
||||
find_counterparts(newsrc, GenericSource.objects.filter(instance_of=XMM3DR8), minrad=3., maxdist=20.)
|
||||
find_counterparts(newsrc, GenericSource.objects.filter(instance_of=RXS2), minrad=20., maxdist=80.)
|
||||
find_counterparts(newsrc, GenericSource.objects.filter(instance_of=SDSSSpec), minrad=3., maxdist=20.)
|
||||
find_counterparts(newsrc, GenericSource.objects.filter(Q(instance_of=ErositaSource) & Q(erositasource__obsdate__lt=cat.stop - datetime.timedelta(days=10))), minrad=3, maxdist=20)
|
||||
load_simbad_auxcat(newsrc)
|
||||
newsrc = newsrc.annotate(xct=Count("connections", filter=Q(connections__polymorphic_ctype__in=xraycats)))
|
||||
hasxcounterparts = newsrc.filter(xct__gt=0)
|
||||
hasxcounterparts.annotate(mcxflux=Max("connections__sxflux")).filter(sxflux__gt=F("mcxflux")*Value(10.)).update(transient=True)
|
||||
GenericCatalog.objects.get(name="Erosita flux transients").genericsource_set.add(*hasxcounterparts)
|
||||
hasecounterparts = newsrc.annotate(ect=Count("connections", filter=Q(connections__polymorphic_ctype=ContentType.objects.get_for_model(ErositaSource))))
|
||||
hasecounterpartstr = hasecounterparts.filter(ect__gt=0, sxflux__gt=Value(10.)*Max("connections__sxflux", filter=Q(connections__polymorphic_ctype=ContentType.objects.get_for_model(ErositaSource))))
|
||||
hasnoxcounterparts = newsrc.filter(xct=0).annotate(rassexp=Subquery(RosatExpMap.objects.filter(healpix=OuterRef("healpix")).values("exposure")[:1]))
|
||||
GenericCatalog.objects.get(name="Erosita flux transients").genericsource_set.add(*hasecounterpartstr.filter(xct=0))
|
||||
hasnoxcounterparts = hasnoxcounterparts.annotate(rasslim=Value(4.*5.76*0.89211e-12)*Power(Value(1.) + Power(F("rassexp")/Value(425), Value(4.)), Value(0.15))/F("rassexp"))
|
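# rasslim above is an estimate of the RASS flux limit at the source position, scaled from
# the local RASS exposure (0.89211e-12 converts count rate to erg s^-1 cm^-2); the next
# line flags sources brighter than ten times this limit as rosat_transient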
||||
hasnoxcounterparts.filter(rassexp__gt=0, sxflux__gt=F("rasslim")*Value(10.)).update(rosat_transient=True)
|
||||
GenericCatalog.objects.get(name="Erosita rosat expmap transient").genericsource_set.add(*hasnoxcounterparts.filter(rosat_transient=True))
|
||||
load_ztf_auxcat(newsrc, maxdist=10, minrad=3)
|
||||
return newsrc
|
||||
|
||||
|
||||
def find_counterparts(srcquery, counterpatscat, neighsearch=1, **kwargs):
|
||||
"""
|
||||
for all sources in srcquery, which is assumed to have a many-to-many relationship to the counterpatscat catalog,
|
||||
performs a search for sources in the counterpart catalog within max(minrad[=30], 3*sqrt(src.poserr**2 + counterpart.poserr**2))
|
||||
|
||||
|
||||
Parameters:
|
||||
----------
|
||||
srcquery: django queryset providing access to the catalog under consideration
|
||||
|
||||
counterpatscat: catalog to search counterparts in
|
||||
counterpartname: name of the many-to-many relationship field in srcquery
|
||||
**kwargs: additional arguments passed to the makeposerrdist function (see its docstring)
|
||||
"""
|
||||
scat = read_frame(srcquery)
|
||||
|
||||
uhealpix = np.unique(scat.healpix.values)
|
||||
uhealpix = np.unique(np.concatenate([uhealpix, neighbours(uhealpix, NSIDE_SOURCES, order=ORDER).ravel()]))
|
||||
|
||||
ccat = read_frame(counterpatscat.filter(healpix__in=uhealpix))
|
||||
if scat.index.size == 0 or ccat.index.size == 0:
|
||||
return 0
|
||||
|
||||
ccat.error_radius.fillna(0., inplace=True)
|
||||
dmat = correlate_utils.makeposerrdist(scat, ccat, **kwargs)
|
||||
uidx, ucts = np.unique(dmat.row, return_counts=True)
|
||||
cs = np.empty(ucts.size + 1, int)
|
||||
cs[0] = 0
|
||||
cs[1:] = ucts.cumsum()
|
||||
ce = np.lib.stride_tricks.as_strided(cs, (cs.size - 1, 2), cs.strides*2)
|
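# cs holds the cumulative number of matches per unique source row, and ce views it as
# (start, stop) index pairs into the sorted match list; ce is not used further below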
||||
glist = []
|
||||
for sid, cid, d in zip(scat.iloc[dmat.row].id.values, ccat.iloc[dmat.col].id.values, dmat.data):
|
||||
if sid == cid:
|
||||
continue
|
||||
src1, src2 = srcquery.get(id=sid), counterpatscat.get(id=cid)
|
||||
glist.append(GenericConnection(connected=src1, connectto=src2, separation=d))
|
||||
glist.append(GenericConnection(connected=src2, connectto=src1, separation=d))
|
||||
print("bulk create")
|
||||
GenericConnection.objects.bulk_create(glist, ignore_conflicts=True)
|
||||
|
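# Hedged usage sketch (names follow the calls made elsewhere in this module): cross-match
# a queryset of newly loaded sources against the Simbad auxiliary catalog, creating
# GenericConnection rows for every pair within the positional tolerance.
def _example_crossmatch_simbad(newsrc):
    return find_counterparts(newsrc,
                             GenericSource.objects.filter(instance_of=SimbadSource),
                             minrad=3., maxdist=20.)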
||||
def find_closest_counterpart(srcquery, counterpatscat, neighsearch=1):
|
||||
scat = read_frame(srcquery)
|
||||
|
||||
#uhealpix = np.unique(scat.healpix.values)
|
||||
#uhealpix = np.unique(np.concatenate([uhealpix, neighbours(uhealpix, NSIDE_SOURCES, order=ORDER).ravel()]))
|
||||
|
||||
#ccat = read_frame(counterpatscat.filter(healpix__in=uhealpix))
|
||||
ccat = read_frame(counterpatscat)
|
||||
|
||||
if scat.index.size == 0 or ccat.index.size == 0:
|
||||
return 0
|
||||
|
||||
ccat.error_radius.fillna(0., inplace=True)
|
||||
dmat = correlate_utils.makedistmat3d(scat, ccat, 1200)
|
||||
sidx = np.argsort(dmat.data)
|
||||
print(dmat.data.size)
|
||||
urow, uidx = np.unique(dmat.row[sidx], return_index=True)
|
||||
ucol = dmat.col[sidx][uidx]
|
||||
|
||||
glist = []
|
||||
for sid, cid, d in zip(scat.iloc[urow].id.values, ccat.iloc[ucol].id.values, dmat.data[sidx][uidx]):
|
||||
if sid == cid:
|
||||
continue
|
||||
src1, src2 = srcquery.get(id=sid), counterpatscat.get(id=cid)
|
||||
glist.append(GenericConnection(connected=src1, connectto=src2, separation=d))
|
||||
glist.append(GenericConnection(connected=src2, connectto=src1, separation=d))
|
||||
GenericConnection.objects.bulk_create(glist, ignore_conflicts=True)
|
||||
|
||||
def load_simbad_auxcat(genericsources, **kwargs):
|
||||
hp = HEALPix(nside=NSIDE_SOURCES, order=ORDER, frame=FK5())
|
||||
srcs = read_frame(genericsources)
|
||||
s1 = aSimbad()
|
||||
s1.reset_votable_fields()
|
||||
s1.add_votable_fields("z_value", "otype", "plx", "parallax", "plx_error", "plx_bibcode")
|
||||
fails = []
|
||||
simbadgenericcat = GenericCatalog.objects.get(name="Simbad")
|
||||
print(srcs.index.size)
|
||||
for i in range((srcs.index.size + 4095)//4096):
|
||||
eloc = srcs.iloc[i*4096:min((i + 1)*4096, srcs.index.size)]
|
||||
try:
|
||||
simbadc = s1.query_region(SkyCoord(eloc.ra.values, eloc.dec.values, unit="deg", frame="fk5"), radius=kwargs.get("maxdist", 60.)*au.arcsec).to_pandas()
|
||||
except:
|
||||
fails.append(eloc.id.values)
|
||||
continue
|
||||
print(simbadc.index.size)
|
||||
sc = SkyCoord(simbadc.RA.values + " " + simbadc.DEC.values, unit=("hourangle", "deg"), frame="fk5")
|
||||
simbadc["ra"] = sc.fk5.ra.value
|
||||
simbadc["dec"] = sc.fk5.dec.value
|
||||
simbadc["healpix"] = hp.skycoord_to_healpix(sc)
|
||||
#simbadc['name'] = simbadc.MAIN_ID.str.decode("ascii")
|
||||
#print(simbadc.name)
|
||||
#simbadc["name"] = simbadc.name.astype("str")
|
||||
simbadc["name"] = simbadc.MAIN_ID.astype("str")
|
||||
print(simbadc.name)
|
||||
simbadc['main_id'] = simbadc.name
|
||||
simbadc['obj_class'] = simbadc.OTYPE.str.decode("ascii")
|
||||
simbadc['coo_bibcode'] = simbadc.COO_BIBCODE.str.decode("ascii")
|
||||
simbadc['coo_wavelength'] = simbadc.COO_WAVELENGTH.str.decode("ascii")
|
||||
simbadc.PLX_BIBCODE = simbadc.PLX_BIBCODE.fillna(simbadc.PLX_BIBCODE_2)
|
||||
simbadc['plx_bib'] = simbadc.PLX_BIBCODE.str.decode("ascii")
|
||||
simbadc.PLX_VALUE = simbadc.PLX_VALUE.fillna(simbadc.PLX_VALUE_2)
|
||||
simbadc.PLX_ERROR = simbadc.PLX_ERROR.fillna(simbadc.PLX_ERROR_2)
|
||||
print(simbadc)
|
||||
print(simbadc.name)
|
||||
#return simbadc
|
||||
simbadc.drop_duplicates('name', inplace=True)
|
||||
simbadc.rename(columns={"COO_ERR_MINA": "error_radius", "Z_VALUE": "z",
|
||||
"PLX_VALUE":"plx", "PLX_ERROR": "plx_err"}, inplace=True)
|
||||
simbadc.error_radius.fillna(0., inplace=True)
|
||||
|
||||
duplicates = SimbadSource.objects.filter(name__in=simbadc.name.values)
|
||||
print(simbadc.index.size)
|
||||
print(simbadc.name)
|
||||
if duplicates.exists():
|
||||
duplicatesnames = np.array(duplicates.values_list("name"))[:, 0]
|
||||
mask = np.isin(simbadc.name.values, duplicatesnames, invert=True)
|
||||
simbadc = simbadc[mask]
|
||||
print(simbadc.index.size)
|
||||
simbadc.drop(columns=["RA", "DEC", "RA_PREC", "DEC_PREC", "COO_ERR_MAJA",
|
||||
"COO_ERR_ANGLE", "COO_QUAL", "MAIN_ID",
|
||||
"OTYPE", "COO_WAVELENGTH", "COO_BIBCODE",
|
||||
"PLX_BIBCODE", "PLX_ERROR_2", "PLX_QUAL",
|
||||
"OTYPE", "PLX_VALUE_2", "PLX_PREC",
|
||||
"PLX_BIBCODE_2"], inplace=True)
|
||||
|
||||
sobjlist = []
|
||||
for i, sobj in simbadc.iterrows():
|
||||
sobjlist.append(SimbadSource.objects.create(**sobj))
|
||||
simbadgenericcat.genericsource_set.add(*sobjlist)
|
||||
|
||||
#ech = erosrcs.filter(id__in=eloc.id.values)
|
||||
find_counterparts(genericsources, GenericSource.objects.filter(instance_of=SimbadSource), maxdist=kwargs.get("maxdist", 20.), minrad=kwargs.get("minrad", 3.))
|
||||
|
||||
def load_allwise_auxcat(genericsources, **kwargs):
|
||||
hp = HEALPix(nside=NSIDE_SOURCES, order=ORDER, frame=FK5())
|
||||
srcs = read_frame(genericsources)
|
||||
#s1.reset_votable_fields()
|
||||
fails = []
|
||||
allwisegenericcat = GenericCatalog.objects.get(name="AllWISE")
|
||||
for i in range((srcs.index.size + 4095)//4096):
|
||||
eloc = srcs.iloc[i*4096:min((i + 1)*4096, srcs.index.size)]
|
||||
print(eloc.index.size)
|
||||
#allwisec = Vizier.query_region(SkyCoord(eloc.ra.values, eloc.dec.values, unit="deg", frame="fk5"), radius=kwargs.get("maxdist", 10.)*au.arcsec, catalog="II/328").to_pandas()
|
||||
try:
|
||||
allwisec = Vizier.query_region(SkyCoord(eloc.ra.values, eloc.dec.values, unit="deg", frame="fk5"), radius=kwargs.get("maxdist", 10.)*au.arcsec, catalog="II/328")[0].to_pandas()
|
||||
except:
|
||||
print("fail to load")
|
||||
fails.append(eloc.id.values)
|
||||
continue
|
||||
print(allwisec.index.size)
|
||||
allwisec.rename(columns={"RAJ2000":"ra", "DEJ2000":"dec", "W1mag":"w1mag", "W2mag":"w2mag","W3mag":"w3mag","W4mag":"w4mag",
|
||||
"e_W1mag":"e_w1mag", "e_W2mag":"e_w2mag","e_W3mag":"e_w3mag","e_W4mag":"e_w4mag", "AllWISE": "name"}, inplace=True)
|
||||
sc = SkyCoord(allwisec.ra.values, allwisec.dec.values, unit="deg", frame="fk5")
|
||||
allwisec["lii"] = sc.galactic.l.value
|
||||
allwisec["bii"] = sc.galactic.b.value
|
||||
allwisec["name"] = allwisec.name.astype(str)
|
||||
allwisec["healpix"] = hp.skycoord_to_healpix(sc)
|
||||
allwisec.drop_duplicates('name', inplace=True)
|
||||
allwisec["error_radius"] = np.zeros(allwisec.index.size)
|
||||
duplicates = AllWISEgs.objects.filter(name__in=allwisec.name.values)
|
||||
if duplicates.exists():
|
||||
duplicatesnames = np.array(duplicates.values_list("name"))[:, 0]
|
||||
mask = np.isin(allwisec.name.values, duplicatesnames, invert=True)
|
||||
allwisec = allwisec[mask]
|
||||
allwisec.drop(columns=allwisec.columns.difference([f.name for f in AllWISEgs._meta.get_fields()]), inplace=True)
|
||||
|
||||
sobjlist = []
|
||||
for i, sobj in allwisec.iterrows():
|
||||
sobjlist.append(AllWISEgs.objects.create(**sobj))
|
||||
allwisegenericcat.genericsource_set.add(*sobjlist)
|
||||
|
||||
#ech = erosrcs.filter(id__in=eloc.id.values)
|
||||
find_counterparts(genericsources, GenericSource.objects.filter(instance_of=AllWISEgs), maxdist=kwargs.get("maxdist", 20.), minrad=kwargs.get("minrad", 3.))
|
||||
|
||||
def load_NVSS_auxcat(genericsources, **kwargs):
|
||||
hp = HEALPix(nside=NSIDE_SOURCES, order=ORDER, frame=FK5())
|
||||
srcs = read_frame(genericsources)
|
||||
fails = []
|
||||
nvssgenericcat = GenericCatalog.objects.get(name="NVSS")
|
||||
for i in range((srcs.index.size + 4095)//4096):
|
||||
eloc = srcs.iloc[i*4096:min((i + 1)*4096, srcs.index.size)]
|
||||
print(eloc.index.size)
|
||||
#nvssc = Vizier.query_region(SkyCoord(eloc.ra.values, eloc.dec.values, unit="deg", frame="fk5"), radius=kwargs.get("maxdist", 10.)*au.arcsec, catalog="II/328").to_pandas()
|
||||
try:
|
||||
nvssc = Vizier.query_region(SkyCoord(eloc.ra.values, eloc.dec.values, unit="deg", frame="fk5"), radius=kwargs.get("maxdist", 10.)*au.arcsec, catalog="VIII/65")[0].to_pandas()
|
||||
except:
|
||||
print("fail to load")
|
||||
fails.append(eloc.id.values)
|
||||
continue
|
||||
print(nvssc.index.size)
|
||||
nvssc.rename(columns={"NVSS": "name", "S1.4":"s14", "e_S1.4":"s14e", "MajAxis":"maxjaxis", "MinAxis":"minaxis"}, inplace=True)
|
||||
|
||||
sc = SkyCoord(nvssc.RAJ2000.values, nvssc.DEJ2000.values, unit=("hourangle", "deg"), frame="fk5")
|
||||
nvssc["ra"] = sc.ra.value
|
||||
nvssc["dec"] = sc.dec.value
|
||||
nvssc["lii"] = sc.galactic.l.value
|
||||
nvssc["bii"] = sc.galactic.b.value
|
||||
nvssc["name"] = nvssc.name.astype(str)
|
||||
nvssc["healpix"] = hp.skycoord_to_healpix(sc)
|
||||
nvssc.drop_duplicates('name', inplace=True)
|
||||
nvssc["error_radius"] = np.sqrt(nvssc.e_RAJ2000.values**2.*np.cos(np.deg2rad(nvssc.dec.values))**2. + nvssc.e_DEJ2000.values**2.)
|
||||
duplicates = NVSSgs.objects.filter(name__in=nvssc.name.values)
|
||||
if duplicates.exists():
|
||||
duplicatesnames = np.array(duplicates.values_list("name"))[:, 0]
|
||||
mask = np.isin(nvssc.name.values, duplicatesnames, invert=True)
|
||||
nvssc = nvssc[mask]
|
||||
nvssc.drop(columns=nvssc.columns.difference([f.name for f in NVSSgs._meta.get_fields()]), inplace=True)
|
||||
|
||||
sobjlist = []
|
||||
for i, sobj in nvssc.iterrows():
|
||||
sobjlist.append(NVSSgs.objects.create(**sobj))
|
||||
nvssgenericcat.genericsource_set.add(*sobjlist)
|
||||
find_counterparts(genericsources, GenericSource.objects.filter(instance_of=NVSSgs), maxdist=kwargs.get("maxdist", 20.), minrad=kwargs.get("minrad", 3.))
|
||||
|
||||
|
||||
def load_ztf_auxcat(genericsources, **kwargs):
|
||||
hp = HEALPix(nside=NSIDE_SOURCES, order=ORDER, frame=FK5())
|
||||
hp_plates = HEALPix(nside=NSIDE_PLATES,
|
||||
order=ORDER_PLATES, frame=FK5())
|
||||
srcs = read_frame(genericsources)
|
||||
uhealpix = np.unique(srcs.healpix.values)
|
||||
uhealpix = np.unique(np.concatenate([uhealpix, neighbours(uhealpix, NSIDE_SOURCES, order=ORDER).ravel()]))
|
||||
|
||||
ztfurl = "https://mars.lco.global/"
|
||||
params = {'format':'json', 'rb__gte':0.8,'nbad__lte':0,'fwhm__lte':5,'elong__lte':1.2}
|
||||
ztfsrc = []
|
||||
ztfloaded = ZTFOid.objects.filter(healpix__in=uhealpix)
|
||||
|
||||
for i, row in srcs.iterrows():
|
||||
print("%d out of %d, %d loaded, ra & dec: %.5f %.5f" % (i, srcs.index.size, len(ztfsrc), row.ra, row.dec))
|
||||
params["cone"] = "%.5f,%.5f,%.5f" % (row.ra, row.dec, kwargs.get("maxdist", 10)/3600.)
|
||||
r = requests.get(url=ztfurl, params=params)
|
||||
if not r.ok: continue
|
||||
ztfsrc += r.json()["results"]
|
||||
|
||||
|
||||
def unpack_mars_dict(d):
|
||||
s = d["candidate"]
|
||||
s["lco_id"] = d["lco_id"]
|
||||
s["objectId"] = d["objectId"]
|
||||
return pandas.Series(s)
|
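# unpack_mars_dict flattens one MARS alert record of the form
# {"lco_id": ..., "objectId": ..., "candidate": {...}} into a flat pandas Series,
# so that a DataFrame of alert candidates can be built below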
||||
|
||||
pd = pandas.DataFrame([unpack_mars_dict(z) for z in ztfsrc])
|
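# note: the local name "pd" rebinds the module-level pandas alias to this DataFrame of
# alerts; every use of "pd" below this point refers to the DataFrame, not to pandas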
||||
pd = pd.drop_duplicates("candid")
|
||||
pd.to_pickle("/srg/a1/work/andrey/rass/tmp_ztf_dumpy.pkl")
|
||||
|
||||
sc = SkyCoord(pd.ra.values, pd.dec.values, unit="deg", frame="fk5")
|
||||
pd["healpix"] = hp.skycoord_to_healpix(sc)
|
||||
pd["healpix_plate"] = hp_plates.skycoord_to_healpix(sc)
|
||||
format_string='%a, %d %b %Y %H:%M:%S %Z'
|
||||
pd["wt"] = [datetime.datetime.strptime(t, format_string) for t in pd.wall_time.values]
|
||||
pd.drop(columns=["wall_time"], inplace=True)
|
||||
pd.rename(columns={"wt":"wall_time"}, inplace=True)
|
||||
pd.drop(columns=pd.columns.difference(["objectId",] + [field.name for field in ZTFlc._meta.get_fields()]), inplace=True)
|
||||
pdgrp = pd.groupby("objectId")
|
||||
ztfurl = "https://ztf.alerce.online/query"
|
||||
ztfcat = GenericCatalog.objects.get(name="ZTF")
|
||||
ztffieldnames = [field.name for field in ZTFOid._meta.get_fields()]
|
||||
for name, group in pdgrp:
|
||||
try:
|
||||
ztfobj = ZTFOid.objects.get(objectId=name)
|
||||
except ObjectDoesNotExist:
|
||||
r = requests.post(ztfurl, json={"total":1, "query_parameters":{"filters":{"oid":name}}})
|
||||
if r.ok and r.json()["result"] and name in r.json()["result"]:
|
||||
ztfobj = r.json()["result"][name]
|
||||
ztfobj["objectId"] = ztfobj["oid"]
|
||||
ztfobj["ra"] = ztfobj["meanra"]
|
||||
ztfobj["dec"] = ztfobj["meandec"]
|
||||
sc = SkyCoord(ztfobj["meanra"], ztfobj["meandec"], unit="deg", frame="fk5")
|
||||
ztfobj["healpix"] = hp.skycoord_to_healpix(sc)
|
||||
ztfobj["healpix_plate"] = hp_plates.skycoord_to_healpix(sc)
|
||||
ztfobj["name"] = ztfobj["objectId"]
|
||||
print(ztfobj)
|
||||
ztfobj = {k:v for k, v in ztfobj.items() if k in ztffieldnames}
|
||||
if ztfobj["classrf"] is None:
|
||||
ztfobj["classrf"] = "Not specified"
|
||||
ztfobj["pclassrf"] = 1.
|
||||
print(ztfobj)
|
||||
ztfobj = ZTFOid.objects.create(**ztfobj)
|
||||
ztfcat.genericsource_set.add(ztfobj)
|
||||
else:
|
||||
continue
|
||||
clist = np.array([l[0] for l in ztfobj.ztflc_set.values_list("candid")])
|
||||
for i, row in group[~group.candid.isin(clist)].drop(columns=["objectId",]).iterrows():
|
||||
ZTFlc.objects.create(ztfobject=ztfobj, **row)
|
||||
|
||||
find_counterparts(genericsources, GenericSource.objects.filter(instance_of=ZTFOid), maxdist=kwargs.get("maxdist", 10.), minrad=kwargs.get("minrad", 3.))
|
||||
|
||||
ztfclasses={
|
||||
None: "Not specified",
|
||||
'null':"Not specified",
|
||||
"classified":"Classified",
|
||||
"not classified":"Not classified",
|
||||
1:"Ceph",
|
||||
2:"DSCT", #Delta SCuTi, a class of pulsating variable stars named after Delta Scuti, the archetype for the class
|
||||
3:"EB", # Eclipsing Beta Lyrae, a class of eclipsing binary stars named after Beta Lyrae, the archetype for the class
|
||||
4:"LPV", #Long Period Variable, a type of variable star that changes in brightness slowly over time
|
||||
5:"RRL", #RR Lyrae, a class of pulsating variable stars named after RR Lyrae, the archetype of the class
|
||||
6:"SN",
|
||||
0:"Other",
|
||||
7:"AGN I",
|
||||
8:"Blazar",
|
||||
9:"CV/Nova",
|
||||
10:"SN Ia",
|
||||
11:"SN Ibc",
|
||||
12:"SN II",
|
||||
14:"SLSN", #A super-luminous supernova (SLSN, plural super luminous supernovae or SLSNe) is a type of stellar explosion with a luminosity 10 or more times higher than that of standard supernovae
|
||||
15:"EB/SD/D", # SD - subdwarf, stars fainter than main-sequence stars with the same colors; often used as a prefix to a star's spectral type
|
||||
16:"EB/C",
|
||||
17:"Periodic/Other",
|
||||
18:"AGN",
|
||||
19:"SN",
|
||||
20:"Variable Star",
|
||||
21:"Asteroid",
|
||||
22:"Bogus",
|
||||
23:"RS-CVn",
|
||||
24:"QSO-I"}
|
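# Hedged convenience sketch (not used elsewhere in this module): map a raw ALeRCE class
# code to its human-readable label, falling back to "Not specified" for unknown codes.
def _ztf_class_label(code):
    return ztfclasses.get(code, "Not specified")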
||||
|
278
srglib/load_transinets.py
Normal file
@@ -0,0 +1,278 @@
|
||||
import pandas
|
||||
import glob
|
||||
import os
|
||||
from srglib import correlate_utils
|
||||
from srglib import genericsourceutils as gsu
|
||||
from astropy.coordinates import SkyCoord
|
||||
from django_pandas.io import read_frame
|
||||
import numpy as np
|
||||
from astropy.time import Time, TimeDelta
|
||||
import datetime
|
||||
from math import sin, cos, sqrt, pi
|
||||
from ErositaDailySourceCatalogues.models import ErositaSource
|
||||
import pytz
|
||||
import subprocess
|
||||
from genericsource.models import *
|
||||
from ErositaDailySourceCatalogues import views as eviews
|
||||
from ErositaDailySourceCatalogues.models import *
|
||||
from auxcatalogues.models import SimbadSource, RXS2, SXPS2, CSC2, \
|
||||
XMM3DR8, ZTFSource, SDSSSpec, GAIASource, ZTFOid, ZTFlc, AllWISEgs, \
|
||||
NVSSgs
|
||||
|
||||
northpole = np.array([-4.26367573e-16, -3.97777165e-01, 9.17482058e-01])
|
||||
surveystart = np.array([-7.43149503e-06, -4.00344939e-04, -1.73318355e-04])
|
||||
pangle = np.cross(northpole, surveystart)
|
||||
pangle = pangle/np.sqrt(np.sum(pangle**2.))
|
||||
|
||||
SurveyCatName = {1: "1st survey transients",
|
||||
2: "2nd survey transients",
|
||||
3: "3rd survey transients",}
|
||||
|
||||
|
||||
def add_aux_data(srcid, survey, coord=None, newsourceid=None):
|
||||
#print("/data/erosita/transients/products/trans_%d_e%d-1.png" % (srcid, survey))
|
||||
phimg = glob.glob("/data/erosita/transients/products/trans_%d_e%d.png" % (srcid, survey))
|
||||
|
||||
res = {k: "" for k in ["spec", "specimg", "arf", "rmf", "bkg", "reg", "log", "lcfits", "lcpdf"]}
|
||||
if phimg: res["img"] = os.path.basename(phimg[0])
|
||||
cwd = os.getcwd()
|
||||
os.chdir("/data/erosita/transients/products")
|
||||
rname = "" if not "spec" in res else res["spec"]
|
||||
if newsourceid:
|
||||
dpath = "/data/products/erotrans/%d" % newsourceid
|
||||
ppath = "/data/erosita/transients/products/"
|
||||
if not os.path.exists(dpath):
|
||||
os.mkdir(dpath)
|
||||
if "img" in res: subprocess.run(["cp", os.path.join(ppath, res["img"]), os.path.join(dpath, res["img"])])
|
||||
os.chdir(dpath)
|
||||
return res
|
||||
|
||||
|
||||
def get_closest_unique_withinapp(ecat, ncat, apps):
|
||||
"""
|
||||
find all closest unique counterparts within the defined distance
|
||||
"""
|
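# the loop below greedily pairs mutually closest unique sources: after each pass the
# matched rows/columns are masked out and the search is repeated (at most 21 passes),
# so remaining sources can pair with their next-closest unmatched counterpart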
||||
urow = np.empty(0, int)
|
||||
ucol = np.empty(0, int)
|
||||
if ecat.index.size:
|
||||
maske = np.ones(ecat.index.size, bool)
|
||||
maskn = np.ones(ncat.index.size, bool)
|
||||
eidx = np.arange(ecat.index.size)
|
||||
nidx = np.arange(ncat.index.size)
|
||||
for i in range(21):
|
||||
if not (np.any(maske) and np.any(maskn)):
|
||||
break
|
||||
row, col = correlate_utils.get_closest_unique(ecat[maske], ncat[maskn], apps)
|
||||
print(row.size)
|
||||
if row.size == 0 or np.all(~maske) or np.all(~maskn):
|
||||
break
|
||||
urow = np.concatenate([urow, eidx[maske][row]])
|
||||
ucol = np.concatenate([ucol, nidx[maskn][col]])
|
||||
maske[urow] = False
|
||||
maskn[ucol] = False
|
||||
|
||||
if False: #urow.size:
|
||||
mask = np.array([abs((ecat.iloc[i].obsdate - ncat.iloc[j].obsdate).days) for i, j in zip(urow, ucol)]) < 3
|
||||
urow, ucol = urow[mask], ucol[mask]
|
||||
return urow, ucol
|
||||
|
||||
|
||||
|
||||
def load_nondetections(ecat, ncat, catsrc, apps, survey):
|
||||
urow, ucol = get_closest_unique_withinapp(ecat, ncat, apps)
|
||||
print(urow.size, ncat.index.size, ecat.index.size)
|
||||
ncatidd = ncat.drop(columns=["id",])
|
||||
|
||||
for i, j in zip(urow, ucol):
|
||||
src = catsrc.filter(id__in=[ecat.iloc[i].id,])
|
||||
src.update(**ncatidd.iloc[j])
|
||||
src = src.first()
|
||||
sc = SkyCoord(src.ra, src.dec, unit="deg", frame="fk5")
|
||||
|
||||
try:
|
||||
auxdata = add_aux_data(ncat.iloc[j].id, survey, sc, src.id)
|
||||
except Exception:
|
||||
print("warn: no spectum info for %d found" % ncat.iloc[j].id)
|
||||
else:
|
||||
#print("auxdata", auxdata)
|
||||
try:
|
||||
adata = src.srcauxdata
|
||||
except:
|
||||
adata = SrcAuxDATA.objects.create(src=src)
|
||||
|
||||
#print("update source", auxdata)
|
||||
for key, val in auxdata.items():
|
||||
adata.__setattr__(key, val)
|
||||
#print(key, val, getattr(adata, key))
|
||||
adata.save()
|
||||
for key in ["spec", "specimg", "arf", "rmf", "bkg", "reg", "log", "lcfits", "lcpdf"]:
|
||||
adata.__setattr__(key, "")
|
||||
adata.save()
|
||||
|
||||
newsrc = []
|
||||
if ucol.size < ncat.index.size:
|
||||
idx = np.arange(ncat.index.size)
|
||||
idx = idx[np.isin(idx, ucol, invert=True, assume_unique=True)]
|
||||
print(idx.size)
|
||||
for i in idx:
|
||||
#print(ncatidd.iloc[i])
|
||||
src = ErositaSource.objects.create(**ncatidd.iloc[i], srcstatus="new")
|
||||
sc = SkyCoord(src.ra, src.dec, unit="deg", frame="fk5")
|
||||
print("processing", ncat.iloc[i].id, src.id)
|
||||
|
||||
try:
|
||||
auxdata = add_aux_data(ncat.iloc[i].id, survey, sc, src.id)
|
||||
except Exception:
|
||||
print("warn: no spectum info found")
|
||||
else:
|
||||
pass
|
||||
#print('create new source', auxdata)
|
||||
SrcAuxDATA.objects.create(src=src, **auxdata)
|
||||
newsrc.append(src)
|
||||
|
||||
#catsrc.genericsource_set.add(*newsrc)
|
||||
return newsrc
|
||||
|
||||
|
||||
|
||||
def update_columns(mtable, survey):
|
||||
sc = SkyCoord(mtable.ra.values, mtable.dec.values, unit="deg", frame="fk5")
|
||||
mtable["lii"] = sc.galactic.l.value
|
||||
mtable["bii"] = sc.galactic.b.value
|
||||
mtable["healpix"] = gsu.hp.skycoord_to_healpix(sc)
|
||||
mtable["healpix_plate"] = gsu.hp_plates.skycoord_to_healpix(sc)
|
||||
mtable["detected"] = False
|
||||
mtable["name"] = [gsu.make_source_name("SRGe", r, d) for r, d in zip(mtable.ra.values, mtable.dec.values)]
|
||||
mtable["sxflux"] = mtable["UPLIM_e%d" % survey]
|
||||
mtable["id"] = mtable.index.values
|
||||
|
||||
"""
|
||||
vecs = np.empty((mtable.index.size, 3), np.double)
|
||||
vecs[:, 0] = np.cos(mtable.dec.values*pi/180.)*np.cos(mtable.ra.values*pi/180.)
|
||||
vecs[:, 1] = np.cos(mtable.dec.values*pi/180.)*np.sin(mtable.ra.values*pi/180.)
|
||||
vecs[:, 2] = np.sin(mtable.dec.values*pi/180.)
|
||||
|
||||
vecsp = vecs - northpole*np.sum(northpole*vecs, axis=1)[:, np.newaxis]
|
||||
vecsp = vecsp/np.sqrt(np.sum(vecsp**2, axis=1))[:, np.newaxis]
|
||||
|
||||
alpha = np.arctan2(np.sum(vecsp*pangle, axis=1), np.sum(vecsp*surveystart, axis=1))
|
||||
alpha[alpha < 0] = pi + alpha[alpha < 0.]
|
||||
"""
|
||||
|
||||
tstart = mtable["TSTART_e%d" % survey].values
|
||||
tstop = mtable["TSTOP_e%d" % survey].values
|
||||
|
||||
"""
|
||||
mtable["obsdate"] = (Time(51543.87, format="mjd") + TimeDelta((tstart + tstop)/2., format="sec")).to_datetime(timezone=pytz.UTC)
|
||||
mtable["tstart"] = (Time(51543.87, format="mjd") + TimeDelta(tstart, format="sec")).to_datetime(timezone=pytz.UTC)
|
||||
mtable["tstop"] = (Time(51543.87, format="mjd") + TimeDelta(tstop, format="sec")).to_datetime(timezone=pytz.UTC)
|
||||
"""
|
||||
mtable["tstart"] = mtable["TSTART_e%d" % survey]
|
||||
mtable["tstop"] = mtable["TSTOP_e%d" % survey]
|
||||
mtable["obsdate"] = mtable["TSTART_e%d" % survey] + (mtable["TSTOP_e%d" % survey] - mtable["TSTART_e%d" % survey])/2.
|
||||
|
||||
mtable.drop(columns=mtable.columns.difference([f.name for f in ErositaSource._meta.get_fields()] + ["id",]), inplace=True)
|
||||
print(survey, mtable.index.size)
|
||||
return mtable
|
||||
|
||||
|
||||
def read_master_table(mastertablename, **kwargs):
|
||||
mtable = pandas.read_pickle(mastertablename)
|
||||
|
||||
newsrc = []
|
||||
for survey in [2,]: #np.arange(1, 4):
|
||||
mel = mtable[mtable["CTS_e%d" % survey].isna() & ~mtable["TSTART_e%d" % survey].isna()].copy()
|
||||
mel = update_columns(mel, survey)
|
||||
ccat = ErositaSource.objects.filter(id__in=GenericCatalog.objects.get(name=SurveyCatName[survey]).genericsource_set.all())
|
||||
ecat = read_frame(ccat)
|
||||
nset = load_nondetections(ecat, mel, ccat, 30, survey)
|
||||
GenericCatalog.objects.get(name=SurveyCatName[survey]).genericsource_set.add(*nset)
|
||||
newsrc += nset
|
||||
|
||||
if len(newsrc) > 0:
|
||||
newsrc = GenericSource.objects.filter(id__in=[src.id for src in newsrc])
|
||||
gsu.find_counterparts(newsrc, GenericSource.objects.filter(instance_of=CSC2), minrad=3., maxdist=20.)
|
||||
gsu.find_counterparts(newsrc, GenericSource.objects.filter(instance_of=SXPS2), minrad=3., maxdist=20.)
|
||||
gsu.find_counterparts(newsrc, GenericSource.objects.filter(instance_of=XMM3DR8), minrad=3., maxdist=20.)
|
||||
gsu.find_counterparts(newsrc, GenericSource.objects.filter(instance_of=RXS2), minrad=20., maxdist=80.)
|
||||
gsu.find_counterparts(newsrc, GenericSource.objects.filter(instance_of=SDSSSpec), minrad=3., maxdist=20.)
|
||||
gsu.find_from_old_gaia_db(newsrc, minrad=5, maxdist=20)
|
||||
#gsu.load_allwise_auxcat(newsrc, maxdist=10, minrad=3)
|
||||
gsu.find_counterparts(newsrc, GenericSource.objects.filter(instance_of=AllWISEgs), maxdist=kwargs.get("maxdist", 20.), minrad=kwargs.get("minrad", 3.))
|
||||
#gsu.load_NVSS_auxcat(newsrc, maxdist=10, minrad=3)
|
||||
gsu.find_counterparts(newsrc, GenericSource.objects.filter(instance_of=NVSSgs), maxdist=kwargs.get("maxdist", 20.), minrad=kwargs.get("minrad", 3.))
|
||||
#find_counterparts(newsrc, GenericSource.objects.filter(Q(instance_of=ErositaSource) & Q(erositasource__obsdate__lt=cat.stop - datetime.timedelta(days=10))), minrad=3, maxdist=20)
|
||||
#gsu.load_simbad_auxcat(newsrc)
|
||||
gsu.find_counterparts(newsrc, GenericSource.objects.filter(instance_of=SimbadSource), maxdist=kwargs.get("maxdist", 20.), minrad=kwargs.get("minrad", 3.))
|
||||
#gsu.load_ztf_auxcat(newsrc, maxdist=10, minrad=3)
|
||||
gsu.find_counterparts(newsrc, GenericSource.objects.filter(instance_of=ZTFOid), maxdist=kwargs.get("maxdist", 10.), minrad=kwargs.get("minrad", 3.))
|
||||
|
||||
def set_transient_data(ecat):
|
||||
ecat = eviews.create_annotations(ecat)
|
||||
ecat = eviews.addErositaSrcannotattion(ecat)
|
||||
ecat = ecat.filter(ratio_to_maxxflux__gt=1.)
|
||||
print(ecat.count())
|
||||
for src in ecat:
|
||||
try:
|
||||
ErositaTransient.objects.create(src=src, plx=src.plx, plxe=src.plxe,
|
||||
z=src.ssz if src.ssz else src.sdssz,
|
||||
ssep=src.ssep,
|
||||
sname=src.sname if src.sname else "",
|
||||
ztfcl=src.ztfcl if src.ztfcl else "",
|
||||
ztfnm=src.ztfnm if src.ztfnm else "",
|
||||
zsep=src.zsep, plx_sep=src.plx_sep,
|
||||
plx_mag=src.plx_mag,
|
||||
sclass=src.sclass if src.sclass else "",
|
||||
ratio_to_maxxflux=src.ratio_to_maxxflux,
|
||||
ratio_to_minxflux=src.ratio_to_minxflux)
|
||||
except:
|
||||
raise
|
||||
print(src.name, " already has transints")
|
||||
else:
|
||||
print("transient for %s created" % src.name)
|
||||
|
||||
def read_master_table_and_surveys(mastertablename, surveynames, **kwargs):
|
||||
mtable = pandas.read_pickle(mastertablename)
|
||||
mtable.rename(columns={"RA": "ra", "DEC": "dec"}, inplace=True)
|
||||
elast = ErositaSource.objects.last() #order_by("created").last()
|
||||
newsrc = []
|
||||
|
||||
for survey in np.arange(1, 4):
|
||||
detectedsources, ftime = gsu.read_esrc_pack(surveynames % survey)
|
||||
print("load survey", survey, surveynames % survey)
|
||||
detectedsources["survey"] = survey
|
||||
dmat = correlate_utils.makedistmat3d(detectedsources, mtable, 40.)
|
||||
urow, ucol = correlate_utils.select_closest_pairs(dmat.data, dmat.row, dmat.col)
|
||||
detectedsources = detectedsources.iloc[urow].set_index(mtable.index.values[ucol])
|
||||
ns = gsu.load_erosita_trans_daily_cat(detectedsources, ftime, 30, load_cat=SurveyCatName[survey])
|
||||
newsrc += list(ns)
|
||||
|
||||
mel = mtable[mtable["CTS_e%d" % survey].isna() & ~mtable["TSTART_e%d" % survey].isna()].copy()
|
||||
mel = update_columns(mel, survey)
|
||||
ccat = ErositaSource.objects.filter(id__in=GenericCatalog.objects.get(name=SurveyCatName[survey]).genericsource_set.all())
|
||||
ecat = read_frame(ccat)
|
||||
nset = load_nondetections(ecat, mel, ccat, 30, survey)
|
||||
GenericCatalog.objects.get(name=SurveyCatName[survey]).genericsource_set.add(*nset)
|
||||
newsrc += nset
|
||||
|
||||
|
||||
allloaded = ErositaSource.objects.filter(created__gt=elast.created)
|
||||
if len(newsrc) > 0:
|
||||
newsrc = GenericSource.objects.filter(id__in=[src.id for src in newsrc])
|
||||
gsu.find_counterparts(newsrc, GenericSource.objects.filter(instance_of=CSC2), minrad=3., maxdist=20.)
|
||||
gsu.find_counterparts(newsrc, GenericSource.objects.filter(instance_of=SXPS2), minrad=3., maxdist=20.)
|
||||
gsu.find_counterparts(newsrc, GenericSource.objects.filter(instance_of=XMM3DR8), minrad=3., maxdist=20.)
|
||||
gsu.find_counterparts(newsrc, GenericSource.objects.filter(instance_of=RXS2), minrad=20., maxdist=80.)
|
||||
gsu.find_counterparts(newsrc, GenericSource.objects.filter(instance_of=SDSSSpec), minrad=3., maxdist=20.)
|
||||
gsu.find_from_old_gaia_db(newsrc, minrad=5, maxdist=20)
|
||||
gsu.load_allwise_auxcat(newsrc, maxdist=10, minrad=3)
|
||||
gsu.find_counterparts(allloaded, GenericSource.objects.filter(instance_of=AllWISEgs), maxdist=kwargs.get("maxdist", 20.), minrad=kwargs.get("minrad", 3.))
|
||||
gsu.load_NVSS_auxcat(newsrc, maxdist=10, minrad=3)
|
||||
gsu.find_counterparts(allloaded, GenericSource.objects.filter(instance_of=NVSSgs), maxdist=kwargs.get("maxdist", 20.), minrad=kwargs.get("minrad", 3.))
|
||||
gsu.load_simbad_auxcat(newsrc)
|
||||
gsu.find_counterparts(allloaded, GenericSource.objects.filter(instance_of=SimbadSource), maxdist=kwargs.get("maxdist", 20.), minrad=kwargs.get("minrad", 3.))
|
||||
gsu.load_ztf_auxcat(newsrc, maxdist=10, minrad=3)
|
||||
gsu.find_counterparts(allloaded, GenericSource.objects.filter(instance_of=ZTFOid), maxdist=kwargs.get("maxdist", 10.), minrad=kwargs.get("minrad", 3.))
|
||||
set_transient_data(allloaded)
|
||||
GenericCatalog.objects.get(name='transients 2').genericsource_set.add(*allloaded.exclude(erositatransient=None))
|
||||
|
0
srglib/migrations/__init__.py
Normal file
3
srglib/models.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from django.db import models
|
||||
|
||||
# Create your models here.
|
104
srglib/tasks.py
Normal file
@@ -0,0 +1,104 @@
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
|
||||
from django.utils import timezone
|
||||
|
||||
from srglib.utils import load_erotrans_table, load_srg_data_dumps
|
||||
from srglib.utils import mark_new_skymap_sources_in_latest, update_allsky_missed
|
||||
from srglib.utils import load_skymap_sources_dir
|
||||
from srglib.ztf import load_ztf_alerce, load_ztf_alerts
|
||||
import pytz
|
||||
|
||||
from erotrans.models import EroTrans
|
||||
from monthplan.utils import load_surveypath_data, load_monthplan_dir
|
||||
|
||||
import glob
|
||||
from django.contrib.auth.models import User
|
||||
from django.core import mail
|
||||
from django.template.loader import render_to_string
|
||||
from django.utils.html import strip_tags
|
||||
from django.core.mail import send_mail
|
||||
|
||||
from celery import shared_task
|
||||
from srg.celery import app
|
||||
|
||||
import logging
|
||||
|
||||
@app.task
|
||||
def load_monthplan_task():
|
||||
load_monthplan_dir('/export/django/srg/data/npol/fits')
|
||||
|
||||
@app.task
|
||||
def load_surveypath_task():
|
||||
path = "/export/django/srg/data/npol/DMV/ARJ-SCAN/20??????_??????_???????????.iki"
|
||||
logging.info("load SRG survey path {}".format(path))
|
||||
ikifiles = []
|
||||
for file in glob.glob(path):
|
||||
ikifiles.append(file)
|
||||
|
||||
for file in ikifiles:
|
||||
load_surveypath_data(file,100,notify=True)
|
||||
|
||||
@app.task
|
||||
def load_srg_data_dumps_task():
|
||||
dir='/srg/work/oper/staff/kate/skymap_v0'
|
||||
load_srg_data_dumps(dir)
|
||||
#load_skymap_sources_dir('/srg/work/oper/staff/kate/skymap/catalog')
|
||||
#mark_new_skymap_sources_in_latest()
|
||||
#update_allsky_missed()
|
||||
|
||||
@app.task
|
||||
def load_skymap_sources_task():
|
||||
load_skymap_sources_dir('/srg/work/oper/staff/kate/skymap/catalog')
|
||||
#mark_new_skymap_sources_in_latest()
|
||||
update_allsky_missed()
|
||||
|
||||
@app.task
|
||||
def load_ztf_alerce_task():
|
||||
logging.info("load ALeRCE ZTF alerts for last 2 days")
|
||||
load_ztf_alerce(2)
|
||||
|
||||
@app.task
|
||||
def load_ztf_alerts_task():
|
||||
""" deprecated """
|
||||
logging.warning("load_ztf_alerts_task")
|
||||
load_ztf_alerts()
|
||||
|
||||
@app.task
|
||||
def load_erosita_daily_source_list():
|
||||
""" Loads eRosita daily source catalog
|
||||
|
||||
In /export/django/srg, use the following command to start celery.worker:
|
||||
celery -A srg worker --pool=solo -l info --loglevel=DEBUG
|
||||
|
||||
Use the following command to start celery.beat:
|
||||
celery -A srg beat
|
||||
"""
|
||||
|
||||
users = User.objects.filter(groups__name='srg-erosita-transients')
|
||||
to_emails=[]
|
||||
for user in users:
|
||||
to_emails.append(user.email)
|
||||
|
||||
pklfiles = []
|
||||
for filename in glob.glob("/data/erosita/transients/srclist_*.pkl"):
|
||||
pklfiles.append(filename)
|
||||
|
||||
|
||||
for filename in pklfiles:
|
||||
res = load_erotrans_table(filename)
|
||||
if (res > 0):
|
||||
try:
|
||||
cat = EroTrans.objects.get(pk=res)
|
||||
except:
|
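# Hedged usage sketch: each per-survey transient container is an ordinary GenericCatalog
# looked up by the names above, e.g. for the second survey:
#   GenericCatalog.objects.get(name=SurveyCatName[2]).genericsource_set.all()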
||||
logging.error("EroTrans with pk=%d not found", res)
|
||||
continue
|
||||
"""
|
||||
Send email notification to group
|
||||
"""
|
||||
nomatch_count=cat.erotranssource_set.filter(heasarc=None).count()
|
||||
subject = "New eRosita catalog %s" % cat.obsid
|
||||
html_message = render_to_string('erotrans/erotrans_loaded_email.html', {'cat':cat,'nomatch_count':nomatch_count})
|
||||
plain_message = strip_tags(html_message)
|
||||
from_email = 'Roman Krivonos <krivonos@cosmos.ru>'
|
||||
|
||||
#mail.send_mail(subject, plain_message, from_email, to_emails, html_message=html_message)
|
3
srglib/tests.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from django.test import TestCase
|
||||
|
||||
# Create your tests here.
|
44
srglib/update_erotrans_tables.py
Normal file
@@ -0,0 +1,44 @@
|
||||
from ErositaDailySourceCatalogues import views as eviews
|
||||
from ErositaDailySourceCatalogues.models import *
|
||||
from genericsource.models import *
|
||||
from srglib import genericsourceutils as gsu
|
||||
from srglib.load_transinets import SurveyCatName
|
||||
from django.db.models import F, Q, Max, Subquery, OuterRef, Min, Count, Value
|
||||
from functools import reduce
|
||||
|
||||
|
||||
|
||||
def getalltransients():
|
||||
query = reduce(lambda a, b: a | b, [Q(id__in=GenericCatalog.objects.get(name=survname).genericsource_set.all()) for survname in SurveyCatName.values()])
|
||||
return ErositaSource.objects.filter(query)
|
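# getalltransients() ORs together one Q(id__in=...) membership filter per survey catalog
# listed in SurveyCatName, i.e. it returns the union of ErositaSource rows attached to
# any of the per-survey transient catalogs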
||||
|
||||
|
||||
def get_transients(cat, tcat):
|
||||
cat = eviews.create_annotations(cat)
|
||||
cat = eviews.addErositaSrcannotattion(cat)
|
||||
cat = cat.filter(ratio_to_maxxflux__gt=1.1)
|
||||
removed = ErositaSource.objects.filter(id__in=tcat.genericsource_set.exclude(id__in=cat))
|
||||
tcat.genericsource_set.remove(*tcat.genericsource_set.all())
|
||||
|
||||
|
||||
def set_transient_data(ecat):
|
||||
ecat = eviews.create_annotations(ecat)
|
||||
ecat = eviews.addErositaSrcannotattion(ecat)
|
||||
ecat = ecat.filter(ratio_to_maxxflux__gt=1.)
|
||||
for src in ecat:
|
||||
try:
|
||||
ErositaTransient.objects.create(src=src, plx=src.plx, plxe=src.plxe,
|
||||
z=src.ssz if src.ssz else src.sdssz,
|
||||
ssep=src.ssep,
|
||||
sname=src.sname if src.sname else "",
|
||||
ztfcl=src.ztfcl if src.ztfcl else "",
|
||||
ztfnm=src.ztfnm if src.ztfnm else "",
|
||||
zsep=src.zsep, plx_sep=src.plx_sep,
|
||||
plx_mag=src.plx_mag,
|
||||
sclass=src.sclass if src.sclass else "",
|
||||
ratio_to_maxxflux=src.ratio_to_maxxflux,
|
||||
ratio_to_minxflux=src.ratio_to_minxflux)
|
||||
except:
|
||||
print(src.name, " already has transints")
|
||||
else:
|
||||
print("transient for %s created" % src.name)
|
2458
srglib/utils.py
Normal file
File diff suppressed because it is too large
3
srglib/views.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from django.shortcuts import render
|
||||
|
||||
# Create your views here.
|
291
srglib/ztf.py
Normal file
@@ -0,0 +1,291 @@
|
||||
from astropy.coordinates import SkyCoord # High-level coordinates
|
||||
from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames
|
||||
from astropy.coordinates import Angle, Latitude, Longitude # Angles
|
||||
import astropy.units as u
|
||||
import re
|
||||
import json
|
||||
import pandas as pd
|
||||
from astropy_healpix import HEALPix
|
||||
from erotrans.models import EroTransSource, EroTrans
|
||||
from erosurvey.models import NSIDE_PLATES, ORDER_PLATES
|
||||
from heasarc.models import NSIDE_SOURCES, ORDER, HeasarcBase
|
||||
|
||||
from astrobasis.models import GAIADR2
|
||||
from astrobasis.models import ZTFAlert
|
||||
from astrobasis.models import ALeRCE
|
||||
from astrobasis.models import alerce_early_class
|
||||
from monthplan.models import Survey
|
||||
#import datetime
|
||||
|
||||
from datetime import datetime
|
||||
from django.utils.timezone import localdate, localtime, now
|
||||
|
||||
import astropy.units as u
|
||||
from astropy.time.formats import erfa
|
||||
from astropy.time import Time, TimeDelta, TimezoneInfo, TimeFromEpoch
|
||||
|
||||
from astropy_healpix import HEALPix, neighbours
|
||||
from django_pandas.io import read_frame
|
||||
from srglib import correlate_utils, check_transient
|
||||
import numpy as np
|
||||
|
||||
from math import pi, cos, sin
|
||||
from Quaternion import Quat
|
||||
from scipy.spatial.transform import Rotation, Slerp
|
||||
|
||||
|
||||
import logging
|
||||
from pprint import pprint, pformat
|
||||
|
||||
import requests
|
||||
import json
|
||||
|
||||
from srglib.utils import TZ_MSK
|
||||
|
||||
|
||||
def find_ztf_in_survey(ztf):
|
||||
""" Find ZTF alert in ALL surveys by HEALPIX plate"""
|
||||
surveys = Survey.objects.all()
|
||||
for survey in surveys:
|
||||
#total = survey.surveyhealpixplate_set.count()
|
||||
#print(survey.experiment, ztf, total)
|
||||
if survey.surveyhealpixplate_set.filter(healpix=ztf.healpix_plate).exists():
|
||||
#print('add survey')
|
||||
ztf.survey.add(survey)
|
||||
ztf.save()
|
||||
|
||||
|
||||
"""
|
||||
if ztf.survey.exists():
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
"""
|
||||
def load_ztf_alerts():
|
||||
""" Loads ZTF alerts
|
||||
|
||||
https://zwickytransientfacility.github.io/ztf-avro-alert/filtering.html
|
||||
|
||||
Based on tests done at IPAC (F. Masci, priv. comm), the following filter delivers a relatively pure sample:
|
||||
|
||||
rb >= 0.65 and
|
||||
nbad = 0 and
|
||||
fwhm <= 5 and
|
||||
elong <= 1.2 and
|
||||
abs(magdiff) <= 0.1
|
||||
|
||||
|
||||
Parameters
|
||||
----------
|
||||
time : int
|
||||
Request alerts since this time in seconds
|
||||
|
||||
|
||||
:Author:
|
||||
Roman Krivonos <krivonos@cosmos.ru>
|
||||
|
||||
"""
|
||||
|
||||
format_string='%a, %d %b %Y %H:%M:%S %Z'
|
||||
|
||||
#date_string='Tue, 28 Jan 2020 09:30:30 GMT'
|
||||
#dt = datetime.strptime(date_string, format_string)
|
||||
|
||||
hp = HEALPix(nside=NSIDE_SOURCES,
|
||||
order=ORDER, frame=FK5())
|
||||
|
||||
hp_plates = HEALPix(nside=NSIDE_PLATES,
|
||||
order=ORDER_PLATES, frame=FK5())
|
||||
|
||||
URL = "https://mars.lco.global/"
|
||||
PARAMS={'format':'json', 'rb__gte':0.8,'nbad__lte':0,'fwhm__lte':5,'elong__lte':1.2}
|
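# note: the cuts requested here (rb >= 0.8, nbad = 0, fwhm <= 5, elong <= 1.2) are
# slightly stricter on rb than the rb >= 0.65 sample quoted in the docstring, and no
# magdiff cut is applied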
||||
r = requests.get(url = URL, params = PARAMS)
|
||||
data = r.json()
|
||||
#print(data['total'])
|
||||
#pprint.pprint(data)
|
||||
|
||||
for item in data['results']:
|
||||
#print(item['lco_id'],item['objectId'],item['candidate']['wall_time'])
|
||||
try:
|
||||
ztf = ZTFAlert.objects.get(lco_id=item['lco_id'])
|
||||
logging.info("ZTF alert ID %d is already loaded, skip" % item['lco_id'])
|
||||
continue
|
||||
except ZTFAlert.DoesNotExist:
|
||||
logging.info("ZTF alert ID %d is not loaded" % item['lco_id'])
|
||||
pass
|
||||
date_string = item['candidate']['wall_time']
|
||||
dt = datetime.strptime(date_string, format_string)
|
||||
ra = item['candidate']['ra']
|
||||
dec = item['candidate']['dec']
|
||||
crd = SkyCoord(ra, dec, frame=FK5(), unit="deg")
|
||||
healpix = hp.skycoord_to_healpix(crd)
|
||||
healpix_plate = hp_plates.skycoord_to_healpix(crd)
|
||||
ztf = ZTFAlert(
|
||||
objectId = item['objectId'],
|
||||
fid = item['candidate']['fid'],
|
||||
lco_id = item['lco_id'],
|
||||
healpix = healpix, healpix_plate = healpix_plate,
|
||||
ra = ra, dec = dec,
|
||||
programid = item['candidate']['programid'],
|
||||
magpsf = item['candidate']['magpsf'],
|
||||
sigmapsf = item['candidate']['sigmapsf'],
|
||||
magap = item['candidate']['magap'],
|
||||
magdiff = item['candidate']['magdiff'],
|
||||
nbad = item['candidate']['nbad'],
|
||||
fwhm = item['candidate']['fwhm'],
|
||||
elong = item['candidate']['elong'],
|
||||
sigmagap = item['candidate']['sigmagap'],
|
||||
wall_time = datetime.strptime(date_string, format_string),
|
||||
diffmaglim = item['candidate']['diffmaglim'],
|
||||
deltamagref = item['candidate']['deltamagref'],
|
||||
deltamaglatest = item['candidate']['deltamaglatest'],
|
||||
rb = item['candidate']['rb'])
|
||||
ztf.save()
|
||||
|
||||
find_ztf_in_survey(ztf)
|
||||
|
||||
|
||||
def load_ztf_alerce_to_django(item):
|
||||
""" Loads ALeRCE ZTF alert to Django
|
||||
|
||||
classearly=None, VS, Asteroids and Bogus objects are not loaded.
|
||||
"""
|
||||
|
||||
nobs = int(item['nobs'])
|
||||
|
||||
classearly = item['classearly']
|
||||
|
||||
if (classearly == None):
|
||||
print("ALeRCE ZTF alert %s: skip classearly=None" % item['oid'])
|
||||
return
|
||||
|
||||
"""
|
||||
if (classearly == 20 or classearly == 21 or classearly == 22):
|
||||
logging.debug("ALeRCE ZTF alert '{0}': skip '{1}'".format(item['oid'],alerce_early_class(classearly)))
|
||||
return
|
||||
"""
|
||||
|
||||
try:
|
||||
ztf = ALeRCE.objects.get(oid=item['oid'])
|
||||
if(nobs > ztf.nobs):
|
||||
""" if nobs is changed, reload this alert, else skip """
|
||||
logging.info("ALeRCE ZTF alert '{0}': reload, new nobs '{1}' > '{2}'".format(item['oid'],ztf.nobs,nobs))
|
||||
ztf.delete()
|
||||
pass
|
||||
else:
|
||||
logging.debug("ALeRCE ZTF alert %s is found, skip" % item['oid'])
|
||||
return
|
||||
except ALeRCE.DoesNotExist:
|
||||
logging.info("ALeRCE ZTF alert %s is not found, loading" % item['oid'])
|
||||
pass
|
||||
|
||||
hp = HEALPix(nside=NSIDE_SOURCES,
|
||||
order=ORDER, frame=FK5())
|
||||
|
||||
hp_plates = HEALPix(nside=NSIDE_PLATES,
|
||||
order=ORDER_PLATES, frame=FK5())
|
||||
|
||||
ra = float(item['meanra'])
|
||||
dec = float(item['meandec'])
|
||||
crd = SkyCoord(ra, dec, frame=FK5(), unit="deg")
|
||||
healpix = hp.skycoord_to_healpix(crd)
|
||||
healpix_plate = hp_plates.skycoord_to_healpix(crd)
|
||||
|
||||
first_tm = Time(item['firstmjd'], format='mjd', scale='utc')
|
||||
last_tm = Time(item['lastmjd'], format='mjd', scale='utc')
|
||||
first_dt = first_tm.to_datetime(timezone=TZ_MSK)
|
||||
last_dt = last_tm.to_datetime(timezone=TZ_MSK)
|
||||
|
||||
ztf = ALeRCE(
|
||||
healpix = healpix,
|
||||
healpix_plate = healpix_plate,
|
||||
classearly = item['classearly'],
|
||||
classrf = item['classrf'],
|
||||
oid = item['oid'],
|
||||
firstdate = first_dt,
|
||||
lastdate = last_dt,
|
||||
firstmjd = item['firstmjd'],
|
||||
lastmjd = item['lastmjd'],
|
||||
mean_magap_g = item['mean_magap_g'],
|
||||
mean_magap_r = item['mean_magap_r'],
|
||||
mean_magpsf_g = item['mean_magpsf_g'],
|
||||
mean_magpsf_r = item['mean_magpsf_r'],
|
||||
dec = dec,
|
||||
ra = ra,
|
||||
nobs = item['nobs'],
|
||||
pclassearly = item['pclassearly'],
|
||||
pclassrf = item['pclassrf'],
|
||||
sigma_magap_g = item['sigma_magap_g'],
|
||||
sigma_magap_r = item['sigma_magap_r'],
|
||||
sigma_magpsf_g = item['sigma_magpsf_g'],
|
||||
sigma_magpsf_r = item['sigma_magpsf_r'],
|
||||
sigmadec = item['sigmadec'],
|
||||
sigmara = item['sigmara'])
|
||||
|
||||
ztf.save()
|
||||
find_ztf_in_survey(ztf)
|
||||
|
||||
def load_ztf_alerce(days, extra=None):
|
||||
classrf = "sn ia"
|
||||
pclassrf = 0.1
|
||||
pclassearly = 0.1
|
||||
sortBy = "firstmjd"
|
||||
nobsmin = 2
|
||||
nobsmax = 40
|
||||
url = "https://ztf.alerce.online/query"
|
||||
|
||||
current_utc = now()
|
||||
current_mjd = Time(current_utc).mjd
|
||||
firstmjd_min = current_mjd - days
|
||||
|
||||
post={
|
||||
"sortBy": sortBy,
|
||||
"records_per_pages": 100,
|
||||
"query_parameters":{
|
||||
"filters": {
|
||||
"nobs": {
|
||||
"min": nobsmin,
|
||||
"max": nobsmax
|
||||
},
|
||||
#"classrf": classrf,
|
||||
#"pclassrf": pclassrf,
|
||||
'pclassearly':pclassearly,
|
||||
},
|
||||
"dates":{
|
||||
"firstmjd":{
|
||||
"min": firstmjd_min
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if(extra):
|
||||
post['query_parameters']['filters'].update(extra)
|
||||
|
||||
#pprint.pprint(post)
|
||||
|
||||
r = requests.post(url = url, json = post)
|
||||
data = r.json()
|
||||
for item in data['result']:
|
||||
""" loads first page """
|
||||
packet=data['result'][item]
|
||||
#pprint.pprint(packet['oid'])
|
||||
load_ztf_alerce_to_django(packet)
|
||||
|
||||
total=int(data['total'])
|
||||
num_pages=int(data['num_pages'])
|
||||
page=int(data['page'])
|
||||
|
||||
logging.info("ALeRCE ZTF alerts total '{0}', num_pages '{1}'".format(int(total),int(num_pages)))
|
||||
#return
|
||||
pages = list(range(2,num_pages+1))
|
||||
for page in pages:
|
||||
""" loads all other pages, staring from 2 """
|
||||
post.update( {'page':page,} )
|
||||
logging.debug(pformat(post))
|
||||
r = requests.post(url = url, json = post)
|
||||
data = r.json()
|
||||
for item in data['result']:
|
||||
packet=data['result'][item]
|
||||
#pprint.pprint(packet['oid'])
|
||||
load_ztf_alerce_to_django(packet)
|