Added status and error-message fields, and a contamination map, to the response

This commit is contained in:
Никита Тырин 2025-05-26 15:26:15 +03:00
parent cc410f58dc
commit a36163f731

189
views.py
View File

@ -12,7 +12,7 @@ from astropy.stats import poisson_conf_interval
from collections import defaultdict
from django.db.models import Sum
from django.db.models import Sum, Max
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework.response import Response
@ -221,118 +221,132 @@ class UpperLimitView(APIView):
# exclude contaminated pixels from the background calculations
annulus_pixels = annulus_pixels.exclude(contaminated=True)
status_int = 0
error_message = ""
if not source_pixels.exists() or not annulus_pixels.exists():
return Response(
{
"detail": "No background and/or source pixel data for the given survey selection."
},
status=status.HTTP_404_NOT_FOUND,
)
status_int = 1 # status 1 if there are no source or bg pixels
# COMPUTE COUNTS, BACKGROUND ESTIMATE, EXPOSURE
# **************************************************************
try:
# summing counts across all surveys
N = sum(obj.counts for obj in source_pixels)
# summing counts across all surveys
N = sum(obj.counts for obj in source_pixels)
Nnpix = len(source_pixels)
Nnpix = len(source_pixels)
Bcounts = sum(obj.counts for obj in annulus_pixels)
Bcounts = sum(obj.counts for obj in annulus_pixels)
Bnpix = len(annulus_pixels)
Bnpix = len(annulus_pixels)
B = Bcounts / Bnpix * Nnpix
B = Bcounts / Bnpix * Nnpix
# create a dict of exposures keyed by survey
t_by_survey = defaultdict(list)
# create a dict of exposures keyed by survey
t_by_survey = defaultdict(list)
for pixel in source_pixels:
t_by_survey[pixel.survey].append(pixel.exposure)
for pixel in source_pixels:
t_by_survey[pixel.survey].append(pixel.exposure)
# create and populate a list of average exposures per survey
survey_averages = []
# create and populate a list of average exposures per survey
survey_averages = []
for survey_id, exposures in t_by_survey.items():
average_exposure = sum(exposures) / len(exposures)
survey_averages.append(average_exposure)
for survey_id, exposures in t_by_survey.items():
average_exposure = sum(exposures) / len(exposures)
survey_averages.append(average_exposure)
# sum them up across surveys
t = sum(survey_averages)
# sum them up across surveys
t = sum(survey_averages)
# CONSTANTS
# **************************************************************
# CONSTANTS
# **************************************************************
# EEF = .9 # enclosed energy fraction, .5 for hpd, .9 for w90
# ECF = 4e-11 # energy conversion factor
# EEF = .9 # enclosed energy fraction, .5 for hpd, .9 for w90
# ECF = 4e-11 # energy conversion factor
EEF = 0.80091 # use values from the paper
ECF = 3.3423184e-11
EEF = 0.80091 # use values from the paper
ECF = 3.3423184e-11
# BAYESIAN IMPLEMENTATION VIA POISSON_CONF_INTERVAL
# **************************************************************
# BAYESIAN IMPLEMENTATION VIA POISSON_CONF_INTERVAL
# **************************************************************
# double sided interval
low, high = poisson_conf_interval(
n=N,
background=B,
interval="kraft-burrows-nousek",
confidence_level=confidence_level,
)
# because poisson_conf_interval returns lists
bayesian_count_ul = high[0]
bayesian_count_ll = low[0]
bayesian_count_UL = (
sp.gammaincinv(
N + 1, confidence_level * sp.gammaincc(N + 1, B) + sp.gammainc(N + 1, B)
# double sided interval
low, high = poisson_conf_interval(
n=N,
background=B,
interval="kraft-burrows-nousek",
confidence_level=confidence_level,
)
- B
)
bayesian_rate_ul = bayesian_count_ul / t / EEF # count rate limits
bayesian_rate_ll = bayesian_count_ll / t / EEF
bayesian_rate_UL = bayesian_count_UL / t / EEF
# because poisson_conf_interval returns lists
bayesian_count_ul = high[0]
bayesian_count_ll = low[0]
bayesian_flux_ul = bayesian_rate_ul * ECF # flux limits
bayesian_flux_ll = bayesian_rate_ll * ECF
bayesian_flux_UL = bayesian_rate_UL * ECF
bayesian_count_UL = (
sp.gammaincinv(
N + 1,
confidence_level * sp.gammaincc(N + 1, B) + sp.gammainc(N + 1, B),
)
- B
)
# CLASSICAL IMPLEMENTATION VIA GAMMAINCCINV
# ****************************************************************
bayesian_rate_ul = bayesian_count_ul / t / EEF # count rate limits
bayesian_rate_ll = bayesian_count_ll / t / EEF
bayesian_rate_UL = bayesian_count_UL / t / EEF
# confidence level for the double sided interval
cl2 = (1 + confidence_level) / 2
# double sided interval
classic_count_ul = sp.gammainccinv(N + 1, 1 - cl2) - B
classic_count_ll = sp.gammainccinv(N, cl2) - B
# one sided interval
classic_count_UL = sp.gammainccinv(N + 1, 1 - confidence_level) - B
bayesian_flux_ul = bayesian_rate_ul * ECF # flux limits
bayesian_flux_ll = bayesian_rate_ll * ECF
bayesian_flux_UL = bayesian_rate_UL * ECF
if not np.isfinite(classic_count_ll) or classic_count_ll < 0:
classic_count_ll = 0.0
# CLASSICAL IMPLEMENTATION VIA GAMMAINCCINV
# ****************************************************************
classic_rate_ul = classic_count_ul / t / EEF # count rate limits
classic_rate_ll = classic_count_ll / t / EEF
classic_rate_UL = classic_count_UL / t / EEF
# confidence level for the double sided interval
cl2 = (1 + confidence_level) / 2
# double sided interval
classic_count_ul = sp.gammainccinv(N + 1, 1 - cl2) - B
classic_count_ll = sp.gammainccinv(N, cl2) - B
# one sided interval
classic_count_UL = sp.gammainccinv(N + 1, 1 - confidence_level) - B
classic_flux_ul = classic_rate_ul * ECF # flux limits
classic_flux_ll = classic_rate_ll * ECF
classic_flux_UL = classic_rate_UL * ECF
if not np.isfinite(classic_count_ll) or classic_count_ll < 0:
classic_count_ll = 0.0
# FLUX ESTIMATION
# ****************************************************************
classic_rate_ul = classic_count_ul / t / EEF # count rate limits
classic_rate_ll = classic_count_ll / t / EEF
classic_rate_UL = classic_count_UL / t / EEF
S = N - B # counts as simply counts within aperture
# with the background estimate subtracted
classic_flux_ul = classic_rate_ul * ECF # flux limits
classic_flux_ll = classic_rate_ll * ECF
classic_flux_UL = classic_rate_UL * ECF
CR = S / t / EEF # source count rate
# FLUX ESTIMATION
# ****************************************************************
BR = B / t # background rate within aperture
S = N - B # counts as simply counts within aperture
# with the background estimate subtracted
FL = CR * ECF # conversion to flux
CR = S / t / EEF # source count rate
Flux = max(FL, 0) # flux cannot be lower than zero
BR = B / t # background rate within aperture
FL = CR * ECF # conversion to flux
Flux = max(FL, 0) # flux cannot be lower than zero
# handle exceptions: write error text to the response
# zero out all math results
except Exception as e:
error_message = str(e)
N = Nnpix = Bcounts = Bnpix = B = t = S = CR = BR = 0.0
Flux = 0.0
classic_count_ul = classic_count_ll = classic_count_UL = 0.0
classic_rate_ul = classic_rate_ll = classic_rate_UL = 0.0
classic_flux_ul = classic_flux_ll = classic_flux_UL = 0.0
bayesian_count_ul = bayesian_count_ll = bayesian_count_UL = 0.0
bayesian_rate_ul = bayesian_rate_ll = bayesian_rate_UL = 0.0
bayesian_flux_ul = bayesian_flux_ll = bayesian_flux_UL = 0.0
# NEARBY SOURCES CHECK
# ****************************************************************
@ -372,7 +386,7 @@ class UpperLimitView(APIView):
}
)
# SQUARE REGION IMAGE SERVING
# REGION IMAGE SERVING
# ****************************************************************
# default value if not specified in the query
@ -396,7 +410,10 @@ class UpperLimitView(APIView):
map_pixels_qs = (
Pixel.objects.filter(hpid__in=map_pixel_list, survey__in=survey_numbers)
.values("hpid")
.annotate(counts=Sum("counts"))
.annotate(
counts=Sum("counts"),
contaminated=Max("contaminated"),
)
.order_by("hpid")
)
@ -406,6 +423,7 @@ class UpperLimitView(APIView):
# get lists of healpix indices and count values
map_healpix_list = [d["hpid"] for d in map_pixels_list]
map_counts_list = [d["counts"] for d in map_pixels_list]
map_contaminated_list = [d["contaminated"] for d in map_pixels_list]
# set map nside
map_nside = 4096
@ -417,6 +435,7 @@ class UpperLimitView(APIView):
map_dict = {
"healpix": map_healpix_list,
"counts": map_counts_list,
"contaminated": map_contaminated_list,
"nside": map_nside,
"order": map_order,
"radius_as": map_radius,
@ -426,6 +445,10 @@ class UpperLimitView(APIView):
# ****************************************************************
result = {
"Status": status_int,
# 0 OK
# 1 either source or bg pixels missing
"ErrorMessage": error_message,
# frequentist limits
"ClassicUpperLimit": classic_count_ul,
"ClassicLowerLimit": classic_count_ll,