code formatting

Никита Тырин 2025-05-19 15:09:04 +03:00
parent cf3213a0f9
commit 26f848d274
6 changed files with 373 additions and 417 deletions

View File

@@ -1,21 +1,21 @@
 # uplim/management/commands/load_survey.py
 import numpy as np
 from astropy.io import fits
+from itertools import islice
+from datetime import datetime
 from django.core.management.base import BaseCommand
 from django.db import transaction
 from uplim.models import Pixel
 from django.db.models import Max
-from itertools import islice
-from datetime import datetime

 # DEFINE BATCH SIZE AND BATCH
 # **************************************************************
-#BATCH_SIZE = 1000000
+# BATCH_SIZE = 1000000


 def batch(iterable, size):
     """
@@ -29,8 +29,6 @@ def batch(iterable, size):
         yield chunk

 class Command(BaseCommand):
     help = "Process FITS files and store the data in the database"
@@ -39,40 +37,33 @@ class Command(BaseCommand):
     def add_arguments(self, parser):
         parser.add_argument(
-            '--counts',
-            type=str,
-            required=True,
-            help='Path of the counts file'
+            "--counts", type=str, required=True, help="Path of the counts file"
         )
         parser.add_argument(
-            '--exposure',
-            type=str,
-            required=True,
-            help='Path of the exposure file'
+            "--exposure", type=str, required=True, help="Path of the exposure file"
         )
         parser.add_argument(
-            '--survey_number',
-            type=int,
+            "--survey_number",
+            type=int,
             required=True,
-            help='Integer ID of the survey being read'
+            help="Integer ID of the survey being read",
         )
         parser.add_argument(
-            '--batch_size',
-            type=int,
+            "--batch_size",
+            type=int,
             default=1000,
-            help='Integer number of pixels to be inserted into the database at once'
+            help="Integer number of pixels to be inserted into the database at once",
         )

     def handle(self, *args, **options):
         # GET FILENAMES FROM ARGUMENTS
         # **************************************************************
-        counts_file = options['counts']
-        exposure_file = options['exposure']
-        survey_number = options['survey_number']
-        BATCH_SIZE = options['batch_size']
+        counts_file = options["counts"]
+        exposure_file = options["exposure"]
+        survey_number = options["survey_number"]
+        BATCH_SIZE = options["batch_size"]

         self.stdout.write(f"\nCounts file:\t{counts_file}")
         self.stdout.write(f"Exposure file:\t{exposure_file}")
@@ -87,7 +78,6 @@ class Command(BaseCommand):
             counts_data = counts_map.ravel()

         with fits.open(exposure_file) as hdul:
             column_name = "T"
@@ -100,49 +90,48 @@ class Command(BaseCommand):
         self.stdout.write(f"\nCounts Data Shape:\t{counts_data.shape}")
         self.stdout.write(f"Exposure Data Shape:\t{exposure_data.shape}")

         total_pixels = counts_data.shape[0]
         self.stdout.write(f"\nTotal pixels to insert:\t{total_pixels}")

-        assert counts_data.shape == exposure_data.shape, "Counts and exposure maps must have the same shape"
+        assert (
+            counts_data.shape == exposure_data.shape
+        ), "Counts and exposure maps must have the same shape"

         # CREATE THE SURVEY IF IT DOES NOT EXIST
         # **************************************************************
         # with transaction.atomic():
         #     survey,created = Survey.objects.get_or_create(number=survey_number)
         #     if created:
         #         self.stdout.write(f"Created a new survey instance with number: {survey.number}")
         #     else:
         #         self.stdout.write(f"Using existing survey instance with the number: {survey.number}")

         # FETCH THE LAST PROCESSED HPID AND CONTINUE FROM IT
         # **************************************************************
         last_hpid = (
-            Pixel.objects
-            .filter(survey=survey_number)
-            .aggregate(max_hpid=Max('hpid'))['max_hpid']
+            Pixel.objects.filter(survey=survey_number).aggregate(max_hpid=Max("hpid"))[
+                "max_hpid"
+            ]
             or -1
         )
         start_index = last_hpid + 1

         pixel_generator = (
             Pixel(
                 hpid=i,
                 counts=int(count),
                 exposure=float(exposure),
-                survey=survey_number
+                survey=survey_number,
             )
             for i, (count, exposure) in enumerate(zip(counts_data, exposure_data))
-            if i >= start_index
+            if i >= start_index
         )

         total_inserted = start_index

         # Process in batches
         for pixel_batch in batch(pixel_generator, BATCH_SIZE):
@@ -151,8 +140,6 @@ class Command(BaseCommand):
                 total_inserted += len(pixel_batch)
                 percentage = total_inserted / total_pixels * 100
                 timestamp = datetime.now().strftime("%H:%M:%S")
-                self.stdout.write(
-                    f"[{timestamp}] {percentage:.2f}% inserted"
-                )
+                self.stdout.write(f"[{timestamp}] {percentage:.2f}% inserted")

         self.stdout.write(f"Inserted a total of {total_inserted} pixels.")

View File

@@ -7,19 +7,19 @@
 from django.core.management.base import BaseCommand
 from django.db import transaction
+from uplim.models import Pixel, CatalogSource

 import pandas as pd
 import healpy as hp
 import numpy as np
 from astropy.coordinates import SkyCoord
-from uplim.models import Pixel, CatalogSource
 from itertools import islice
 from datetime import datetime

-BATCH_SIZE=900
+BATCH_SIZE = 900


 def batch(iterable, size):
     iterable = iter(iterable)
@@ -28,157 +28,165 @@ def batch(iterable, size):
         if not chunk:
             break
         yield chunk

 class Command(BaseCommand):
     help = "Set the 'contaminated' flag for all pixels based on the fluxes in the provided catalog."

     # COMMAND LINE ARGUMENTS
     # **********************
     def add_arguments(self, parser):
         parser.add_argument(
-            '--catalog',
-            type=str,
-            required=False,
-            help='Path to the catalog.dat file'
+            "--catalog", type=str, required=False, help="Path to the catalog.dat file"
         )

         # parser.add_argument(
         #     '--survey',
         #     type=int,
         #     required=False,
         #     help='integer number of the survey to set the flag for'
         # )

         parser.add_argument(
-            '--reset',
-            action='store_true',
+            "--reset",
+            action="store_true",
             default=False,
-            help='Reset the contamination flag across all pixels back to False.'
+            help="Reset the contamination flag across all pixels back to False.",
         )

     def handle(self, *args, **options):
         # RESET BEHAVIOR: SET CONTAMINATION FLAG TO FALSE FOR ALL PIXELS
         # **************************************************************
-        if options['reset']:
+        if options["reset"]:
             self.stdout.write("Resetting the contamination flag...")
-            Pixel.objects.update(contaminated = False)
+            Pixel.objects.update(contaminated=False)
             self.stdout.write("Done")
             return

-        if not options['catalog']:
+        if not options["catalog"]:
             self.stdout.write("No catalog file provided, exiting")
             return

-        catalog_file = options['catalog']
+        catalog_file = options["catalog"]
         self.stdout.write(f"Catalog file:\t{catalog_file}")

         # READ THE CATALOG FILE USING PANDAS READ_FWF
         # *******************************************
-        # Define column positions based on the byte ranges
+        # Define column positions based on the byte ranges
         colspecs = [
-            (0, 4),    # SrcID (1-4)
-            (5, 26),   # Name (6-26)
+            (0, 4),  # SrcID (1-4)
+            (5, 26),  # Name (6-26)
             (27, 37),  # RAdeg (28-37)
             (38, 48),  # DEdeg (39-48)
             (49, 55),  # ePos (50-55)
             (56, 63),  # Signi (57-63)
             (64, 76),  # Flux (65-76)
             (77, 89),  # e_Flux (78-89)
-            (90, 118), # CName (91-118)
-            (119, 120),# NewXray (120)
-            (121, 134) # Type (122-134)
+            (90, 118),  # CName (91-118)
+            (119, 120),  # NewXray (120)
+            (121, 134),  # Type (122-134)
         ]

         # Define column names
         colnames = [
-            "SrcID", "Name", "RAdeg", "DEdeg", "ePos", "Signi", "Flux",
-            "e_Flux", "CName", "NewXray", "Type"
+            "SrcID",
+            "Name",
+            "RAdeg",
+            "DEdeg",
+            "ePos",
+            "Signi",
+            "Flux",
+            "e_Flux",
+            "CName",
+            "NewXray",
+            "Type",
         ]

         # Read the file using the fixed-width format
         catalog = pd.read_fwf(catalog_file, colspecs=colspecs, names=colnames)

-        for col in ['Name', 'CName', 'Type']:
-            catalog[col] = catalog[col].fillna('')
+        for col in ["Name", "CName", "Type"]:
+            catalog[col] = catalog[col].fillna("")

         self.stdout.write(str(catalog.head()))

         # LOAD THE CATALOG INTO THE DATABASE
         # **********************************
-        existing_srcids = set(
-            CatalogSource.objects.values_list('srcid', flat=True)
-        )
+        existing_srcids = set(CatalogSource.objects.values_list("srcid", flat=True))

         to_create = []

         for _, row in catalog.iterrows():
-            srcid = int(row['SrcID'])
+            srcid = int(row["SrcID"])
             if srcid in existing_srcids:
                 continue
             to_create.append(
                 CatalogSource(
-                    srcid = srcid,
-                    name = row['Name'].strip(),
-                    ra_deg = float(row['RAdeg']),
-                    dec_deg = float(row['DEdeg']),
-                    pos_error = float(row['ePos']),
-                    significance = float(row['Signi']),
-                    flux = float(row['Flux']),
-                    flux_error = float(row['e_Flux']),
-                    catalog_name = row['CName'].strip(),
-                    new_xray = bool(int(row['NewXray'])),
-                    source_type = row['Type'].strip()
+                    srcid=srcid,
+                    name=row["Name"].strip(),
+                    ra_deg=float(row["RAdeg"]),
+                    dec_deg=float(row["DEdeg"]),
+                    pos_error=float(row["ePos"]),
+                    significance=float(row["Signi"]),
+                    flux=float(row["Flux"]),
+                    flux_error=float(row["e_Flux"]),
+                    catalog_name=row["CName"].strip(),
+                    new_xray=bool(int(row["NewXray"])),
+                    source_type=row["Type"].strip(),
                 )
             )

         if to_create:
-            self.stdout.write(f'Inserting {len(to_create)} new catalog rows.')
+            self.stdout.write(f"Inserting {len(to_create)} new catalog rows.")
             for chunk in batch(to_create, BATCH_SIZE):
                 CatalogSource.objects.bulk_create(chunk, ignore_conflicts=True)
-            self.stdout.write('Catalog update complete.')
+            self.stdout.write("Catalog update complete.")
         else:
-            self.stdout.write('All catalog rows already exist in the database.')
+            self.stdout.write("All catalog rows already exist in the database.")

         # hard coded nside and flux-radius mapping
         # maybe change that
         nside = 4096
         npix = hp.nside2npix(nside)

-        flux_bins = [0, 125, 250, 2000, 20000, np.inf] # define bin edges
-        mask_radii_deg = [ 0.06, 0.15, 0.5, 0.9, 2.5 ] # corresponding mask radii in degrees
+        flux_bins = [0, 125, 250, 2000, 20000, np.inf]  # define bin edges
+        mask_radii_deg = [
+            0.06,
+            0.15,
+            0.5,
+            0.9,
+            2.5,
+        ]  # corresponding mask radii in degrees

         # Convert mask radii from degrees to radians (required by query_disc)
         mask_radii = [np.radians(r) for r in mask_radii_deg]

         # Use pandas.cut to assign each source a bin index (0, 1, or 2)
-        catalog['flux_bin'] = pd.cut(catalog['Flux'], bins=flux_bins, labels=False)
+        catalog["flux_bin"] = pd.cut(catalog["Flux"], bins=flux_bins, labels=False)

         # manually add and change some sources
         manual_additions = pd.DataFrame(
             [
-                {'RAdeg' : 279.9804336, 'DEdeg' : 5.0669542, 'flux_bin' : 3},
-                {'RAdeg' : 266.5173685, 'DEdeg' : -29.1252321, 'flux_bin' : 3},
+                {"RAdeg": 279.9804336, "DEdeg": 5.0669542, "flux_bin": 3},
+                {"RAdeg": 266.5173685, "DEdeg": -29.1252321, "flux_bin": 3},
             ]
         )
         catalog = pd.concat([catalog, manual_additions], ignore_index=True)

-        catalog.loc[catalog['SrcID'] == 1101, 'flux_bin'] = 2
+        catalog.loc[catalog["SrcID"] == 1101, "flux_bin"] = 2

         mask_array = np.ones(npix, dtype=bool)
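Worth noting: pd.cut with labels=False returns the integer index of the bin each flux falls into, which is what lets flux_bin double as an index into mask_radii. A standalone demonstration with invented fluxes:

    import numpy as np
    import pandas as pd

    flux_bins = [0, 125, 250, 2000, 20000, np.inf]
    mask_radii_deg = [0.06, 0.15, 0.5, 0.9, 2.5]

    fluxes = pd.Series([80.0, 300.0, 50000.0])           # invented sample values
    bins = pd.cut(fluxes, bins=flux_bins, labels=False)  # -> 0, 2, 4
    radii = [mask_radii_deg[int(b)] for b in bins]       # -> 0.06, 0.5, 2.5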
@@ -188,62 +196,58 @@ class Command(BaseCommand):
         # process each source in the catalog
         for _, row in catalog.iterrows():
-            ra = row['RAdeg']
-            dec = row['DEdeg']
-            src_coord = SkyCoord(
-                ra, dec, unit = 'deg', frame = 'icrs'
-            )
+            ra = row["RAdeg"]
+            dec = row["DEdeg"]
+            src_coord = SkyCoord(ra, dec, unit="deg", frame="icrs")
             gal = src_coord.galactic
             ra, dec = gal.l.deg, gal.b.deg

-            flux_bin = row['flux_bin'] # 0, 1, or 2
+            flux_bin = row["flux_bin"]  # 0, 1, or 2

             # Get the corresponding mask radius (in radians) for this flux bin
             radius = mask_radii[flux_bin]

             # Convert (ra, dec) to HEALPix spherical coordinates
             theta = np.radians(90.0 - dec)
             phi = np.radians(ra)
             vec = hp.ang2vec(theta, phi)

             # Query all pixels within the given radius
             # 'inclusive=True' makes sure pixels on the edge are included
             pix_indices = hp.query_disc(nside, vec, radius, inclusive=True)

             # Mark these pixels as bad (False) in our mask
             mask_array[pix_indices] = False

             # Add the pixel indices to our set of masked pixels
             masked_pixels_set.update(pix_indices)

         # Convert the set of masked pixels to a sorted list.
         masked_pixels_list = sorted(list(masked_pixels_set))
         # print("Number of masked pixels:", len(masked_pixels_list))

         self.stdout.write("\nList ready, updating the database...")

         if not masked_pixels_list:
             self.stdout.write("No pixels marked as contaminated, exiting.")
             return

         total = len(masked_pixels_list)
         updated = 0

-        self.stdout.write(f'\nUpdating contaminated flag in batches of {BATCH_SIZE}')
+        self.stdout.write(f"\nUpdating contaminated flag in batches of {BATCH_SIZE}")

         for chunk in batch(masked_pixels_list, BATCH_SIZE):
             with transaction.atomic():
                 Pixel.objects.filter(hpid__in=chunk).update(contaminated=True)
             updated += len(chunk)
             percentage = updated / total * 100
             timestamp = datetime.now().strftime("%H:%M:%S")
-            self.stdout.write(f'[{timestamp}] {updated}/{total} ({percentage:.1f}%) updated')
-
-        self.stdout.write(f'\n Marked {updated} pixels as contaminated.')
+            self.stdout.write(
+                f"[{timestamp}] {updated}/{total} ({percentage:.1f}%) updated"
+            )
+
+        self.stdout.write(f"\n Marked {updated} pixels as contaminated.")

View File

@@ -4,51 +4,47 @@ from django.db import models
 from django.db.models import UniqueConstraint

 class Pixel(models.Model):
-    #id = models.AutoField(primary_key=True) # ~200 million pixels for a 4096 survey
+    # id = models.AutoField(primary_key=True) # ~200 million pixels for a 4096 survey
     # no need to set explicitly

     # WILL ONLY HOLD 10 SURVEYS AS AN AUTOFIELD (IntegerField, ~2 billion limit)
     # BIGAUTOFIELD WILL BE REQUIRED FOR MORE!
-    survey       = models.PositiveSmallIntegerField()
-    hpid         = models.IntegerField(db_index=True) # up to over 200 million
-    counts       = models.IntegerField() # f4, up to ~44k integer: 2 byte too small
-    exposure     = models.FloatField() # f4, up to ~13300 float
+    survey = models.PositiveSmallIntegerField()
+    hpid = models.IntegerField(db_index=True)  # up to over 200 million
+    counts = models.IntegerField()  # f4, up to ~44k integer: 2 byte too small
+    exposure = models.FloatField()  # f4, up to ~13300 float
     contaminated = models.BooleanField(default=False)

     def __str__(self):
         return f"Pixel {self.id} hpid {self.hpid} (Survey {self.survey.number})"

 class CatalogSource(models.Model):
-    srcid         = models.SmallIntegerField(primary_key=True)
-    name          = models.CharField(max_length=21)
-    ra_deg        = models.FloatField()
-    dec_deg       = models.FloatField()
-    pos_error     = models.FloatField()
+    srcid = models.SmallIntegerField(primary_key=True)
+    name = models.CharField(max_length=21)
+    ra_deg = models.FloatField()
+    dec_deg = models.FloatField()
+    pos_error = models.FloatField()
     significance = models.FloatField()
-    flux          = models.FloatField()
-    flux_error    = models.FloatField()
+    flux = models.FloatField()
+    flux_error = models.FloatField()
     catalog_name = models.CharField(max_length=28)
-    new_xray      = models.BooleanField(default=False)
-    source_type   = models.CharField(max_length=13)
+    new_xray = models.BooleanField(default=False)
+    source_type = models.CharField(max_length=13)
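The UniqueConstraint import suggests the truncated Meta (outside this hunk) enforces one row per (survey, hpid) pair; an assumed sketch of what that might look like, not code from the commit:

    class Meta:
        # assumption: each HEALPix pixel appears once per survey
        constraints = [
            UniqueConstraint(fields=["survey", "hpid"], name="unique_survey_hpid")
        ]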

View File

@@ -1,8 +1,3 @@
 # uplim/serializers.py
 from rest_framework import serializers
 from uplim.models import Pixel

-# class PixelSerializer(serializers.ModelSerializer):
-#     class Meta:
-#         model = Pixel
-#         fields = ('hpid', 'counts', 'exposure', 'contaminated')

View File

@@ -1,10 +1,10 @@
 # uplim/urls.py
 from django.urls import path
-from .views import PixelAggregateView, UpperLimitView #, PixelDetailView
+from .views import PixelAggregateView, UpperLimitView  # , PixelDetailView

 urlpatterns = [
-    #path('pixel/<int:hpid>/', PixelDetailView.as_view(), name='pixel-detail'),
-    path('pixel-aggregate/', PixelAggregateView.as_view(), name='pixel-aggregate'),
-    path('upper-limit/', UpperLimitView.as_view(), name='upper-limit'),
+    # path('pixel/<int:hpid>/', PixelDetailView.as_view(), name='pixel-detail'),
+    path("pixel-aggregate/", PixelAggregateView.as_view(), name="pixel-aggregate"),
+    path("upper-limit/", UpperLimitView.as_view(), name="upper-limit"),
 ]

views.py
View File

@@ -4,25 +4,24 @@
 # search for pixels non-inclusively
 import healpy as hp
 import astropy.units as u
-from astropy.coordinates import SkyCoord, Angle
 import numpy as np
 import scipy.special as sp
+from astropy.coordinates import SkyCoord, Angle
 from astropy.stats import poisson_conf_interval

 from django.db.models import Sum
-from django.shortcuts import get_object_or_404

 from rest_framework.views import APIView
 from rest_framework.response import Response
 from rest_framework import status
+from django.shortcuts import get_object_or_404

 from uplim.models import Pixel, CatalogSource

 # SANITIZE RESPONSE DATA BEFORE JSON CONVERSION FOR DEBUGGING NANS
 # now NaNs are converted to 'null' beforehand
 # ****************************************************************
 def sanitize(obj):
     if isinstance(obj, dict):
         return {k: sanitize(v) for k, v in obj.items()}
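Only the dict branch of sanitize() lands inside the hunk; a plausible completion, assuming list and float branches from its stated NaN-to-null purpose:

    import math

    def sanitize(obj):
        # recursively replace non-finite floats with None so the JSON
        # encoder emits null instead of invalid NaN tokens
        if isinstance(obj, dict):
            return {k: sanitize(v) for k, v in obj.items()}
        if isinstance(obj, (list, tuple)):
            return [sanitize(v) for v in obj]
        if isinstance(obj, float) and not math.isfinite(obj):
            return None
        return obj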
@@ -52,17 +51,17 @@ def parse_survey_param(raw):
     return sorted(surveys)
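parse_survey_param's body is elided except for its last line; a sketch consistent with the documented N, N,M and N-M formats (an assumption, not the committed code):

    def parse_survey_param(raw):
        # "1,3-5" -> [1, 3, 4, 5]; int() raises ValueError on malformed parts
        surveys = set()
        for part in raw.split(","):
            if "-" in part:
                lo, hi = part.split("-")
                surveys.update(range(int(lo), int(hi) + 1))
            else:
                surveys.add(int(part))
        return sorted(surveys)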
 # PIXEL VIEW (MOSTLY FOR TESTING)
 # add healpix indices into the output
 # **************************************************************
 class PixelAggregateView(APIView):
     def get(self, request):
         # GET PARAMETERS FROM THE QUERY
         # **************************************************************
-        raw_pixel  = request.query_params.get("pixel")
+        raw_pixel = request.query_params.get("pixel")
         raw_survey = request.query_params.get("survey")

         # 400 BADREQUEST
@@ -70,7 +69,7 @@ class PixelAggregateView(APIView):
         if raw_pixel is None or raw_survey is None:
             return Response(
                 {"detail": "Both `pixel` and `survey` parameters are required."},
-                status=status.HTTP_400_BAD_REQUEST
+                status=status.HTTP_400_BAD_REQUEST,
             )

         # FILTER THE INPUTS
@@ -80,7 +79,7 @@ class PixelAggregateView(APIView):
         except ValueError:
             return Response(
                 {"detail": "`pixel` must be an integer."},
-                status=status.HTTP_400_BAD_REQUEST
+                status=status.HTTP_400_BAD_REQUEST,
             )

         try:
@@ -88,44 +87,33 @@ class PixelAggregateView(APIView):
         except ValueError:
             return Response(
                 {"detail": "Malformed `survey`; use N, N,M or N-M formats."},
-                status=status.HTTP_400_BAD_REQUEST
+                status=status.HTTP_400_BAD_REQUEST,
             )

         # FILTER AND AGGREGATE
         # **************************************************************
-        qs = Pixel.objects.filter(
-            hpid=hpid,
-            survey__in=survey_numbers
-        )
+        qs = Pixel.objects.filter(hpid=hpid, survey__in=survey_numbers)

         if not qs.exists():
             # no matching pixel(s) → 404
-            get_object_or_404(
-                Pixel,
-                hpid=hpid,
-                survey__in=survey_numbers
-            )
+            get_object_or_404(Pixel, hpid=hpid, survey__in=survey_numbers)

         aggregates = qs.aggregate(
-            #pixel_hpid=hpid,
-            #survey_number=survey_numbers,
+            # pixel_hpid=hpid,
+            # survey_number=survey_numbers,
             total_counts=Sum("counts"),
-            total_exposure=Sum("exposure")
+            total_exposure=Sum("exposure"),
         )

-        plusdata = {
-            'pixel_hpid' : hpid,
-            'surveys' : survey_numbers
-        }
+        plusdata = {"pixel_hpid": hpid, "surveys": survey_numbers}

         result = {**aggregates, **plusdata}

         # RETURN THE SUMS
         # **************************************************************
         return Response(result, status=status.HTTP_200_OK)

 # UPPER LIMIT COMPUTATION VIEW
 # **************************************************************
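A quick way to exercise this endpoint from a test or Django shell, assuming the app's urlpatterns are mounted at the site root (pixel and survey values invented):

    from django.test import Client

    resp = Client().get("/pixel-aggregate/", {"pixel": 123456, "survey": "1,3-5"})
    # e.g. {"total_counts": ..., "total_exposure": ...,
    #       "pixel_hpid": 123456, "surveys": [1, 3, 4, 5]}
    print(resp.json())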
@@ -134,317 +122,303 @@ class UpperLimitView(APIView):
     """
     Calculate confidence bounds based on aperture photometry using classic and bayesian methods
     """

     def get(self, request):
         # GET PARAMETERS FROM THE REQUEST
         # **************************************************************
         try:
-            ra = float(request.query_params.get('ra'))
-            dec = float(request.query_params.get('dec'))
-            confidence_level = float(request.query_params.get('cl'))
+            ra = float(request.query_params.get("ra"))
+            dec = float(request.query_params.get("dec"))
+            confidence_level = float(request.query_params.get("cl"))
         except (TypeError, ValueError):
             return Response(
                 {"error": "Invalid parameters, provide RA, DEC, and CL"},
-                status = status.HTTP_400_BAD_REQUEST
+                status=status.HTTP_400_BAD_REQUEST,
             )

         # ── NEW: pull & parse survey selection ──
-        raw_survey = request.query_params.get('survey')
+        raw_survey = request.query_params.get("survey")
         if raw_survey is None:
             return Response(
                 {"error": "Missing required `survey` parameter (e.g. ?survey=1,3-5)"},
-                status=status.HTTP_400_BAD_REQUEST
+                status=status.HTTP_400_BAD_REQUEST,
             )
         try:
             survey_numbers = parse_survey_param(raw_survey)
         except ValueError:
             return Response(
-                {"error": "Malformed `survey`; use formats like 1, 2-5, or 1,3-4"},
-                status=status.HTTP_400_BAD_REQUEST
-            )
+                {"error": "Malformed `survey`; use formats like 1, 2-5, or 1,3-4"},
+                status=status.HTTP_400_BAD_REQUEST,
+            )

         # hp = HEALPix(
         #     nside = 4096,
         #     order = 'ring',
         #     frame = 'icrs'
         # )

         # CREATE SKYCOORD, CONVERT TO GALACTIC BECAUSE THE HEALPIX
         # MAP ITSELF WAS MADE IN GALACTIC COORDINATES
         # **************************************************************
-        src_coord = SkyCoord(
-            ra, dec, unit = 'deg', frame = 'icrs'
-        )
+        src_coord = SkyCoord(ra, dec, unit="deg", frame="icrs")
         gal = src_coord.galactic
-        src_vec = hp.ang2vec(gal.l.deg, gal.b.deg, lonlat = True)
+        src_vec = hp.ang2vec(gal.l.deg, gal.b.deg, lonlat=True)

         # DEFINE APERTURE AND ANNULUS RADII
         # **************************************************************
-        aperture_radius = 71 # radius of the aperture in arc seconds
+        aperture_radius = 71  # radius of the aperture in arc seconds
         # HPD ~48 arcseconds
         # 90% ~100 arcseconds
-        annulus_inner = 142 # 2 * aperture_radius
-        annulus_outer = 284 # 4 * aperture_radius
+        annulus_inner = 142  # 2 * aperture_radius
+        annulus_outer = 284  # 4 * aperture_radius
# **************************************************************
source_pixel_list = hp.query_disc(
nside = 4096,
vec = src_vec,
inclusive = False,
nest = False,
radius = (aperture_radius * u.arcsecond).to(u.radian).value
nside=4096,
vec=src_vec,
inclusive=False,
nest=False,
radius=(aperture_radius * u.arcsecond).to(u.radian).value,
)
inner_pixel_list = hp.query_disc(
nside = 4096,
vec = src_vec,
inclusive = False,
nest = False,
radius = (annulus_inner * u.arcsecond).to(u.radian).value
nside=4096,
vec=src_vec,
inclusive=False,
nest=False,
radius=(annulus_inner * u.arcsecond).to(u.radian).value,
)
outer_pixel_list = hp.query_disc(
nside = 4096,
vec = src_vec,
inclusive = False,
nest = False,
radius = (annulus_outer * u.arcsecond).to(u.radian).value
nside=4096,
vec=src_vec,
inclusive=False,
nest=False,
radius=(annulus_outer * u.arcsecond).to(u.radian).value,
)
annulus_pixel_list = [
item for item in outer_pixel_list if item not in inner_pixel_list
]
         source_pixels = Pixel.objects.filter(
-            hpid__in = source_pixel_list,
-            survey__in = survey_numbers
+            hpid__in=source_pixel_list, survey__in=survey_numbers
         )
         annulus_pixels = Pixel.objects.filter(
-            hpid__in = annulus_pixel_list,
-            survey__in = survey_numbers
+            hpid__in=annulus_pixel_list, survey__in=survey_numbers
         )
         # check contamination
         contamination = (
-            source_pixels.filter(contaminated=True).exists() or
-            annulus_pixels.filter(contaminated=True).exists()
+            source_pixels.filter(contaminated=True).exists()
+            or annulus_pixels.filter(contaminated=True).exists()
         )

         if not source_pixels.exists() and not annulus_pixels.exists():
             return Response(
                 {"detail": "No pixel data for the given survey selection."},
-                status=status.HTTP_404_NOT_FOUND
-            )
+                status=status.HTTP_404_NOT_FOUND,
+            )

         # COMPUTE COUNTS, BACKGROUND ESTIMATE, EXPOSURE
         # **************************************************************
         N = sum(obj.counts for obj in source_pixels)
         Nnpix = len(source_pixels)

         Bcounts = sum(obj.counts for obj in annulus_pixels)
         Bnpix = len(annulus_pixels)

         B = Bcounts / Bnpix * Nnpix

         tsum = sum(obj.exposure for obj in source_pixels)
         t = tsum / Nnpix

         # CONSTANTS
         # **************************************************************
-        #EEF = .9 # eclosed energy fraction, .5 for hpd, .9 for w90
-        #ECF = 4e-11 # energy conversion factor
-        EEF = .80091 # use values from the paper
+        # EEF = .9  # enclosed energy fraction, .5 for hpd, .9 for w90
+        # ECF = 4e-11  # energy conversion factor
+        EEF = 0.80091  # use values from the paper
         ECF = 3.3423184e-11
         # BAYESIAN IMPLEMENTATION VIA POISSON_CONF_INTERVAL
         # **************************************************************
         low, high = poisson_conf_interval(
-            n = N,
-            background = B,
-            interval = 'kraft-burrows-nousek',
-            confidence_level=confidence_level
+            n=N,
+            background=B,
+            interval="kraft-burrows-nousek",
+            confidence_level=confidence_level,
         )

         bayesian_count_ul = high
         bayesian_count_ll = low

-        bayesian_rate_ul = bayesian_count_ul / t / EEF # count rate limits
+        bayesian_rate_ul = bayesian_count_ul / t / EEF  # count rate limits
         bayesian_rate_ll = bayesian_count_ll / t / EEF
-        bayesian_flux_ul = bayesian_rate_ul * ECF # flux limits
+        bayesian_flux_ul = bayesian_rate_ul * ECF  # flux limits
         bayesian_flux_ll = bayesian_rate_ll * ECF

         # CLASSICAL IMPLEMENTATION VIA GAMMAINCCINV
         # ****************************************************************
-        classic_count_ul = sp.gammainccinv(N+1, 1 - confidence_level) - B
+        classic_count_ul = sp.gammainccinv(N + 1, 1 - confidence_level) - B
         classic_count_ll = sp.gammainccinv(N, confidence_level) - B

         if not np.isfinite(classic_count_ll) or classic_count_ll < 0:
             classic_count_ll = 0.0

-        classic_rate_ul = classic_count_ul / t / EEF # count rate limits
-        classic_rate_ll = classic_count_ll / t / EEF
-        classic_flux_ul = classic_rate_ul * ECF # flux limits
+        classic_rate_ul = classic_count_ul / t / EEF  # count rate limits
+        classic_rate_ll = classic_count_ll / t / EEF
+        classic_flux_ul = classic_rate_ul * ECF  # flux limits
         classic_flux_ll = classic_rate_ll * ECF
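The gammainccinv calls encode the classical construction: for integer N, the regularized upper incomplete gamma function Q(N + 1, lam) equals the Poisson CDF P(X <= N; lam), so the upper limit solves P(X <= N; lam) = 1 - CL for lam and then subtracts the background estimate. A quick numerical check (numbers invented):

    import numpy as np
    import scipy.special as sp
    from scipy.stats import poisson

    N, B, cl = 12, 4.2, 0.95
    lam = sp.gammainccinv(N + 1, 1 - cl)  # gammaincc(N + 1, lam) == 1 - cl
    assert np.isclose(poisson.cdf(N, lam), 1 - cl)
    classic_count_ul = lam - B  # matches the line above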
         # FLUX ESTIMATION
         # ****************************************************************
-        S = N - B # counts as simply counts within aperture
-                  # with the background estimate subtracted
-        CR = S / t / EEF # source count rate
-        BR = B / t # background rate within aperture
-        FL = CR * ECF # conversion to flux
-        Flux = max(FL, 0) # flux cannot be lower than zero
+        S = N - B  # counts as simply counts within aperture
+        # with the background estimate subtracted
+        CR = S / t / EEF  # source count rate
+        BR = B / t  # background rate within aperture
+        FL = CR * ECF  # conversion to flux
+        Flux = max(FL, 0)  # flux cannot be lower than zero
         # NEARBY SOURCES CHECK
         # ****************************************************************
         radius_as = 120
         radius_deg = radius_as / 3600

         dec_min = max(dec - radius_deg, -90)
-        dec_max = min(dec + radius_deg, 90)
+        dec_max = min(dec + radius_deg, 90)

         # cheap belt query
         belt_sources = CatalogSource.objects.filter(
-            dec_deg__gte = dec_min,
-            dec_deg__lte = dec_max
+            dec_deg__gte=dec_min, dec_deg__lte=dec_max
         )

-        center_coord = SkyCoord(ra, dec, unit='deg')
+        center_coord = SkyCoord(ra, dec, unit="deg")

         nearby_sources = []

-        #refine belt to circular region using astropy separation
+        # refine belt to circular region using astropy separation
         for catsrc in belt_sources:
-            catsrc_coord = SkyCoord(catsrc.ra_deg, catsrc.dec_deg, unit='deg')
+            catsrc_coord = SkyCoord(catsrc.ra_deg, catsrc.dec_deg, unit="deg")
             if center_coord.separation(catsrc_coord).deg <= radius_deg:
                 nearby_sources.append(
                     {
-                        'srcid' : catsrc.srcid,
-                        'name' : catsrc.name,
-                        'ra_deg' : catsrc.ra_deg,
-                        'dec_deg' : catsrc.dec_deg,
-                        'pos_error' : catsrc.pos_error,
-                        'significance' : catsrc.significance,
-                        'flux' : catsrc.flux,
-                        'flux_error' : catsrc.flux_error,
-                        'catalog_name' : catsrc.catalog_name,
-                        'new_xray' : catsrc.new_xray,
-                        'source_type' : catsrc.source_type
+                        "srcid": catsrc.srcid,
+                        "name": catsrc.name,
+                        "ra_deg": catsrc.ra_deg,
+                        "dec_deg": catsrc.dec_deg,
+                        "pos_error": catsrc.pos_error,
+                        "significance": catsrc.significance,
+                        "flux": catsrc.flux,
+                        "flux_error": catsrc.flux_error,
+                        "catalog_name": catsrc.catalog_name,
+                        "new_xray": catsrc.new_xray,
+                        "source_type": catsrc.source_type,
                     }
                 )
         # SQUARE REGION IMAGE SERVING
         # ****************************************************************
-        # get hpids within a circle of radius sqrt(2) * outer annulus radius
+        # get hpids within a circle of radius sqrt(2) * outer annulus radius
         map_radius = annulus_outer * np.sqrt(2)

         map_pixel_list = hp.query_disc(
-            nside = 4096,
-            vec = src_vec,
-            inclusive = False,
-            nest = False,
-            radius = (map_radius * u.arcsecond).to(u.radian).value
+            nside=4096,
+            vec=src_vec,
+            inclusive=False,
+            nest=False,
+            radius=(map_radius * u.arcsecond).to(u.radian).value,
         )

         # fetch those pixels for the requested surveys
         # summing counts and sorting by hpid
         map_pixels_qs = (
-            Pixel.objects
-            .filter(hpid__in = map_pixel_list, survey__in = survey_numbers)
-            .values('hpid')
-            .annotate(counts=Sum('counts'))
-            .order_by('hpid')
+            Pixel.objects.filter(hpid__in=map_pixel_list, survey__in=survey_numbers)
+            .values("hpid")
+            .annotate(counts=Sum("counts"))
+            .order_by("hpid")
         )

         # turn the queryset to a list
         map_pixels_list = list(map_pixels_qs)

         # get lists of healpix indices and count values
-        map_healpix_list = [d['hpid'] for d in map_pixels_list]
-        map_counts_list = [d['counts'] for d in map_pixels_list]
+        map_healpix_list = [d["hpid"] for d in map_pixels_list]
+        map_counts_list = [d["counts"] for d in map_pixels_list]

         # set map nside
         map_nside = 4096
         # set map order
-        map_order = 'ring'
+        map_order = "ring"

         # assemble the result dict
         map_dict = {
-            'healpix' : map_healpix_list,
-            'counts' : map_counts_list,
-            'nside' : map_nside,
-            'order' : map_order,
-            'radius_as' : map_radius
+            "healpix": map_healpix_list,
+            "counts": map_counts_list,
+            "nside": map_nside,
+            "order": map_order,
+            "radius_as": map_radius,
         }
         # RESULT JSON
         # ****************************************************************
         result = {
             # frequentist limits
-            'ClassicUpperLimit' : classic_count_ul,
-            'ClassicLowerLimit' : classic_count_ll,
-            'ClassicCountRateUpperLimit' : classic_rate_ul,
-            'ClassicCountRateLowerLimit' : classic_rate_ll,
-            'ClassicFluxUpperLimit' : classic_flux_ul,
-            'ClassicFluxLowerLimit' : classic_flux_ll,
+            "ClassicUpperLimit": classic_count_ul,
+            "ClassicLowerLimit": classic_count_ll,
+            "ClassicCountRateUpperLimit": classic_rate_ul,
+            "ClassicCountRateLowerLimit": classic_rate_ll,
+            "ClassicFluxUpperLimit": classic_flux_ul,
+            "ClassicFluxLowerLimit": classic_flux_ll,
             # bayesian limits
-            'BayesianUpperLimit' : bayesian_count_ul,
-            'BayesianLowerLimit' : bayesian_count_ll,
-            'BayesianCountRateUpperLimit' : bayesian_rate_ul,
-            'BayesianCountRateLowerLimit' : bayesian_rate_ll,
-            'BayesianFluxUpperLimit' : bayesian_flux_ul,
-            'BayesianFluxLowerLimit' : bayesian_flux_ll,
+            "BayesianUpperLimit": bayesian_count_ul,
+            "BayesianLowerLimit": bayesian_count_ll,
+            "BayesianCountRateUpperLimit": bayesian_rate_ul,
+            "BayesianCountRateLowerLimit": bayesian_rate_ll,
+            "BayesianFluxUpperLimit": bayesian_flux_ul,
+            "BayesianFluxLowerLimit": bayesian_flux_ll,
             # flux 'center value' estimate
-            'FluxEstimate' : Flux,
+            "FluxEstimate": Flux,
             # raw data
-            'ApertureCounts' : N,
-            'ApertureBackgroundCounts' : B,
-            'SourceCounts' : S,
-            'Exposure' : t,
+            "ApertureCounts": N,
+            "ApertureBackgroundCounts": B,
+            "SourceCounts": S,
+            "Exposure": t,
             # count rates
-            'SourceRate' : CR,
-            'BackgroundRate' : BR,
+            "SourceRate": CR,
+            "BackgroundRate": BR,
             # contamination
-            'Contamination' : contamination,
-            'NearbySources' : nearby_sources,
+            "Contamination": contamination,
+            "NearbySources": nearby_sources,
             # count map for the frontend image
-            'CountMap' : map_dict
+            "CountMap": map_dict,
         }

-        clean = sanitize(result) # calling sanitize() to convert NaN to null
+        clean = sanitize(result)  # calling sanitize() to convert NaN to null

         return Response(clean, status=status.HTTP_200_OK)
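And the same style of smoke test for the main endpoint, again assuming root-mounted urlpatterns; coordinates and survey selection are invented:

    from django.test import Client

    resp = Client().get(
        "/upper-limit/",
        {"ra": 266.5, "dec": -29.1, "cl": 0.95, "survey": "1"},
    )
    data = resp.json()
    print(data["ClassicFluxUpperLimit"], data["BayesianFluxUpperLimit"])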