code formatting

Никита Тырин 2025-05-19 15:09:04 +03:00
parent cf3213a0f9
commit 26f848d274
6 changed files with 373 additions and 417 deletions

View File

@ -1,21 +1,21 @@
 # uplim/management/commands/load_survey.py
 import numpy as np
 from astropy.io import fits
-from itertools import islice
-from datetime import datetime
 from django.core.management.base import BaseCommand
 from django.db import transaction
 from uplim.models import Pixel
 from django.db.models import Max
+from itertools import islice
+from datetime import datetime

 # DEFINE BATCH SIZE AND BATCH
 # **************************************************************
-#BATCH_SIZE = 1000000
+# BATCH_SIZE = 1000000

 def batch(iterable, size):
     """
@ -29,8 +29,6 @@ def batch(iterable, size):
         yield chunk

 class Command(BaseCommand):
     help = "Process FITS files and store the data in the database"
@ -39,40 +37,33 @@ class Command(BaseCommand):
     def add_arguments(self, parser):
         parser.add_argument(
-            '--counts',
-            type=str,
-            required=True,
-            help='Path of the counts file'
+            "--counts", type=str, required=True, help="Path of the counts file"
         )
         parser.add_argument(
-            '--exposure',
-            type=str,
-            required=True,
-            help='Path of the exposure file'
+            "--exposure", type=str, required=True, help="Path of the exposure file"
         )
         parser.add_argument(
-            '--survey_number',
+            "--survey_number",
             type=int,
             required=True,
-            help='Integer ID of the survey being read'
+            help="Integer ID of the survey being read",
         )
         parser.add_argument(
-            '--batch_size',
+            "--batch_size",
             type=int,
             default=1000,
-            help='Integer number of pixels to be inserted into the database at once'
+            help="Integer number of pixels to be inserted into the database at once",
         )

     def handle(self, *args, **options):

         # GET FILENAMES FROM ARGUMENTS
         # **************************************************************
-        counts_file = options['counts']
-        exposure_file = options['exposure']
-        survey_number = options['survey_number']
-        BATCH_SIZE = options['batch_size']
+        counts_file = options["counts"]
+        exposure_file = options["exposure"]
+        survey_number = options["survey_number"]
+        BATCH_SIZE = options["batch_size"]

         self.stdout.write(f"\nCounts file:\t{counts_file}")
         self.stdout.write(f"Exposure file:\t{exposure_file}")
@ -87,7 +78,6 @@ class Command(BaseCommand):
         counts_data = counts_map.ravel()

         with fits.open(exposure_file) as hdul:
             column_name = "T"
@ -104,7 +94,9 @@ class Command(BaseCommand):
         total_pixels = counts_data.shape[0]
         self.stdout.write(f"\nTotal pixels to insert:\t{total_pixels}")

-        assert counts_data.shape == exposure_data.shape, "Counts and exposure maps must have the same shape"
+        assert (
+            counts_data.shape == exposure_data.shape
+        ), "Counts and exposure maps must have the same shape"

         # CREATE THE SURVEY IF IT DOES NOT EXIST
         # **************************************************************
@ -122,27 +114,24 @@ class Command(BaseCommand):
         # **************************************************************
         last_hpid = (
-            Pixel.objects
-            .filter(survey=survey_number)
-            .aggregate(max_hpid=Max('hpid'))['max_hpid']
+            Pixel.objects.filter(survey=survey_number).aggregate(max_hpid=Max("hpid"))[
+                "max_hpid"
+            ]
             or -1
         )
         start_index = last_hpid + 1

         pixel_generator = (
             Pixel(
                 hpid=i,
                 counts=int(count),
                 exposure=float(exposure),
-                survey=survey_number
+                survey=survey_number,
             )
             for i, (count, exposure) in enumerate(zip(counts_data, exposure_data))
             if i >= start_index
         )

         total_inserted = start_index

         # Process in batches
         for pixel_batch in batch(pixel_generator, BATCH_SIZE):
@ -151,8 +140,6 @@ class Command(BaseCommand):
                 total_inserted += len(pixel_batch)
                 percentage = total_inserted / total_pixels * 100
                 timestamp = datetime.now().strftime("%H:%M:%S")
-                self.stdout.write(
-                    f"[{timestamp}] {percentage:.2f}% inserted"
-                )
+                self.stdout.write(f"[{timestamp}] {percentage:.2f}% inserted")

         self.stdout.write(f"Inserted a total of {total_inserted} pixels.")

View File

@ -7,19 +7,19 @@
 from django.core.management.base import BaseCommand
 from django.db import transaction
-from uplim.models import Pixel, CatalogSource
 import pandas as pd
 import healpy as hp
 import numpy as np
 from astropy.coordinates import SkyCoord
+from uplim.models import Pixel, CatalogSource
 from itertools import islice
 from datetime import datetime

-BATCH_SIZE=900
+BATCH_SIZE = 900

 def batch(iterable, size):
     iterable = iter(iterable)
@ -39,10 +39,7 @@ class Command(BaseCommand):
     def add_arguments(self, parser):
         parser.add_argument(
-            '--catalog',
-            type=str,
-            required=False,
-            help='Path to the catalog.dat file'
+            "--catalog", type=str, required=False, help="Path to the catalog.dat file"
         )

         # parser.add_argument(
@ -53,10 +50,10 @@ class Command(BaseCommand):
         # )

         parser.add_argument(
-            '--reset',
-            action='store_true',
+            "--reset",
+            action="store_true",
             default=False,
-            help='Reset the contamination flag across all pixels back to False.'
+            help="Reset the contamination flag across all pixels back to False.",
         )

     def handle(self, *args, **options):
@ -64,20 +61,20 @@ class Command(BaseCommand):
         # RESET BEHAVIOR: SET CONTAMINATION FLAG TO FALSE FOR ALL PIXELS
         # **************************************************************
-        if options['reset']:
+        if options["reset"]:
             self.stdout.write("Resetting the contamination flag...")
-            Pixel.objects.update(contaminated = False)
+            Pixel.objects.update(contaminated=False)
             self.stdout.write("Done")
             return

-        if not options['catalog']:
+        if not options["catalog"]:
             self.stdout.write("No catalog file provided, exiting")
             return

-        catalog_file = options['catalog']
+        catalog_file = options["catalog"]
         self.stdout.write(f"Catalog file:\t{catalog_file}")
@ -86,70 +83,77 @@ class Command(BaseCommand):
         # Define column positions based on the byte ranges
         colspecs = [
             (0, 4),  # SrcID (1-4)
             (5, 26),  # Name (6-26)
             (27, 37),  # RAdeg (28-37)
             (38, 48),  # DEdeg (39-48)
             (49, 55),  # ePos (50-55)
             (56, 63),  # Signi (57-63)
             (64, 76),  # Flux (65-76)
             (77, 89),  # e_Flux (78-89)
             (90, 118),  # CName (91-118)
-            (119, 120),# NewXray (120)
-            (121, 134) # Type (122-134)
+            (119, 120),  # NewXray (120)
+            (121, 134),  # Type (122-134)
         ]

         # Define column names
         colnames = [
-            "SrcID", "Name", "RAdeg", "DEdeg", "ePos", "Signi", "Flux",
-            "e_Flux", "CName", "NewXray", "Type"
+            "SrcID",
+            "Name",
+            "RAdeg",
+            "DEdeg",
+            "ePos",
+            "Signi",
+            "Flux",
+            "e_Flux",
+            "CName",
+            "NewXray",
+            "Type",
         ]

         # Read the file using the fixed-width format
         catalog = pd.read_fwf(catalog_file, colspecs=colspecs, names=colnames)

-        for col in ['Name', 'CName', 'Type']:
-            catalog[col] = catalog[col].fillna('')
+        for col in ["Name", "CName", "Type"]:
+            catalog[col] = catalog[col].fillna("")

         self.stdout.write(str(catalog.head()))

         # LOAD THE CATALOG INTO THE DATABASE
         # **********************************
-        existing_srcids = set(
-            CatalogSource.objects.values_list('srcid', flat=True)
-        )
+        existing_srcids = set(CatalogSource.objects.values_list("srcid", flat=True))

         to_create = []

         for _, row in catalog.iterrows():
-            srcid = int(row['SrcID'])
+            srcid = int(row["SrcID"])
             if srcid in existing_srcids:
                 continue
             to_create.append(
                 CatalogSource(
-                    srcid = srcid,
-                    name = row['Name'].strip(),
-                    ra_deg = float(row['RAdeg']),
-                    dec_deg = float(row['DEdeg']),
-                    pos_error = float(row['ePos']),
-                    significance = float(row['Signi']),
-                    flux = float(row['Flux']),
-                    flux_error = float(row['e_Flux']),
-                    catalog_name = row['CName'].strip(),
-                    new_xray = bool(int(row['NewXray'])),
-                    source_type = row['Type'].strip()
+                    srcid=srcid,
+                    name=row["Name"].strip(),
+                    ra_deg=float(row["RAdeg"]),
+                    dec_deg=float(row["DEdeg"]),
+                    pos_error=float(row["ePos"]),
+                    significance=float(row["Signi"]),
+                    flux=float(row["Flux"]),
+                    flux_error=float(row["e_Flux"]),
+                    catalog_name=row["CName"].strip(),
+                    new_xray=bool(int(row["NewXray"])),
+                    source_type=row["Type"].strip(),
                 )
             )

         if to_create:
-            self.stdout.write(f'Inserting {len(to_create)} new catalog rows.')
+            self.stdout.write(f"Inserting {len(to_create)} new catalog rows.")
             for chunk in batch(to_create, BATCH_SIZE):
                 CatalogSource.objects.bulk_create(chunk, ignore_conflicts=True)
-            self.stdout.write('Catalog update complete.')
+            self.stdout.write("Catalog update complete.")
         else:
-            self.stdout.write('All catalog rows already exist in the database.')
+            self.stdout.write("All catalog rows already exist in the database.")

         # hard coded nside and flux-radius mapping
         # maybe change that
@ -157,28 +161,32 @@ class Command(BaseCommand):
         nside = 4096
         npix = hp.nside2npix(nside)

         flux_bins = [0, 125, 250, 2000, 20000, np.inf]  # define bin edges
-        mask_radii_deg = [ 0.06, 0.15, 0.5, 0.9, 2.5 ] # corresponding mask radii in degrees
+        mask_radii_deg = [
+            0.06,
+            0.15,
+            0.5,
+            0.9,
+            2.5,
+        ]  # corresponding mask radii in degrees

         # Convert mask radii from degrees to radians (required by query_disc)
         mask_radii = [np.radians(r) for r in mask_radii_deg]

         # Use pandas.cut to assign each source a bin index (0-4)
-        catalog['flux_bin'] = pd.cut(catalog['Flux'], bins=flux_bins, labels=False)
+        catalog["flux_bin"] = pd.cut(catalog["Flux"], bins=flux_bins, labels=False)

         # manually add and change some sources
         manual_additions = pd.DataFrame(
             [
-                {'RAdeg' : 279.9804336, 'DEdeg' : 5.0669542, 'flux_bin' : 3},
-                {'RAdeg' : 266.5173685, 'DEdeg' : -29.1252321, 'flux_bin' : 3},
+                {"RAdeg": 279.9804336, "DEdeg": 5.0669542, "flux_bin": 3},
+                {"RAdeg": 266.5173685, "DEdeg": -29.1252321, "flux_bin": 3},
             ]
         )
         catalog = pd.concat([catalog, manual_additions], ignore_index=True)

-        catalog.loc[catalog['SrcID'] == 1101, 'flux_bin'] = 2
+        catalog.loc[catalog["SrcID"] == 1101, "flux_bin"] = 2

         mask_array = np.ones(npix, dtype=bool)
@ -189,17 +197,15 @@ class Command(BaseCommand):
         # process each source in the catalog
         for _, row in catalog.iterrows():
-            ra = row['RAdeg']
-            dec = row['DEdeg']
+            ra = row["RAdeg"]
+            dec = row["DEdeg"]

-            src_coord = SkyCoord(
-                ra, dec, unit = 'deg', frame = 'icrs'
-            )
+            src_coord = SkyCoord(ra, dec, unit="deg", frame="icrs")
             gal = src_coord.galactic
             ra, dec = gal.l.deg, gal.b.deg

-            flux_bin = row['flux_bin'] # 0, 1, or 2
+            flux_bin = row["flux_bin"]  # bin index 0-4

             # Get the corresponding mask radius (in radians) for this flux bin
             radius = mask_radii[flux_bin]
@ -217,8 +223,6 @@ class Command(BaseCommand):
             # Add the pixel indices to our set of masked pixels
             masked_pixels_set.update(pix_indices)

         # Convert the set of masked pixels to a sorted list.
         masked_pixels_list = sorted(list(masked_pixels_set))
@ -226,15 +230,13 @@ class Command(BaseCommand):
self.stdout.write("\nList ready, updating the database...") self.stdout.write("\nList ready, updating the database...")
if not masked_pixels_list: if not masked_pixels_list:
self.stdout.write("No pixels marked as contaminated, exiting.") self.stdout.write("No pixels marked as contaminated, exiting.")
return return
total = len(masked_pixels_list) total = len(masked_pixels_list)
updated = 0 updated = 0
self.stdout.write(f'\nUpdating contaminated flag in batches of {BATCH_SIZE}') self.stdout.write(f"\nUpdating contaminated flag in batches of {BATCH_SIZE}")
for chunk in batch(masked_pixels_list, BATCH_SIZE): for chunk in batch(masked_pixels_list, BATCH_SIZE):
with transaction.atomic(): with transaction.atomic():
@ -244,6 +246,8 @@ class Command(BaseCommand):
             percentage = updated / total * 100
             timestamp = datetime.now().strftime("%H:%M:%S")
-            self.stdout.write(f'[{timestamp}] {updated}/{total} ({percentage:.1f}%) updated')
+            self.stdout.write(
+                f"[{timestamp}] {updated}/{total} ({percentage:.1f}%) updated"
+            )

-        self.stdout.write(f'\n Marked {updated} pixels as contaminated.')
+        self.stdout.write(f"\n Marked {updated} pixels as contaminated.")
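This command's module name is not visible in the diff (its first hunk starts at line 7 of the file), so the invocations below use a placeholder name:

    python manage.py mark_contaminated --catalog catalog.dat
    python manage.py mark_contaminated --reset

The masking step pairs each flux bin with a query_disc call; a self-contained sketch of that geometry for a single source (coordinates and bin are made up):

    import healpy as hp
    import numpy as np

    nside = 4096
    # one source at galactic lon/lat in degrees, flux bin 2 -> 0.5 deg mask radius
    vec = hp.ang2vec(266.5, -29.1, lonlat=True)
    pix_indices = hp.query_disc(nside, vec, radius=np.radians(0.5), inclusive=False, nest=False)
    # pix_indices now holds the RING-ordered HEALPix pixels to flag as contaminated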

View File

@ -4,22 +4,20 @@ from django.db import models
 from django.db.models import UniqueConstraint

 class Pixel(models.Model):
-    #id = models.AutoField(primary_key=True) # ~200 million pixels for a 4096 survey
+    # id = models.AutoField(primary_key=True) # ~200 million pixels for a 4096 survey
     # no need to set explicitly

     # WILL ONLY HOLD 10 SURVEYS AS AN AUTOFIELD (IntegerField, ~2 billion limit)
     # BIGAUTOFIELD WILL BE REQUIRED FOR MORE!

     survey = models.PositiveSmallIntegerField()
     hpid = models.IntegerField(db_index=True)  # up to over 200 million
     counts = models.IntegerField()  # f4, up to ~44k integer: 2 byte too small
     exposure = models.FloatField()  # f4, up to ~13300 float
     contaminated = models.BooleanField(default=False)
@ -27,28 +25,26 @@ class Pixel(models.Model):
return f"Pixel {self.id} hpid {self.hpid} (Survey {self.survey.number})" return f"Pixel {self.id} hpid {self.hpid} (Survey {self.survey.number})"
class CatalogSource(models.Model): class CatalogSource(models.Model):
srcid = models.SmallIntegerField(primary_key=True) srcid = models.SmallIntegerField(primary_key=True)
name = models.CharField(max_length=21) name = models.CharField(max_length=21)
ra_deg = models.FloatField() ra_deg = models.FloatField()
dec_deg = models.FloatField() dec_deg = models.FloatField()
pos_error = models.FloatField() pos_error = models.FloatField()
significance = models.FloatField() significance = models.FloatField()
flux = models.FloatField() flux = models.FloatField()
flux_error = models.FloatField() flux_error = models.FloatField()
catalog_name = models.CharField(max_length=28) catalog_name = models.CharField(max_length=28)
new_xray = models.BooleanField(default=False) new_xray = models.BooleanField(default=False)
source_type = models.CharField(max_length=13) source_type = models.CharField(max_length=13)
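The UniqueConstraint import above implies a uniqueness rule that these hunks do not show; a plausible reconstruction (assumed, not visible in the commit; the constraint name is hypothetical) would allow one row per (survey, hpid) pair:

    class Meta:
        constraints = [
            UniqueConstraint(fields=["survey", "hpid"], name="unique_pixel_per_survey"),
        ]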

View File

@ -1,8 +1,3 @@
 # uplim/serializers.py
 from rest_framework import serializers
 from uplim.models import Pixel

-# class PixelSerializer(serializers.ModelSerializer):
-#     class Meta:
-#         model = Pixel
-#         fields = ('hpid', 'counts', 'exposure', 'contaminated')

View File

@ -1,10 +1,10 @@
 # uplim/urls.py
 from django.urls import path
-from .views import PixelAggregateView, UpperLimitView #, PixelDetailView
+from .views import PixelAggregateView, UpperLimitView  # , PixelDetailView

 urlpatterns = [
-    #path('pixel/<int:hpid>/', PixelDetailView.as_view(), name='pixel-detail'),
-    path('pixel-aggregate/', PixelAggregateView.as_view(), name='pixel-aggregate'),
-    path('upper-limit/', UpperLimitView.as_view(), name='upper-limit'),
+    # path('pixel/<int:hpid>/', PixelDetailView.as_view(), name='pixel-detail'),
+    path("pixel-aggregate/", PixelAggregateView.as_view(), name="pixel-aggregate"),
+    path("upper-limit/", UpperLimitView.as_view(), name="upper-limit"),
 ]
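These two routes map onto the views below; a quick client-side sketch (host and mount point are assumptions, the parameter format comes from the views):

    import requests

    base = "http://localhost:8000"  # hypothetical deployment
    r = requests.get(f"{base}/pixel-aggregate/", params={"pixel": 123456, "survey": "1,3-5"})
    print(r.json())  # {"total_counts": ..., "total_exposure": ..., "pixel_hpid": 123456, "surveys": [1, 3, 4, 5]}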

views.py
View File

@ -4,25 +4,24 @@
 # search for pixels non-inclusively

 import healpy as hp
 import astropy.units as u
-from astropy.coordinates import SkyCoord, Angle
 import numpy as np
 import scipy.special as sp
+from astropy.coordinates import SkyCoord, Angle
 from astropy.stats import poisson_conf_interval

 from django.db.models import Sum
-from django.shortcuts import get_object_or_404

 from rest_framework.views import APIView
 from rest_framework.response import Response
 from rest_framework import status
+from django.shortcuts import get_object_or_404

 from uplim.models import Pixel, CatalogSource

 # SANITIZE RESPONSE DATA BEFORE JSON CONVERSION FOR DEBUGGING NANS
 # now NaNs are converted to 'null' beforehand
 # ****************************************************************

 def sanitize(obj):
     if isinstance(obj, dict):
         return {k: sanitize(v) for k, v in obj.items()}
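Only the dict branch of sanitize() survives in this hunk; a minimal sketch of the full helper, consistent with the comment about converting NaN to null (assumed):

    import math

    def sanitize(obj):
        """Recursively replace NaN floats with None so JSON serializes them as null."""
        if isinstance(obj, dict):
            return {k: sanitize(v) for k, v in obj.items()}
        if isinstance(obj, (list, tuple)):
            return [sanitize(v) for v in obj]
        if isinstance(obj, float) and math.isnan(obj):
            return None
        return obj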
@ -52,17 +51,17 @@ def parse_survey_param(raw):
     return sorted(surveys)
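The rest of parse_survey_param() is elided; given the "use N, N,M or N-M formats" error message below, a plausible implementation (assumed) is:

    def parse_survey_param(raw):
        """Parse "1", "1,3" or "2-5" (and combinations) into a sorted list of ints."""
        surveys = set()
        for token in raw.split(","):
            token = token.strip()
            if "-" in token:
                lo, hi = token.split("-")
                surveys.update(range(int(lo), int(hi) + 1))
            else:
                surveys.add(int(token))
        return sorted(surveys)

Malformed tokens raise ValueError, matching the except ValueError handlers in both views.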
 # PIXEL VIEW (MOSTLY FOR TESTING)
 # add healpix indices into the output
 # **************************************************************

 class PixelAggregateView(APIView):
     def get(self, request):

         # GET PARAMETERS FROM THE QUERY
         # **************************************************************
         raw_pixel = request.query_params.get("pixel")
         raw_survey = request.query_params.get("survey")

         # 400 BADREQUEST
@ -70,7 +69,7 @@ class PixelAggregateView(APIView):
         if raw_pixel is None or raw_survey is None:
             return Response(
                 {"detail": "Both `pixel` and `survey` parameters are required."},
-                status=status.HTTP_400_BAD_REQUEST
+                status=status.HTTP_400_BAD_REQUEST,
             )

         # FILTER THE INPUTS
@ -80,7 +79,7 @@ class PixelAggregateView(APIView):
         except ValueError:
             return Response(
                 {"detail": "`pixel` must be an integer."},
-                status=status.HTTP_400_BAD_REQUEST
+                status=status.HTTP_400_BAD_REQUEST,
             )

         try:
@ -88,35 +87,25 @@ class PixelAggregateView(APIView):
         except ValueError:
             return Response(
                 {"detail": "Malformed `survey`; use N, N,M or N-M formats."},
-                status=status.HTTP_400_BAD_REQUEST
+                status=status.HTTP_400_BAD_REQUEST,
             )

         # FILTER AND AGGREGATE
         # **************************************************************
-        qs = Pixel.objects.filter(
-            hpid=hpid,
-            survey__in=survey_numbers
-        )
+        qs = Pixel.objects.filter(hpid=hpid, survey__in=survey_numbers)

         if not qs.exists():
             # no matching pixel(s) → 404
-            get_object_or_404(
-                Pixel,
-                hpid=hpid,
-                survey__in=survey_numbers
-            )
+            get_object_or_404(Pixel, hpid=hpid, survey__in=survey_numbers)

         aggregates = qs.aggregate(
-            #pixel_hpid=hpid,
-            #survey_number=survey_numbers,
+            # pixel_hpid=hpid,
+            # survey_number=survey_numbers,
             total_counts=Sum("counts"),
-            total_exposure=Sum("exposure")
+            total_exposure=Sum("exposure"),
         )

-        plusdata = {
-            'pixel_hpid' : hpid,
-            'surveys' : survey_numbers
-        }
+        plusdata = {"pixel_hpid": hpid, "surveys": survey_numbers}

         result = {**aggregates, **plusdata}
@ -125,7 +114,6 @@ class PixelAggregateView(APIView):
         return Response(result, status=status.HTTP_200_OK)

 # UPPER LIMIT COMPUTATION VIEW
 # **************************************************************
@ -134,34 +122,35 @@ class UpperLimitView(APIView):
""" """
Calculate confidence bounds based on aperture photometry using classic and bayesian methods Calculate confidence bounds based on aperture photometry using classic and bayesian methods
""" """
def get(self, request): def get(self, request):
# GET PARAMETERS FROM THE REQUEST # GET PARAMETERS FROM THE REQUEST
# ************************************************************** # **************************************************************
try: try:
ra = float(request.query_params.get('ra')) ra = float(request.query_params.get("ra"))
dec = float(request.query_params.get('dec')) dec = float(request.query_params.get("dec"))
confidence_level = float(request.query_params.get('cl')) confidence_level = float(request.query_params.get("cl"))
except (TypeError, ValueError): except (TypeError, ValueError):
return Response( return Response(
{"error": "Invalud parameters, provide RA, DEC, and CL"}, {"error": "Invalud parameters, provide RA, DEC, and CL"},
status = status.HTTP_400_BAD_REQUEST status=status.HTTP_400_BAD_REQUEST,
) )
# ── NEW: pull & parse survey selection ── # ── NEW: pull & parse survey selection ──
raw_survey = request.query_params.get('survey') raw_survey = request.query_params.get("survey")
if raw_survey is None: if raw_survey is None:
return Response( return Response(
{"error": "Missing required `survey` parameter (e.g. ?survey=1,3-5)"}, {"error": "Missing required `survey` parameter (e.g. ?survey=1,3-5)"},
status=status.HTTP_400_BAD_REQUEST status=status.HTTP_400_BAD_REQUEST,
) )
try: try:
survey_numbers = parse_survey_param(raw_survey) survey_numbers = parse_survey_param(raw_survey)
except ValueError: except ValueError:
return Response( return Response(
{"error": "Malformed `survey`; use formats like 1, 2-5, or 1,3-4"}, {"error": "Malformed `survey`; use formats like 1, 2-5, or 1,3-4"},
status=status.HTTP_400_BAD_REQUEST status=status.HTTP_400_BAD_REQUEST,
) )
# hp = HEALPix( # hp = HEALPix(
# nside = 4096, # nside = 4096,
@ -173,78 +162,69 @@ class UpperLimitView(APIView):
         # MAP ITSELF WAS MADE IN GALACTIC COORDINATES
         # **************************************************************
-        src_coord = SkyCoord(
-            ra, dec, unit = 'deg', frame = 'icrs'
-        )
+        src_coord = SkyCoord(ra, dec, unit="deg", frame="icrs")
         gal = src_coord.galactic

-        src_vec = hp.ang2vec(gal.l.deg, gal.b.deg, lonlat = True)
+        src_vec = hp.ang2vec(gal.l.deg, gal.b.deg, lonlat=True)

         # DEFINE APERTURE AND ANNULUS RADII
         # **************************************************************
         aperture_radius = 71  # radius of the aperture in arc seconds
         # HPD ~48 arcseconds
         # 90% ~100 arcseconds
         annulus_inner = 142  # 2 * aperture_radius
         annulus_outer = 284  # 4 * aperture_radius

         # FETCH PIXEL DATA DEFINED VIA HP.QUERY_DISC (INCLUSIVE=FALSE)
         # **************************************************************
         source_pixel_list = hp.query_disc(
-            nside = 4096,
-            vec = src_vec,
-            inclusive = False,
-            nest = False,
-            radius = (aperture_radius * u.arcsecond).to(u.radian).value
+            nside=4096,
+            vec=src_vec,
+            inclusive=False,
+            nest=False,
+            radius=(aperture_radius * u.arcsecond).to(u.radian).value,
         )

         inner_pixel_list = hp.query_disc(
-            nside = 4096,
-            vec = src_vec,
-            inclusive = False,
-            nest = False,
-            radius = (annulus_inner * u.arcsecond).to(u.radian).value
+            nside=4096,
+            vec=src_vec,
+            inclusive=False,
+            nest=False,
+            radius=(annulus_inner * u.arcsecond).to(u.radian).value,
         )

         outer_pixel_list = hp.query_disc(
-            nside = 4096,
-            vec = src_vec,
-            inclusive = False,
-            nest = False,
-            radius = (annulus_outer * u.arcsecond).to(u.radian).value
+            nside=4096,
+            vec=src_vec,
+            inclusive=False,
+            nest=False,
+            radius=(annulus_outer * u.arcsecond).to(u.radian).value,
         )

         annulus_pixel_list = [
             item for item in outer_pixel_list if item not in inner_pixel_list
         ]
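An aside on that list comprehension: query_disc returns numpy integer arrays, so the membership test is O(n·m). An equivalent set-difference sketch (an alternative, not what this commit does):

    import numpy as np

    # outer_pixel_list and inner_pixel_list as returned by hp.query_disc above
    annulus_pixel_list = np.setdiff1d(outer_pixel_list, inner_pixel_list)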
         source_pixels = Pixel.objects.filter(
-            hpid__in = source_pixel_list,
-            survey__in = survey_numbers
+            hpid__in=source_pixel_list, survey__in=survey_numbers
         )

         annulus_pixels = Pixel.objects.filter(
-            hpid__in = annulus_pixel_list,
-            survey__in = survey_numbers
+            hpid__in=annulus_pixel_list, survey__in=survey_numbers
         )

         # check contamination
         contamination = (
-            source_pixels.filter(contaminated=True).exists() or
-            annulus_pixels.filter(contaminated=True).exists()
+            source_pixels.filter(contaminated=True).exists()
+            or annulus_pixels.filter(contaminated=True).exists()
         )

         if not source_pixels.exists() and not annulus_pixels.exists():
             return Response(
                 {"detail": "No pixel data for the given survey selection."},
-                status=status.HTTP_404_NOT_FOUND
+                status=status.HTTP_404_NOT_FOUND,
             )

         # COMPUTE COUNTS, BACKGROUND ESTIMATE, EXPOSURE
         # **************************************************************
@ -266,62 +246,59 @@ class UpperLimitView(APIView):
         # CONSTANTS
         # **************************************************************
-        #EEF = .9 # eclosed energy fraction, .5 for hpd, .9 for w90
-        #ECF = 4e-11 # energy conversion factor
-        EEF = .80091 # use values from the paper
+        # EEF = .9  # enclosed energy fraction, .5 for hpd, .9 for w90
+        # ECF = 4e-11  # energy conversion factor
+        EEF = 0.80091  # use values from the paper
         ECF = 3.3423184e-11

         # BAYESIAN IMPLEMENTATION VIA POISSON_CONF_INTERVAL
         # **************************************************************
         low, high = poisson_conf_interval(
-            n = N,
-            background = B,
-            interval = 'kraft-burrows-nousek',
-            confidence_level=confidence_level
+            n=N,
+            background=B,
+            interval="kraft-burrows-nousek",
+            confidence_level=confidence_level,
         )

         bayesian_count_ul = high
         bayesian_count_ll = low

         bayesian_rate_ul = bayesian_count_ul / t / EEF  # count rate limits
         bayesian_rate_ll = bayesian_count_ll / t / EEF

         bayesian_flux_ul = bayesian_rate_ul * ECF  # flux limits
         bayesian_flux_ll = bayesian_rate_ll * ECF

         # CLASSICAL IMPLEMENTATION VIA GAMMAINCCINV
         # ****************************************************************
-        classic_count_ul = sp.gammainccinv(N+1, 1 - confidence_level) - B
+        classic_count_ul = sp.gammainccinv(N + 1, 1 - confidence_level) - B
         classic_count_ll = sp.gammainccinv(N, confidence_level) - B

         if not np.isfinite(classic_count_ll) or classic_count_ll < 0:
             classic_count_ll = 0.0

         classic_rate_ul = classic_count_ul / t / EEF  # count rate limits
         classic_rate_ll = classic_count_ll / t / EEF

         classic_flux_ul = classic_rate_ul * ECF  # flux limits
         classic_flux_ll = classic_rate_ll * ECF

         # FLUX ESTIMATION
         # ****************************************************************
         S = N - B  # counts as simply counts within aperture
         # with the background estimate subtracted
         CR = S / t / EEF  # source count rate
         BR = B / t  # background rate within aperture
         FL = CR * ECF  # conversion to flux
         Flux = max(FL, 0)  # flux cannot be lower than zero
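For concreteness, the two limit computations in a standalone snippet with made-up inputs (N, B, t and cl are illustrative only; EEF and ECF are the constants above):

    import scipy.special as sp
    from astropy.stats import poisson_conf_interval

    N, B, t = 12, 4.2, 3500.0  # aperture counts, background estimate, exposure
    cl, EEF, ECF = 0.95, 0.80091, 3.3423184e-11

    # classical: gammaincc(N + 1, lam) = 1 - cl yields the lam with P(X <= N) = 1 - cl
    classic_count_ul = sp.gammainccinv(N + 1, 1 - cl) - B

    # bayesian: Kraft-Burrows-Nousek interval for Poisson counts with known background
    low, high = poisson_conf_interval(
        n=N, background=B, interval="kraft-burrows-nousek", confidence_level=cl
    )
    flux_ul = (high / t / EEF) * ECF  # counts -> count rate -> flux, as in the view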
         # NEARBY SOURCES CHECK
         # ****************************************************************
@ -330,39 +307,37 @@ class UpperLimitView(APIView):
         radius_deg = radius_as / 3600

         dec_min = max(dec - radius_deg, -90)
         dec_max = min(dec + radius_deg, 90)

         # cheap belt query
         belt_sources = CatalogSource.objects.filter(
-            dec_deg__gte = dec_min,
-            dec_deg__lte = dec_max
+            dec_deg__gte=dec_min, dec_deg__lte=dec_max
         )

-        center_coord = SkyCoord(ra, dec, unit='deg')
+        center_coord = SkyCoord(ra, dec, unit="deg")

         nearby_sources = []

-        #refine belt to circular region using astropy separation
+        # refine belt to circular region using astropy separation
         for catsrc in belt_sources:
-            catsrc_coord = SkyCoord(catsrc.ra_deg, catsrc.dec_deg, unit='deg')
+            catsrc_coord = SkyCoord(catsrc.ra_deg, catsrc.dec_deg, unit="deg")
             if center_coord.separation(catsrc_coord).deg <= radius_deg:
                 nearby_sources.append(
                     {
-                        'srcid' : catsrc.srcid,
-                        'name' : catsrc.name,
-                        'ra_deg' : catsrc.ra_deg,
-                        'dec_deg' : catsrc.dec_deg,
-                        'pos_error' : catsrc.pos_error,
-                        'significance' : catsrc.significance,
-                        'flux' : catsrc.flux,
-                        'flux_error' : catsrc.flux_error,
-                        'catalog_name' : catsrc.catalog_name,
-                        'new_xray' : catsrc.new_xray,
-                        'source_type' : catsrc.source_type
+                        "srcid": catsrc.srcid,
+                        "name": catsrc.name,
+                        "ra_deg": catsrc.ra_deg,
+                        "dec_deg": catsrc.dec_deg,
+                        "pos_error": catsrc.pos_error,
+                        "significance": catsrc.significance,
+                        "flux": catsrc.flux,
+                        "flux_error": catsrc.flux_error,
+                        "catalog_name": catsrc.catalog_name,
+                        "new_xray": catsrc.new_xray,
+                        "source_type": catsrc.source_type,
                     }
                 )

         # SQUARE REGION IMAGE SERVING
         # ****************************************************************
@ -370,44 +345,43 @@ class UpperLimitView(APIView):
         map_radius = annulus_outer * np.sqrt(2)

         map_pixel_list = hp.query_disc(
-            nside = 4096,
-            vec = src_vec,
-            inclusive = False,
-            nest = False,
-            radius = (map_radius * u.arcsecond).to(u.radian).value
+            nside=4096,
+            vec=src_vec,
+            inclusive=False,
+            nest=False,
+            radius=(map_radius * u.arcsecond).to(u.radian).value,
         )

         # fetch those pixels for the requested surveys
         # summing counts and sorting by hpid
         map_pixels_qs = (
-            Pixel.objects
-            .filter(hpid__in = map_pixel_list, survey__in = survey_numbers)
-            .values('hpid')
-            .annotate(counts=Sum('counts'))
-            .order_by('hpid')
+            Pixel.objects.filter(hpid__in=map_pixel_list, survey__in=survey_numbers)
+            .values("hpid")
+            .annotate(counts=Sum("counts"))
+            .order_by("hpid")
         )

         # turn the queryset to a list
         map_pixels_list = list(map_pixels_qs)

         # get lists of healpix indices and count values
-        map_healpix_list = [d['hpid'] for d in map_pixels_list]
-        map_counts_list = [d['counts'] for d in map_pixels_list]
+        map_healpix_list = [d["hpid"] for d in map_pixels_list]
+        map_counts_list = [d["counts"] for d in map_pixels_list]

         # set map nside
         map_nside = 4096
         # set map order
-        map_order = 'ring'
+        map_order = "ring"

         # assemble the result dict
         map_dict = {
-            'healpix' : map_healpix_list,
-            'counts' : map_counts_list,
-            'nside' : map_nside,
-            'order' : map_order,
-            'radius_as' : map_radius
+            "healpix": map_healpix_list,
+            "counts": map_counts_list,
+            "nside": map_nside,
+            "order": map_order,
+            "radius_as": map_radius,
         }
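A consumer can rebuild a plottable partial map from this payload; a sketch with healpy (the payload keys come from map_dict above, the plotting choice is an assumption, not part of this codebase):

    import healpy as hp
    import numpy as np

    def show_count_map(payload):
        """payload is the CountMap dict returned by UpperLimitView."""
        full = np.full(hp.nside2npix(payload["nside"]), hp.UNSEEN)
        full[np.asarray(payload["healpix"])] = payload["counts"]
        lon, lat = hp.pix2ang(payload["nside"], payload["healpix"][0], lonlat=True)
        hp.gnomview(full, rot=(lon, lat))  # gnomonic cutout around the returned region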
         # RESULT JSON
@ -415,36 +389,36 @@ class UpperLimitView(APIView):
         result = {
             # frequentist limits
-            'ClassicUpperLimit' : classic_count_ul,
-            'ClassicLowerLimit' : classic_count_ll,
-            'ClassicCountRateUpperLimit' : classic_rate_ul,
-            'ClassicCountRateLowerLimit' : classic_rate_ll,
-            'ClassicFluxUpperLimit' : classic_flux_ul,
-            'ClassicFluxLowerLimit' : classic_flux_ll,
+            "ClassicUpperLimit": classic_count_ul,
+            "ClassicLowerLimit": classic_count_ll,
+            "ClassicCountRateUpperLimit": classic_rate_ul,
+            "ClassicCountRateLowerLimit": classic_rate_ll,
+            "ClassicFluxUpperLimit": classic_flux_ul,
+            "ClassicFluxLowerLimit": classic_flux_ll,
             # bayesian limits
-            'BayesianUpperLimit' : bayesian_count_ul,
-            'BayesianLowerLimit' : bayesian_count_ll,
-            'BayesianCountRateUpperLimit' : bayesian_rate_ul,
-            'BayesianCountRateLowerLimit' : bayesian_rate_ll,
-            'BayesianFluxUpperLimit' : bayesian_flux_ul,
-            'BayesianFluxLowerLimit' : bayesian_flux_ll,
+            "BayesianUpperLimit": bayesian_count_ul,
+            "BayesianLowerLimit": bayesian_count_ll,
+            "BayesianCountRateUpperLimit": bayesian_rate_ul,
+            "BayesianCountRateLowerLimit": bayesian_rate_ll,
+            "BayesianFluxUpperLimit": bayesian_flux_ul,
+            "BayesianFluxLowerLimit": bayesian_flux_ll,
             # flux 'center value' estimate
-            'FluxEstimate' : Flux,
+            "FluxEstimate": Flux,
             # raw data
-            'ApertureCounts' : N,
-            'ApertureBackgroundCounts' : B,
-            'SourceCounts' : S,
-            'Exposure' : t,
+            "ApertureCounts": N,
+            "ApertureBackgroundCounts": B,
+            "SourceCounts": S,
+            "Exposure": t,
             # count rates
-            'SourceRate' : CR,
-            'BackgroundRate' : BR,
+            "SourceRate": CR,
+            "BackgroundRate": BR,
             # contamination
-            'Contamination' : contamination,
-            'NearbySources' : nearby_sources,
+            "Contamination": contamination,
+            "NearbySources": nearby_sources,
             # count map for the frontend image
-            'CountMap' : map_dict
+            "CountMap": map_dict,
         }

         clean = sanitize(result)  # calling sanitize() to convert NaN to null

         return Response(clean, status=status.HTTP_200_OK)
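End to end, a full upper-limit request then looks like this (host is hypothetical; parameter names and response keys come from the view above):

    import requests

    resp = requests.get(
        "http://localhost:8000/upper-limit/",
        params={"ra": 279.98, "dec": 5.07, "cl": 0.95, "survey": "1,3-5"},
    )
    data = resp.json()
    print(data["BayesianFluxUpperLimit"], data["Contamination"])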