# Utilities for building SBI-16-2D dataset split (jsonl) files from local FITS data.
import os
import random
from glob import glob
import json
from huggingface_hub import hf_hub_download
from tqdm import tqdm
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
import datasets
from datasets import DownloadManager
from fsspec.core import url_to_fs
def get_fits_footprint(fits_path):
    """
    Extract WCS information from a FITS file and compute its sky footprints.

    The file is expected to carry WCS headers in HDUs 1 and 4 (presumably the
    two science extensions of an SBI-16-2D product — TODO confirm against the
    data files).

    Parameters:
        fits_path (str): Path to the FITS file.

    Returns:
        tuple: (coords1, coords2) — for HDU 1 and HDU 4 respectively, each a
        flat list of the footprint corner coordinates returned by
        ``WCS.calc_footprint`` (4 corners, RA/Dec pairs).
    """

    def _hdu_footprint(hdul, index):
        # Build the WCS for one HDU and flatten its corner footprint.
        wcs = WCS(hdul[index].header)
        # Keep the two smallest pixel axes — guards against extra axes
        # in pixel_shape.
        shape = sorted(tuple(wcs.pixel_shape))[:2]
        return list(wcs.calc_footprint(axes=shape).flatten())

    with fits.open(fits_path) as hdul:
        coords1 = _hdu_footprint(hdul, 1)
        coords2 = _hdu_footprint(hdul, 4)
    return coords1, coords2
def calculate_pixel_scale(header):
    """
    Return the mean pixel scale, in arcseconds per pixel, from a FITS header.

    The scale along each image axis is the Euclidean norm of the
    corresponding row of the CD matrix (degrees/pixel), converted to
    arcseconds; the result is the average of the two axis scales.

    Args:
        header: Mapping providing the CD matrix elements
            CD1_1, CD1_2, CD2_1, and CD2_2.

    Returns:
        float: Mean pixel scale in arcseconds per pixel.
    """
    # Row norms of the CD matrix give the per-axis scales; the 3600 factor
    # converts degrees to arcseconds.
    scale_x = (header['CD1_1'] ** 2 + header['CD1_2'] ** 2) ** 0.5 * 3600
    scale_y = (header['CD2_1'] ** 2 + header['CD2_2'] ** 2) ** 0.5 * 3600
    return (scale_x + scale_y) / 2
def make_split_jsonl_files(
    config_type="tiny", data_dir="./data", outdir="./splits", seed=42
):
    """
    Create train/test jsonl metadata files for the SBI-16-2D dataset.

    config_type: str, default="tiny"
        The type of split to create. Options are "tiny" (2 train files,
        1 test file) and "full" (80/20 random split).
    data_dir: str, default="./data"
        The directory where the FITS files are located.
    outdir: str, default="./splits"
        The directory where the jsonl files will be created.
    seed: int, default=42
        The seed for the random split.

    Raises:
        ValueError: If config_type is unsupported, or if "tiny" is requested
            with fewer than 3 FITS files available (the original code would
            silently emit short or empty splits in that case).
    """
    random.seed(seed)
    os.makedirs(outdir, exist_ok=True)

    fits_files = glob(os.path.join(data_dir, "*.fits"))
    random.shuffle(fits_files)

    if config_type == "tiny":
        # Guard against silently producing empty/short tiny splits.
        if len(fits_files) < 3:
            raise ValueError(
                f"'tiny' split needs at least 3 FITS files, found {len(fits_files)}."
            )
        train_files = fits_files[:2]
        test_files = fits_files[2:3]
    elif config_type == "full":
        split_idx = int(0.8 * len(fits_files))
        train_files = fits_files[:split_idx]
        test_files = fits_files[split_idx:]
    else:
        raise ValueError("Unsupported config_type. Use 'tiny' or 'full'.")

    def create_jsonl(files, split_name):
        # Write one metadata record per FITS file for the given split.
        output_file = os.path.join(outdir, f"{config_type}_{split_name}.jsonl")
        with open(output_file, "w") as out_f:
            for file in tqdm(files):
                with fits.open(file, memmap=False) as hdul:
                    image_id = os.path.basename(file).split(".fits")[0]
                    # RA/Dec of the reference pixel; fall back to 0 when the
                    # header lacks CRVAL1/CRVAL2.
                    ra = hdul["SCI"].header.get("CRVAL1", 0)
                    dec = hdul["SCI"].header.get("CRVAL2", 0)
                    # NOTE(review): pixel scale is read from hdul[1] while
                    # ra/dec use the "SCI" extension — presumably the same
                    # HDU; confirm against the data products.
                    pixscale = calculate_pixel_scale(hdul[1].header)
                    footprint = get_fits_footprint(file)
                item = {
                    "image_id": image_id,
                    "image": file,
                    "ra": ra,
                    "dec": dec,
                    "pixscale": pixscale,
                    "footprint": footprint,
                }
                out_f.write(json.dumps(item) + "\n")

    create_jsonl(train_files, "train")
    create_jsonl(test_files, "test")
if __name__ == "__main__":
    # Build both split configurations in a single invocation.
    for config in ("tiny", "full"):
        make_split_jsonl_files(config)