jbloom committed on
Commit
289df1b
1 Parent(s): a61c40f

initial commit

Files changed (3)
  1. .gitattributes +2 -0
  2. README.md +70 -3
  3. SBI-16-2D.py +207 -0
.gitattributes CHANGED
@@ -49,6 +49,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.gif filter=lfs diff=lfs merge=lfs -text
  *.png filter=lfs diff=lfs merge=lfs -text
  *.tiff filter=lfs diff=lfs merge=lfs -text
+ *.fits filter=lfs diff=lfs merge=lfs -text
+ *.fit filter=lfs diff=lfs merge=lfs -text
  # Image files - compressed
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,70 @@
- ---
- license: cc-by-4.0
- ---
+ ---
+ license: cc-by-4.0
+ pretty_name: Raw space-based images from the Hubble Space Telescope
+ tags:
+ - astronomy
+ - compression
+ - images
+ ---
+
+ # SBI-16-2D Dataset
+
+ SBI-16-2D is part of the AstroCompress project. It contains imaging data assembled from the Hubble Space Telescope (HST). <TODO>Describe data format</TODO>
+
+ # Usage
+
+ You first need to install the `datasets` and `astropy` packages:
+
+ ```bash
+ pip install datasets astropy
+ ```
+
+ There are two configurations, `tiny` and `full`, each with `train` and `test` splits. The `tiny` configuration has two 2D images in `train` and one in `test`; the `full` configuration contains all the images in the `data/` directory.
+
+ ## Use from Huggingface Directly
+
+ To use this data directly from Huggingface, log in on the command line before starting python:
+
+ ```bash
+ huggingface-cli login
+ ```
+
+ or
+
+ ```python
+ import huggingface_hub
+ huggingface_hub.login(token=token)
+ ```
+
+ Then in your python script:
+
+ ```python
+ from datasets import load_dataset
+ dataset = load_dataset("AstroCompress/SBI-16-2D", "tiny")
+ ds = dataset.with_format("np")
+ ```
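+
+ As a quick sanity check, the loaded object exposes the splits and the per-example features (`image` plus the `ra`, `dec`, `pixscale`, and `image_id` metadata recorded in the split files):
+
+ ```python
+ print(ds)                    # summary of the train/test splits
+ print(ds["train"].features)  # image + ra/dec/pixscale/image_id
+ ```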
+
+ ## Local Use
+
+ Alternatively, you can clone this repo and use it directly, without connecting to hf:
+
+ ```bash
+ git clone https://huggingface.co/datasets/AstroCompress/SBI-16-2D
+ ```
+
+ Then `cd SBI-16-2D` and start python like:
+
+ ```python
+ from datasets import load_dataset
+ dataset = load_dataset("./SBI-16-2D.py", "tiny", data_dir="./data/")
+ ds = dataset.with_format("np")
+ ```
+
+ Now you should be able to use the `ds` variable like:
+
+ ```python
+ ds["test"][0]["image"].shape # -> (TBD)
+ ```
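+
+ Since the loader declares 16-bit images (`Image(decode=True, mode="I;16")`), the arrays returned under `with_format("np")` should be 2D integer arrays; a quick check (a sketch — the exact dtype may vary with your `datasets` version):
+
+ ```python
+ img = ds["test"][0]["image"]
+ print(img.dtype, img.shape)  # expect a 16-bit integer dtype and a 2D shape
+ ```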
+
+ Note that it will take a long time to download and convert the images into the local cache for the `full` dataset. Afterward, usage should be quick, as the files are memory-mapped from disk.
+
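+ For example, to build the local cache up front using the `full` config (same pattern as above):
+
+ ```python
+ from datasets import load_dataset
+ dataset = load_dataset("AstroCompress/SBI-16-2D", "full")
+ ds = dataset.with_format("np")
+ ```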
SBI-16-2D.py ADDED
@@ -0,0 +1,207 @@
+ import os
+ import random
+ from glob import glob
+ import json
+ from huggingface_hub import hf_hub_download
+
+
+ from astropy.io import fits
+ import datasets
+ from datasets import DownloadManager
+ from fsspec.core import url_to_fs
+
+ _DESCRIPTION = """
+ SBI-16-2D is part of the AstroCompress project.
+ It contains imaging data assembled from the Hubble Space Telescope (HST).
+ """
+
+ _HOMEPAGE = "https://google.github.io/AstroCompress"
+
+ _LICENSE = "CC BY 4.0"
+
+ _URL = "https://huggingface.co/datasets/AstroCompress/SBI-16-2D/resolve/main/"
+
+ _URLS = {
+     "tiny": {
+         "train": "./splits/tiny_train.jsonl",
+         "test": "./splits/tiny_test.jsonl",
+     },
+     "full": {
+         "train": "./splits/full_train.jsonl",
+         "test": "./splits/full_test.jsonl",
+     },
+ }
+
+ _REPO_ID = "AstroCompress/SBI-16-2D"
+
+
+ class SBI_16_2D(datasets.GeneratorBasedBuilder):
+     """SBI-16-2D Dataset"""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="tiny",
+             version=VERSION,
+             description="A small subset of the data, to test downstream workflows.",
+         ),
+         datasets.BuilderConfig(
+             name="full",
+             version=VERSION,
+             description="The full dataset",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "tiny"
+
+     def __init__(self, **kwargs):
+         super().__init__(version=self.VERSION, **kwargs)
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "image": datasets.Image(decode=True, mode="I;16"),
+                     "ra": datasets.Value("float64"),
+                     "dec": datasets.Value("float64"),
+                     "pixscale": datasets.Value("float64"),
+                     "image_id": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation="TBD",
+         )
+
+     def _split_generators(self, dl_manager: DownloadManager):
+
+         ret = []
+         base_path = dl_manager._base_path
+         locally_run = not base_path.startswith(datasets.config.HF_ENDPOINT)
+         _, path = url_to_fs(base_path)
+
+         for split in ["train", "test"]:
+             if locally_run:
+                 split_file_location = os.path.normpath(
+                     os.path.join(path, _URLS[self.config.name][split])
+                 )
+                 split_file = dl_manager.download_and_extract(split_file_location)
+             else:
+                 split_file = hf_hub_download(
+                     repo_id=_REPO_ID,
+                     filename=_URLS[self.config.name][split],
+                     repo_type="dataset",
+                 )
+             with open(split_file, encoding="utf-8") as f:
+                 data_filenames = []
+                 data_metadata = []
+                 for line in f:
+                     item = json.loads(line)
+                     data_filenames.append(item["image"])
+                     data_metadata.append(
+                         {
+                             "ra": item["ra"],
+                             "dec": item["dec"],
+                             "pixscale": item["pixscale"],
+                             "image_id": item["image_id"],
+                         }
+                     )
+             if locally_run:
+                 data_urls = [
+                     os.path.normpath(os.path.join(path, data_filename))
+                     for data_filename in data_filenames
+                 ]
+                 data_files = [
+                     dl_manager.download(data_url) for data_url in data_urls
+                 ]
+             else:
+                 data_urls = data_filenames
+                 data_files = [
+                     hf_hub_download(
+                         repo_id=_REPO_ID, filename=data_url, repo_type="dataset"
+                     )
+                     for data_url in data_urls
+                 ]
+             ret.append(
+                 datasets.SplitGenerator(
+                     name=(
+                         datasets.Split.TRAIN
+                         if split == "train"
+                         else datasets.Split.TEST
+                     ),
+                     gen_kwargs={
+                         "filepaths": data_files,
+                         "split_file": split_file,
+                         "split": split,
+                         "data_metadata": data_metadata,
+                     },
+                 ),
+             )
+         return ret
+
+     def _generate_examples(self, filepaths, split_file, split, data_metadata):
+         """Generate SBI-16-2D examples"""
+
+         for idx, (filepath, item) in enumerate(zip(filepaths, data_metadata)):
+             task_instance_key = f"{self.config.name}-{split}-{idx}"
+             with fits.open(filepath, memmap=False) as hdul:
+                 # the SCI extension holds the 2D science image
+                 image_data = hdul["SCI"].data.tolist()
+             yield task_instance_key, {**{"image": image_data}, **item}
+
+
+ def make_split_jsonl_files(
+     config_type="tiny", data_dir="./data", outdir="./splits", seed=42
+ ):
+     """
+     Create jsonl files for the SBI-16-2D dataset.
+
+     config_type: str, default="tiny"
+         The type of split to create. Options are "tiny" and "full".
+     data_dir: str, default="./data"
+         The directory where the FITS files are located.
+     outdir: str, default="./splits"
+         The directory where the jsonl files will be created.
+     seed: int, default=42
+         The seed for the random split.
+     """
+     random.seed(seed)
+     os.makedirs(outdir, exist_ok=True)
+
+     fits_files = glob(os.path.join(data_dir, "*.fits"))
+     random.shuffle(fits_files)
+     if config_type == "tiny":
+         train_files = fits_files[:2]
+         test_files = fits_files[2:3]
+     elif config_type == "full":
+         split_idx = int(0.8 * len(fits_files))
+         train_files = fits_files[:split_idx]
+         test_files = fits_files[split_idx:]
+     else:
+         raise ValueError("Unsupported config_type. Use 'tiny' or 'full'.")
+
+     def create_jsonl(files, split_name):
+         output_file = os.path.join(outdir, f"{config_type}_{split_name}.jsonl")
+         with open(output_file, "w") as out_f:
+             for file in files:
+                 print(file, flush=True, end="...")
+                 with fits.open(file, memmap=False) as hdul:
+                     image_id = os.path.basename(file).split(".fits")[0]
+                     ra = hdul["SCI"].header.get("CRVAL1", 0)
+                     dec = hdul["SCI"].header.get("CRVAL2", 0)
+                     pixscale = hdul["SCI"].header.get("CD1_2", 0.396)
+                     item = {
+                         "image_id": image_id,
+                         "image": file,
+                         "ra": ra,
+                         "dec": dec,
+                         "pixscale": pixscale,
+                     }
+                 out_f.write(json.dumps(item) + "\n")
+
+     create_jsonl(train_files, "train")
+     create_jsonl(test_files, "test")
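
The commit does not show how `make_split_jsonl_files` is invoked; a minimal sketch, assuming it is run from the repository root (the dashed filename is not importable directly, so `importlib` is used to load it):

```python
# Sketch: regenerate the split jsonl files after placing FITS images in ./data.
import importlib.util

spec = importlib.util.spec_from_file_location("sbi_16_2d", "SBI-16-2D.py")
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)  # runs the script's imports (datasets, astropy, fsspec)

mod.make_split_jsonl_files(config_type="tiny")  # writes splits/tiny_train.jsonl, splits/tiny_test.jsonl
mod.make_split_jsonl_files(config_type="full")  # writes splits/full_train.jsonl, splits/full_test.jsonl
```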