anonuser7251 committed on
Commit
f8c5348
1 Parent(s): 0551f0c

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. GBI-16-2D.py +172 -0
  3. README.md +83 -0
  4. data/LR.20051204.41155.fits +0 -0
  5. data/LR.20051204.41651.fits +0 -0
  6. data/LR.20051204.43259.fits +0 -0
  7. data/LR.20051204.43899.fits +0 -0
  8. data/LR.20051204.46034.fits +0 -0
  9. data/LR.20051204.47387.fits +0 -0
  10. data/LR.20051204.49021.fits +0 -0
  11. data/LR.20051204.51257.fits +0 -0
  12. data/LR.20051204.53196.fits +0 -0
  13. data/LR.20051204.54066.fits +0 -0
  14. data/LR.20051204.56002.fits +0 -0
  15. data/LR.20051204.57105.fits +0 -0
  16. data/LR.20051204.57873.fits +0 -0
  17. data/LR.20060530.30214.fits +0 -0
  18. data/LR.20060530.32407.fits +0 -0
  19. data/LR.20060530.36483.fits +0 -0
  20. data/LR.20060530.43065.fits +0 -0
  21. data/LR.20060530.45164.fits +0 -0
  22. data/LR.20060530.46025.fits +0 -0
  23. data/LR.20060530.48970.fits +0 -0
  24. data/LR.20060530.50806.fits +0 -0
  25. data/LR.20060530.51656.fits +0 -0
  26. data/LR.20060531.46897.fits +0 -0
  27. data/LR.20060531.49568.fits +0 -0
  28. data/LR.20060531.50684.fits +0 -0
  29. data/LR.20060531.50878.fits +0 -0
  30. data/LR.20060725.29836.fits +0 -0
  31. data/LR.20060725.37294.fits +0 -0
  32. data/LR.20060725.42247.fits +0 -0
  33. data/LR.20060725.44412.fits +0 -0
  34. data/LR.20060725.46740.fits +0 -0
  35. data/LR.20060725.47513.fits +0 -0
  36. data/LR.20060725.49810.fits +0 -0
  37. data/LR.20060726.41842.fits +0 -0
  38. data/LR.20060726.48303.fits +0 -0
  39. data/LR.20060726.49184.fits +0 -0
  40. data/LR.20060921.21065.fits +0 -0
  41. data/LR.20060921.30235.fits +0 -0
  42. data/LR.20060921.30742.fits +0 -0
  43. data/LR.20060921.31853.fits +0 -0
  44. data/LR.20060921.33371.fits +0 -0
  45. data/LR.20060921.43710.fits +0 -0
  46. data/LR.20061121.19974.fits +0 -0
  47. data/LR.20061121.27414.fits +0 -0
  48. data/LR.20061121.49514.fits +0 -0
  49. data/LR.20070416.21338.fits +0 -0
  50. data/LR.20070416.24302.fits +0 -0
.gitattributes CHANGED
@@ -56,3 +56,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+*.fits filter=lfs diff=lfs merge=lfs -text
GBI-16-2D.py ADDED
@@ -0,0 +1,172 @@
import os
import random
from glob import glob
import json

import numpy as np
from astropy.io import fits
from astropy.coordinates import Angle
from astropy import units as u
from fsspec.core import url_to_fs

from huggingface_hub import hf_hub_download
import datasets
from datasets import DownloadManager

from utils import read_lris


_DESCRIPTION = (
    """GBI-16-2D is a dataset which is part of the AstroCompress project. """
    """It contains data assembled from the Keck Telescope. """
    """<TODO>Describe data format</TODO>"""
)

_HOMEPAGE = "https://google.github.io/AstroCompress"

_LICENSE = "CC BY 4.0"

_URL = "https://huggingface.co/datasets/AstroCompress/GBI-16-2D/resolve/main/"

_URLS = {
    "tiny": {
        "train": "./splits/tiny_train.jsonl",
        "test": "./splits/tiny_test.jsonl",
    },
    "full": {
        "train": "./splits/full_train.jsonl",
        "test": "./splits/full_test.jsonl",
    },
}

_REPO_ID = "AstroCompress/GBI-16-2D"


class GBI_16_2D(datasets.GeneratorBasedBuilder):
    """GBI-16-2D Dataset"""

    VERSION = datasets.Version("1.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="tiny",
            version=VERSION,
            description="A small subset of the data, to test downstream workflows.",
        ),
        datasets.BuilderConfig(
            name="full",
            version=VERSION,
            description="The full dataset",
        ),
    ]

    DEFAULT_CONFIG_NAME = "tiny"

    def __init__(self, **kwargs):
        super().__init__(version=self.VERSION, **kwargs)

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(decode=True, mode="I;16"),
                    "ra": datasets.Value("float64"),
                    "dec": datasets.Value("float64"),
                    "pixscale": datasets.Value("float64"),
                    "image_id": datasets.Value("string"),
                    "rotation_angle": datasets.Value("float64"),
                    "dim_1": datasets.Value("int64"),
                    "dim_2": datasets.Value("int64"),
                    "exposure_time": datasets.Value("float64"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation="TBD",
        )

    def _split_generators(self, dl_manager: DownloadManager):

        ret = []
        base_path = dl_manager._base_path
        # If the base path is not the HF endpoint, we are running from a local clone.
        locally_run = not base_path.startswith(datasets.config.HF_ENDPOINT)
        _, path = url_to_fs(base_path)

        for split in ["train", "test"]:
            if locally_run:
                split_file_location = os.path.normpath(
                    os.path.join(path, _URLS[self.config.name][split])
                )
                split_file = dl_manager.download_and_extract(split_file_location)
            else:
                split_file = hf_hub_download(
                    repo_id=_REPO_ID,
                    filename=_URLS[self.config.name][split],
                    repo_type="dataset",
                )
            # Each line of the split file is a JSON record: a FITS path plus its metadata.
            with open(split_file, encoding="utf-8") as f:
                data_filenames = []
                data_metadata = []
                for line in f:
                    item = json.loads(line)
                    data_filenames.append(item["image"])
                    data_metadata.append(
                        {
                            "ra": item["ra"],
                            "dec": item["dec"],
                            "pixscale": item["pixscale"],
                            "image_id": item["image_id"],
                            "rotation_angle": item["rotation_angle"],
                            "dim_1": item["dim_1"],
                            "dim_2": item["dim_2"],
                            "exposure_time": item["exposure_time"],
                        }
                    )
                if locally_run:
                    data_urls = [
                        os.path.normpath(os.path.join(path, data_filename))
                        for data_filename in data_filenames
                    ]
                    data_files = [
                        dl_manager.download(data_url) for data_url in data_urls
                    ]
                else:
                    data_urls = data_filenames
                    data_files = [
                        hf_hub_download(
                            repo_id=_REPO_ID, filename=data_url, repo_type="dataset"
                        )
                        for data_url in data_urls
                    ]
                ret.append(
                    datasets.SplitGenerator(
                        name=(
                            datasets.Split.TRAIN
                            if split == "train"
                            else datasets.Split.TEST
                        ),
                        gen_kwargs={
                            "filepaths": data_files,
                            "split_file": split_file,
                            "split": split,
                            "data_metadata": data_metadata,
                        },
                    ),
                )
        return ret

    def _generate_examples(self, filepaths, split_file, split, data_metadata):
        """Generate GBI-16-2D examples"""

        for idx, (filepath, item) in enumerate(zip(filepaths, data_metadata)):
            task_instance_key = f"{self.config.name}-{split}-{idx}"
            with fits.open(filepath, memmap=False) as hdul:
                if len(hdul) > 1:
                    # Multi-extension file: paste the amplifier panels together.
                    data, _ = read_lris(filepath)
                else:
                    data = hdul[0].data
                image_data = data[:, :]
                yield task_instance_key, {**{"image": image_data}, **item}
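For reference, the single- vs. multi-extension branch in `_generate_examples` can be exercised on one file outside of `datasets`. Below is a minimal sketch, using one of the FITS files added in this commit; `utils.read_lris` is this repo's helper for reassembling LRIS amplifier panels, called with the same signature as in the script above:

```python
from astropy.io import fits
from utils import read_lris  # helper shipped in this repo

filepath = "data/LR.20051204.41155.fits"
with fits.open(filepath, memmap=False) as hdul:
    if len(hdul) > 1:
        # Multi-extension LRIS exposure: paste the amplifier panels together.
        data, _ = read_lris(filepath)
    else:
        # Single-extension exposure: the image is in the primary HDU.
        data = hdul[0].data
print(data.shape, data.dtype)
```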
README.md ADDED
@@ -0,0 +1,83 @@
---
license: cc-by-4.0
pretty_name: Ground-based Imaging data
tags:
- astronomy
- compression
- images
---

# GBI-16-2D Dataset

GBI-16-2D is a dataset which is part of the AstroCompress project. It contains data assembled from the Keck Telescope. <TODO>Describe data format</TODO>

# Usage

You first need to install the `datasets` and `astropy` packages:

```bash
pip install datasets astropy
```

There are two configurations: `tiny` and `full`, each with `train` and `test` splits. The `tiny` configuration has two 2D images in `train` and one in `test`; the `full` configuration contains all the images in the `data/` directory.
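If you only need one split, you can pass `split` to `load_dataset`; a minimal sketch (remote form shown; see the sections below for the full local and remote invocations):

```python
from datasets import load_dataset

# Load only the test split of the tiny configuration.
ds_test = load_dataset(
    "AstroCompress/GBI-16-2D",
    "tiny",
    split="test",
    writer_batch_size=1,
    trust_remote_code=True,
)
```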
## Local Use (RECOMMENDED)

You can clone this repo and use it directly, without connecting to the Hugging Face Hub:

```bash
git clone https://huggingface.co/datasets/AstroCompress/GBI-16-2D
```

```bash
cd GBI-16-2D
git lfs pull
```

Then start Python like:

```python
from datasets import load_dataset
dataset = load_dataset("./GBI-16-2D.py", "tiny", data_dir="./data/", writer_batch_size=1, trust_remote_code=True)
ds = dataset.with_format("np")
```
Now you should be able to use the `ds` variable like:

```python
ds["test"][0]["image"].shape # -> (TBD)
```

Note that for the `full` dataset it will take a long time to download and convert the images into the local cache. Afterward, access should be quick, as the files are memory-mapped from disk.
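Each example also carries the metadata fields declared in the loading script (`ra`, `dec`, `pixscale`, `image_id`, `rotation_angle`, `dim_1`, `dim_2`, `exposure_time`), for example:

```python
# Inspect an image's metadata alongside the pixel data.
example = ds["test"][0]
print(example["image_id"], example["ra"], example["dec"])
print(example["exposure_time"], example["dim_1"], example["dim_2"])
```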
## Use from Hugging Face Directly

This method may only be an option when trying to access the `tiny` version of the dataset.

To use this data directly from Hugging Face, you'll want to log in on the command line before starting Python:

```bash
huggingface-cli login
```

or

```python
import huggingface_hub
huggingface_hub.login(token=token)
```

Then in your Python script:

```python
from datasets import load_dataset
dataset = load_dataset("AstroCompress/GBI-16-2D", "tiny", writer_batch_size=1, trust_remote_code=True)
ds = dataset.with_format("np")
```
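The loader decodes images in 16-bit integer mode (`"I;16"` in the loading script), so once an example is materialized you can sanity-check the dtype; a small sketch, assuming the usual PIL-to-NumPy conversion:

```python
img = ds["train"][0]["image"]
print(img.dtype, img.shape)  # expect 16-bit integer pixels; shape varies per exposure
```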
## Demo Colab Notebook
We provide a demo Colab notebook to get started with the dataset [here](https://colab.research.google.com/drive/1SuFBPZiYZg9LH4pqypc_v8Sp99lShJqZ?usp=sharing).

## Utils scripts
Note that utils scripts such as `eval_baselines.py` must be run from the parent directory of `utils`, i.e. `python utils/eval_baselines.py`.
data/LR.20051204.41155.fits ADDED
data/LR.20051204.41651.fits ADDED
data/LR.20051204.43259.fits ADDED
data/LR.20051204.43899.fits ADDED
data/LR.20051204.46034.fits ADDED
data/LR.20051204.47387.fits ADDED
data/LR.20051204.49021.fits ADDED
data/LR.20051204.51257.fits ADDED
data/LR.20051204.53196.fits ADDED
data/LR.20051204.54066.fits ADDED
data/LR.20051204.56002.fits ADDED
data/LR.20051204.57105.fits ADDED
data/LR.20051204.57873.fits ADDED
data/LR.20060530.30214.fits ADDED
data/LR.20060530.32407.fits ADDED
data/LR.20060530.36483.fits ADDED
data/LR.20060530.43065.fits ADDED
data/LR.20060530.45164.fits ADDED
data/LR.20060530.46025.fits ADDED
data/LR.20060530.48970.fits ADDED
data/LR.20060530.50806.fits ADDED
data/LR.20060530.51656.fits ADDED
data/LR.20060531.46897.fits ADDED
data/LR.20060531.49568.fits ADDED
data/LR.20060531.50684.fits ADDED
data/LR.20060531.50878.fits ADDED
data/LR.20060725.29836.fits ADDED
data/LR.20060725.37294.fits ADDED
data/LR.20060725.42247.fits ADDED
data/LR.20060725.44412.fits ADDED
data/LR.20060725.46740.fits ADDED
data/LR.20060725.47513.fits ADDED
data/LR.20060725.49810.fits ADDED
data/LR.20060726.41842.fits ADDED
data/LR.20060726.48303.fits ADDED
data/LR.20060726.49184.fits ADDED
data/LR.20060921.21065.fits ADDED
data/LR.20060921.30235.fits ADDED
data/LR.20060921.30742.fits ADDED
data/LR.20060921.31853.fits ADDED
data/LR.20060921.33371.fits ADDED
data/LR.20060921.43710.fits ADDED
data/LR.20061121.19974.fits ADDED
data/LR.20061121.27414.fits ADDED
data/LR.20061121.49514.fits ADDED
data/LR.20070416.21338.fits ADDED
data/LR.20070416.24302.fits ADDED