rithwiks committed on
Commit
2c59d47
•
1 Parent(s): ef312f6

added code to download and filter the data

Files changed (1)
  1. utils/hubble_filtering.ipynb +730 -0
utils/hubble_filtering.ipynb ADDED
@@ -0,0 +1,730 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "6e84dd0f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import glob\n",
+ "\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "from tqdm import tqdm\n",
+ "from astropy.io import fits\n",
+ "from astropy.wcs import WCS\n",
+ "from spherical_geometry.polygon import SphericalPolygon\n",
+ "from sklearn.cluster import AgglomerativeClustering\n",
+ "\n",
+ "def get_all_fits_files(root_dir):\n",
+ "    # Use glob to recursively find all .fits files under root_dir\n",
+ "    pattern = os.path.join(root_dir, '**', '*.fits')\n",
+ "    fits_files = glob.glob(pattern, recursive=True)\n",
+ "    return fits_files"
+ ]
+ },
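+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "editor-sketch-1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Editor's sketch (not part of the original commit): an equivalent of\n",
+ "# get_all_fits_files using pathlib, shown only for comparison.\n",
+ "# get_all_fits_files_pathlib is a hypothetical name.\n",
+ "from pathlib import Path\n",
+ "\n",
+ "def get_all_fits_files_pathlib(root_dir):\n",
+ "    # rglob('*.fits') matches recursively, like glob's '**' pattern\n",
+ "    return [str(p) for p in Path(root_dir).rglob('*.fits')]"
+ ]
+ },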
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "4f34a245",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|███████████████████████████████████████████| 14/14 [02:03<00:00,  8.81s/it]\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Gather every FITS file under the downloaded MAST directories\n",
+ "dirs = [d for d in os.listdir('.') if os.path.isdir(d) and d.startswith(\"MAST\")]\n",
+ "\n",
+ "all_fits = []\n",
+ "\n",
+ "for d in tqdm(dirs):\n",
+ "    fits_files = get_all_fits_files(d)\n",
+ "    all_fits.extend(fits_files)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "id": "51770e43",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " 43%|███████████████▋                    | 10175/23915 [08:12<12:36, 18.16it/s]WARNING: File may have been truncated: actual file length (28813816) is smaller than the expected size (33598080) [astropy.io.fits.file]\n",
+ "100%|████████████████████████████████████| 23915/23915 [20:00<00:00, 19.92it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "2149\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Keep only files whose first SCI extension is uint16 and whose two\n",
+ "# imaging extensions (HDUs 1 and 4) have matching shapes; files that\n",
+ "# are truncated or missing an extension are skipped.\n",
+ "ct = 0\n",
+ "\n",
+ "valid_fits_paths = []\n",
+ "\n",
+ "for fits_path in tqdm(all_fits):\n",
+ "    with fits.open(fits_path) as hdul:\n",
+ "        try:\n",
+ "            if hdul[1].data.dtype == np.dtype('uint16'):\n",
+ "                assert hdul[1].data.shape == hdul[4].data.shape\n",
+ "                ct += 1\n",
+ "                valid_fits_paths.append(fits_path)\n",
+ "        except Exception:\n",
+ "            continue\n",
+ "\n",
+ "print(ct)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 33,
+ "id": "cfad3290",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "File paths saved to valid_fits_paths.txt\n"
+ ]
+ }
+ ],
+ "source": [
+ "def save_filepaths_to_text(filepaths, output_file):\n",
+ "    try:\n",
+ "        with open(output_file, 'w') as f:\n",
+ "            for filepath in filepaths:\n",
+ "                f.write(filepath + '\\n')\n",
+ "        print(f\"File paths saved to {output_file}\")\n",
+ "    except Exception as e:\n",
+ "        print(f\"Error saving file paths: {e}\")\n",
+ "\n",
+ "save_filepaths_to_text(valid_fits_paths, \"valid_fits_paths.txt\")\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "id": "1a460324",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Reload the file list from the local data directory\n",
+ "valid_fits_paths = os.listdir('./data')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "e68b6a9e",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|██████████████████████████████████████| 2149/2149 [02:00<00:00, 17.77it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Initialize the list of confirmed FITS paths\n",
+ "confirmed_fits_paths = []\n",
+ "\n",
+ "all_polys = []\n",
+ "\n",
+ "# Build one spherical polygon per file by uniting the footprints of its\n",
+ "# two imaging extensions\n",
+ "for i in tqdm(range(len(valid_fits_paths))):\n",
+ "    path1 = os.path.join('data', valid_fits_paths[i])\n",
+ "    try:\n",
+ "        with fits.open(path1) as hdul1:\n",
+ "            wcs1a = WCS(hdul1[1].header)\n",
+ "            shape1a = sorted(tuple(wcs1a.pixel_shape))[:2]\n",
+ "\n",
+ "            wcs1b = WCS(hdul1[4].header)\n",
+ "            shape1b = sorted(tuple(wcs1b.pixel_shape))[:2]\n",
+ "\n",
+ "            # Get the footprints of the two WCS frames\n",
+ "            footprint1a = wcs1a.calc_footprint(axes=shape1a)\n",
+ "            footprint1b = wcs1b.calc_footprint(axes=shape1b)\n",
+ "\n",
+ "            # Define two polygons\n",
+ "            poly1a = SphericalPolygon.from_radec(footprint1a[:, 0], footprint1a[:, 1])\n",
+ "            poly1b = SphericalPolygon.from_radec(footprint1b[:, 0], footprint1b[:, 1])\n",
+ "\n",
+ "            poly1 = poly1a.union(poly1b)\n",
+ "\n",
+ "            all_polys.append(poly1)\n",
+ "    except Exception:\n",
+ "        continue"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "id": "72347e84",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|███████████████████████████████████| 2148/2148 [00:00<00:00, 77320.99it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Use the first (RA, Dec) vertex of each polygon as its representative position\n",
+ "latitudes = []\n",
+ "longitudes = []\n",
+ "\n",
+ "for poly in tqdm(all_polys):\n",
+ "    pts = list(poly.to_radec())[0]\n",
+ "    ra = pts[0][0]\n",
+ "    dec = pts[1][0]\n",
+ "\n",
+ "    longitudes.append(ra)\n",
+ "    latitudes.append(dec)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "id": "a396a37f",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Symmetric?\n",
+ "True\n",
+ "(2148, 2148)\n"
+ ]
+ }
+ ],
+ "source": [
+ "n_points = len(latitudes)\n",
+ "\n",
+ "# Repeat each point n_points times for lat1, lon1\n",
+ "lat1 = np.repeat(latitudes, n_points)\n",
+ "lon1 = np.repeat(longitudes, n_points)\n",
+ "\n",
+ "# Tile the whole array n_points times for lat2, lon2\n",
+ "lat2 = np.tile(latitudes, n_points)\n",
+ "lon2 = np.tile(longitudes, n_points)\n",
+ "\n",
+ "# Calculates angular separation between two spherical coords\n",
+ "# This can be lat/lon or ra/dec\n",
+ "# Taken from astropy\n",
+ "def angular_separation_deg(lon1, lat1, lon2, lat2):\n",
+ "    lon1 = np.deg2rad(lon1)\n",
+ "    lon2 = np.deg2rad(lon2)\n",
+ "    lat1 = np.deg2rad(lat1)\n",
+ "    lat2 = np.deg2rad(lat2)\n",
+ "\n",
+ "    sdlon = np.sin(lon2 - lon1)\n",
+ "    cdlon = np.cos(lon2 - lon1)\n",
+ "    slat1 = np.sin(lat1)\n",
+ "    slat2 = np.sin(lat2)\n",
+ "    clat1 = np.cos(lat1)\n",
+ "    clat2 = np.cos(lat2)\n",
+ "\n",
+ "    num1 = clat2 * sdlon\n",
+ "    num2 = clat1 * slat2 - slat1 * clat2 * cdlon\n",
+ "    denominator = slat1 * slat2 + clat1 * clat2 * cdlon\n",
+ "\n",
+ "    return np.rad2deg(np.arctan2(np.hypot(num1, num2), denominator))\n",
+ "\n",
+ "# Compute the pairwise angular separations\n",
+ "angular_separations = angular_separation_deg(lon1, lat1, lon2, lat2)\n",
+ "\n",
+ "# Reshape the result into a matrix form\n",
+ "angular_separations_matrix = angular_separations.reshape(n_points, n_points)\n",
+ "\n",
+ "def check_symmetric(a, rtol=1e-05, atol=1e-07):\n",
+ "    return np.allclose(a, a.T, rtol=rtol, atol=atol)\n",
+ "\n",
+ "print(\"Symmetric?\")\n",
+ "print(check_symmetric(angular_separations_matrix))\n",
+ "print(angular_separations_matrix.shape)"
+ ]
+ },
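+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "editor-sketch-2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Editor's sketch (not part of the original commit): cross-check the\n",
+ "# hand-rolled angular_separation_deg against astropy's\n",
+ "# SkyCoord.separation on one arbitrary pair of coordinates.\n",
+ "from astropy.coordinates import SkyCoord\n",
+ "import astropy.units as u\n",
+ "\n",
+ "c1 = SkyCoord(ra=10.0 * u.deg, dec=20.0 * u.deg)\n",
+ "c2 = SkyCoord(ra=11.0 * u.deg, dec=21.5 * u.deg)\n",
+ "expected = c1.separation(c2).deg\n",
+ "computed = angular_separation_deg(10.0, 20.0, 11.0, 21.5)\n",
+ "assert np.isclose(computed, expected), (computed, expected)"
+ ]
+ },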
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "id": "ae7ed213",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "HUBBLE_FOV = 0.057  # degrees\n",
+ "#JWST_FOV = 0.0366667\n",
+ "\n",
+ "THRESH = HUBBLE_FOV * 3\n",
+ "\n",
+ "# Single-linkage clustering on the precomputed separation matrix groups\n",
+ "# exposures that lie within a few fields of view of one another\n",
+ "clustering = AgglomerativeClustering(n_clusters=None, metric='precomputed', linkage='single', distance_threshold=THRESH)\n",
+ "labels = clustering.fit_predict(angular_separations_matrix)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "id": "bd5cc1db",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "  0%|          | 1/1947 [00:00<03:29,  9.28it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "FAIL 0.2290158291821388\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "  2%|▋         | 30/1947 [00:19<05:46,  5.54it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "FAIL 0.25478384325067566\n",
+ "FAIL 0.11201573962968173\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "\r",
+ "  2%|▋         | 32/1947 [00:20<04:39,  6.86it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "FAIL 0.08182961205102905\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "  6%|██▏       | 108/1947 [00:47<08:23,  3.65it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "FAIL 0.31680112298937957\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " 24%|█████████▏              | 470/1947 [00:51<00:06, 231.53it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "FAIL 0.08882975311005689\n",
+ "FAIL 0.008033477806590562\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|███████████████████████████████████████| 1947/1947 [00:51<00:00, 37.70it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Within each cluster, grow a union footprint one exposure at a time and\n",
+ "# flag any exposure whose footprint overlaps the union built so far\n",
+ "failed_labels = []\n",
+ "failed_paths = []\n",
+ "\n",
+ "for label in tqdm(np.unique(labels)):\n",
+ "    polys = [(all_polys[i], valid_fits_paths[i]) for i in range(len(labels)) if labels[i] == label]\n",
+ "    if len(polys) > 1:\n",
+ "        total_poly = polys[0][0]\n",
+ "        for i in range(1, len(polys)):\n",
+ "            new_poly = polys[i][0]\n",
+ "            new_path = polys[i][1]\n",
+ "            if total_poly.intersects_poly(new_poly):\n",
+ "                # Fraction of the new footprint already covered by the cluster\n",
+ "                overlap_frac = total_poly.intersection(new_poly).area() / new_poly.area()\n",
+ "                print(f\"FAIL {overlap_frac}\")\n",
+ "                failed_labels.append(label)\n",
+ "                failed_paths.append(new_path)\n",
+ "                continue\n",
+ "            else:\n",
+ "                total_poly = total_poly.union(new_poly)\n"
+ ]
+ },
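+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "editor-sketch-3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Editor's sketch (not part of the original commit): one plausible way\n",
+ "# to act on the overlap check above is to drop the flagged files so the\n",
+ "# remaining footprints within each cluster are (near-)disjoint. That the\n",
+ "# flagged files should be dropped rather than merged is an assumption.\n",
+ "overlap_free_paths = [p for p in valid_fits_paths if p not in set(failed_paths)]\n",
+ "print(len(valid_fits_paths), len(overlap_free_paths))"
+ ]
+ },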
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "id": "7170d0e1",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['j9l919b6q_raw.fits',\n",
+ " 'je2r07ajq_raw.fits',\n",
+ " 'jcdm56ncq_raw.fits',\n",
+ " 'j9fc0tqaq_raw.fits',\n",
+ " 'jbpk02ioq_raw.fits',\n",
+ " 'jepx44lrq_raw.fits',\n",
+ " 'j9cx01cfq_raw.fits']"
+ ]
+ },
+ "execution_count": 18,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "failed_paths"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 105,
+ "id": "5baea239",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "22      15\n",
+ "58      12\n",
+ "49       7\n",
+ "55       7\n",
+ "28       6\n",
+ "        ..\n",
+ "1493     1\n",
+ "1264     1\n",
+ "1214     1\n",
+ "1387     1\n",
+ "141      1\n",
+ "Name: count, Length: 1946, dtype: int64"
+ ]
+ },
+ "execution_count": 105,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "pd.Series(labels).value_counts()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "cbb7bf27",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Function to plot a footprint rectangle from its four corners\n",
+ "def plot_rectangle(corners):\n",
+ "    # Close the rectangle by repeating the first point at the end\n",
+ "    closed_corners = np.append(corners, [corners[0]], axis=0)\n",
+ "\n",
+ "    # Plot the rectangle\n",
+ "    plt.plot(closed_corners[:, 0], closed_corners[:, 1], 'b-')\n",
+ "    plt.scatter(corners[:, 0], corners[:, 1], color='red')\n",
+ "\n",
+ "    # Annotate the points\n",
+ "    for i, corner in enumerate(corners):\n",
+ "        plt.annotate(f'P{i+1}', (corner[0], corner[1]), textcoords=\"offset points\", xytext=(5,5), ha='center')\n",
+ "\n",
+ "    plt.xlabel('Longitude')\n",
+ "    plt.ylabel('Latitude')\n",
+ "    plt.title('Rectangle Plot from Given Corners')\n",
+ "    plt.grid(True)\n",
+ "\n",
+ "# footprint1 and footprint2 are assumed to be 4x2 (RA, Dec) corner arrays\n",
+ "# from an earlier exploratory WCS.calc_footprint call; they are not defined\n",
+ "# elsewhere in this notebook, so this cell will not run as committed.\n",
+ "plot_rectangle(footprint1)\n",
+ "plot_rectangle(footprint2)\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 33,
+ "id": "37557566",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "XTENSION= 'IMAGE ' / extension type BITPIX = 16 / bits per data value NAXIS = 2 / number of data axes NAXIS1 = 4144 / length of first data axis NAXIS2 = 2068 / length of second data axis PCOUNT = 0 / number of group parameters GCOUNT = 1 / number of groups INHERIT = T / inherit the primary header EXTNAME = 'SCI ' / extension name EXTVER = 1 / extension version number ROOTNAME= 'jcuh01euq ' / rootname of the observation setEXPNAME = 'jcuh01euq ' / exposure identifier DATAMIN = 2205. / the minimum value of the data DATAMAX = 51795. / the maximum value of the data BUNIT = 'COUNTS ' / brightness units BSCALE = 1.0 / scale factor for array value to physical value BZERO = 32768.0 / physical value for an array value of zero / WFC CCD CHIP IDENTIFICATION CCDCHIP = 2 / CCD chip (1 or 2) / World Coordinate System and Related Parameters WCSAXES = 2 / number of World Coordinate System axes CRPIX1 = 2124.0 / x-coordinate of reference pixel CRPIX2 = 1024.0 / y-coordinate of reference pixel CRVAL1 = 2.980193405890E+02 / first axis value at reference pixel CRVAL2 = 1.447422452918E+01 / second axis value at reference pixel CTYPE1 = 'RA---TAN' / the coordinate type for the first axis CTYPE2 = 'DEC--TAN' / the coordinate type for the second axis CD1_1 = 1.40038E-06 / partial of first axis coordinate w.r.t. x CD1_2 = 1.39725E-05 / partial of first axis coordinate w.r.t. y CD2_1 = 1.37888E-05 / partial of second axis coordinate w.r.t. x CD2_2 = -4.58499E-07 / partial of second axis coordinate w.r.t. y LTV1 = 24.0 / offset in X to subsection start LTV2 = 0.0 / offset in Y to subsection start RAW_LTV1= 24.0 / original offset in X to subsection start RAW_LTV2= 0.0 / original offset in Y to subsection start LTM1_1 = 1.0 / reciprocal of sampling rate in X LTM2_2 = 1.0 / reciprocal of sampling rate in Y ORIENTAT= 91.8795 / position angle of image y axis (deg. e of n) RA_APER = 2.980491666667E+02 / RA of aperture reference position DEC_APER= 1.447333333333E+01 / Declination of aperture reference position PA_APER = 91.4584 / Position Angle of reference aperture center (deVAFACTOR= 9.999498853766E-01 / velocity aberration plate scale factor / READOUT DEFINITION PARAMETERS CENTERA1= 2073 / subarray axis1 center pt in unbinned dect. pix CENTERA2= 1035 / subarray axis2 center pt in unbinned dect. pix SIZAXIS1= 4144 / subarray axis1 size in unbinned detector pixelsSIZAXIS2= 2068 / subarray axis2 size in unbinned detector pixelsBINAXIS1= 1 / axis1 data bin size in unbinned detector pixelsBINAXIS2= 1 / axis2 data bin size in unbinned detector pixels / PHOTOMETRY KEYWORDS PHOTMODE= ' ' / obserPHOTFLAM= 0.000000000000E+00 / inverse sensitivity, ergs/cm2/Ang/electron PHOTZPT = 0.000000 / ST magnitude zero point PHOTPLAM= 0.000000 / Pivot wavelength (Angstroms) PHOTBW = 0.000000 / RMS bandwidth of filter plus detector / REPEATED EXPOSURES INFO NCOMBINE= 1 / number of image sets combined during CR rejecti / DATA PACKET INFORMATION FILLCNT = 0 / number of segments containing fill ERRCNT = 0 / number of segments containing errors PODPSFF = F / podps fill present (T/F) STDCFFF = F / science telemetry fill data present (T=1/F=0) STDCFFP = '0x5569' / science telemetry fill pattern (hex) / ON-BOARD COMPRESSION INFORMATION WFCMPRSD= F / was WFC data compressed? 
(T/F) CBLKSIZ = 0 / size of compression block in 2-byte words LOSTPIX = 0 / #pixels lost due to buffer overflow COMPTYP = 'None ' / compression type performed (Partial/Full/None) / IMAGE STATISTICS AND DATA QUALITY FLAGS NGOODPIX= 8569792 / number of good pixels SDQFLAGS= 31743 / serious data quality flags GOODMIN = 2205. / minimum value of good pixels GOODMAX = 51795. / maximum value of good pixels GOODMEAN= 2346.49479940703 / mean value of good pixels SOFTERRS= 0 / number of soft error pixels (DQF=1) SNRMIN = 0.000000 / minimum signal to noise of good pixels SNRMAX = 0.000000 / maximum signal to noise of good pixels SNRMEAN = 0.000000 / mean value of signal to noise of good pixels MEANDARK= 0.000000 / average of the dark values subtracted MEANBLEV= 0.000000 / average of all bias levels subtracted MEANFLSH= 0.000000 / Mean number of counts in post flash exposure END \n"
+ ]
+ }
+ ],
+ "source": [
+ "fitpath = \"./data/jcuh01euq_raw.fits\"\n",
+ "\n",
+ "with fits.open(fitpath) as hdul1:\n",
+ "    print(hdul1[1].header)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "id": "da51818b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def get_corners_and_metadata(fits_path):\n",
+ "    with fits.open(fits_path) as hdul1:\n",
+ "        wcs1a = WCS(hdul1[1].header)\n",
+ "        shape1a = sorted(tuple(wcs1a.pixel_shape))[:2]\n",
+ "        footprint1a = wcs1a.calc_footprint(axes=shape1a)\n",
+ "        coords = list(footprint1a.flatten())\n",
+ "        inf = hdul1[0].header\n",
+ "        ra_targ = inf['RA_TARG']    # read but currently unused\n",
+ "        dec_targ = inf['DEC_TARG']  # read but currently unused\n",
+ "        exp_time = inf['EXPTIME']\n",
+ "\n",
+ "    # Return the 8 corner coordinates plus the exposure time (9 values),\n",
+ "    # matching the unpacking in the DataFrame-building loop below\n",
+ "    return coords + [exp_time]"
+ ]
+ },
+ {
540
+ "cell_type": "code",
541
+ "execution_count": 23,
542
+ "id": "cac0e38b",
543
+ "metadata": {},
544
+ "outputs": [
545
+ {
546
+ "name": "stderr",
547
+ "output_type": "stream",
548
+ "text": [
549
+ "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 2142/2142 [00:26<00:00, 80.15it/s]\n"
550
+ ]
551
+ },
552
+ {
553
+ "name": "stdout",
554
+ "output_type": "stream",
555
+ "text": [
556
+ " fits_file ra1 dec1 ra2 dec2 \n",
557
+ "0 jcuh01euq_raw.fits 298.001510 14.445419 298.061288 14.443517 \\\n",
558
+ "1 jbkh05h9q_raw.fits 287.858027 -60.066247 287.752068 -60.042645 \n",
559
+ "2 jcnu10r9q_raw.fits 201.025443 -43.460893 201.056301 -43.407484 \n",
560
+ "3 jdba3qokq_raw.fits 141.681252 -24.804074 141.744131 -24.813923 \n",
561
+ "4 jdrz77m0q_raw.fits 150.936991 40.747275 151.004173 40.719653 \n",
562
+ "... ... ... ... ... ... \n",
563
+ "2136 jbkz29rzq_raw.fits 138.498478 40.943594 138.437903 40.979121 \n",
564
+ "2137 jdba3bi3q_raw.fits 131.959813 72.952872 131.893400 73.007432 \n",
565
+ "2138 jbkz90hxq_raw.fits 46.020489 -44.048250 46.094849 -44.070590 \n",
566
+ "2139 jcb805vtq_raw.fits 182.552746 49.999954 182.561072 49.942283 \n",
567
+ "2140 jdba8aw2q_raw.fits 224.393934 -19.196982 224.334389 -19.183120 \n",
568
+ "\n",
569
+ " ra3 dec3 ra4 dec4 exposure_time \n",
570
+ "0 298.064282 14.472018 298.004497 14.473921 580.0 \n",
571
+ "1 287.725102 -60.067934 287.831124 -60.091557 500.0 \n",
572
+ "2 201.021070 -43.394620 200.990187 -43.448019 430.0 \n",
573
+ "3 141.751603 -24.786090 141.688738 -24.776244 348.0 \n",
574
+ "4 151.024449 40.743832 150.957250 40.771467 390.0 \n",
575
+ "... ... ... ... ... ... \n",
576
+ "2136 138.412650 40.957740 138.473220 40.922227 400.0 \n",
577
+ "2137 131.799083 72.999647 131.865760 72.945118 348.0 \n",
578
+ "2138 46.112684 -44.044969 46.038349 -44.022640 400.0 \n",
579
+ "2139 182.605575 49.942966 182.597302 50.000640 659.0 \n",
580
+ "2140 224.325127 -19.210400 224.384680 -19.224265 348.0 \n",
581
+ "\n",
582
+ "[2141 rows x 10 columns]\n"
583
+ ]
584
+ }
585
+ ],
586
+ "source": [
587
+ "# Directory containing the FITS files\n",
588
+ "data_dir = './data'\n",
589
+ "\n",
590
+ "# List to hold the data for the DataFrame\n",
591
+ "data = []\n",
592
+ "\n",
593
+ "# Loop through all FITS files in the \"data\" directory\n",
594
+ "for fits_file in tqdm(os.listdir(data_dir)):\n",
595
+ " if fits_file.endswith('.fits'):\n",
596
+ " file_path = os.path.join(data_dir, fits_file)\n",
597
+ " ra1, dec1, ra2, dec2, ra3, dec3, ra4, dec4, exposure_time = get_corners_and_metadata(file_path)\n",
598
+ " data.append([fits_file, ra1, dec1, ra2, dec2, ra3, dec3, ra4, dec4, exposure_time])\n",
599
+ "\n",
600
+ "# Create a DataFrame\n",
601
+ "df = pd.DataFrame(data, columns=['fits_file', 'ra1', 'dec1', 'ra2', 'dec2', 'ra3', 'dec3', 'ra4', 'dec4', 'exposure_time'])\n",
602
+ "\n",
603
+ "# Display the DataFrame\n",
604
+ "print(df)"
605
+ ]
606
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "id": "fc086514",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Train and test datasets have been saved to 'train_split.csv' and 'test_split.csv'.\n"
+ ]
+ }
+ ],
+ "source": [
+ "from sklearn.model_selection import train_test_split\n",
+ "\n",
+ "# Perform an 85/15 train-test split\n",
+ "train_df, test_df = train_test_split(df, test_size=0.15, random_state=42)\n",
+ "\n",
+ "# Save the train and test DataFrames to CSV files\n",
+ "train_df.to_csv('train_split.csv', index=False)\n",
+ "test_df.to_csv('test_split.csv', index=False)\n",
+ "\n",
+ "print(\"Train and test datasets have been saved to 'train_split.csv' and 'test_split.csv'.\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "id": "ab4a9a6f",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['README.md',\n",
+ " 'hst_FINAL.csv',\n",
+ " 'hubble_data_filtering.ipynb',\n",
+ " 'data',\n",
+ " 'valid_fits_paths.txt',\n",
+ " 'SBI-16-2D.py',\n",
+ " '.gitattributes',\n",
+ " '.git',\n",
+ " '.ipynb_checkpoints']"
+ ]
+ },
+ "execution_count": 28,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "os.listdir('.')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "id": "2a77c29e",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CSV file has been converted and saved as JSONL at test_split.jsonl\n",
+ "CSV file has been converted and saved as JSONL at train_split.jsonl\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Convert each CSV split to JSON Lines\n",
+ "names = [\"test_split\", \"train_split\"]\n",
+ "\n",
+ "for name in names:\n",
+ "    # Load the CSV file into a DataFrame\n",
+ "    csv_file_path = f'{name}.csv'\n",
+ "    df = pd.read_csv(csv_file_path)\n",
+ "\n",
+ "    # Save the DataFrame as a JSONL file\n",
+ "    jsonl_file_path = f'{name}.jsonl'\n",
+ "    df.to_json(jsonl_file_path, orient='records', lines=True)\n",
+ "\n",
+ "    print(f\"CSV file has been converted and saved as JSONL at {jsonl_file_path}\")"
+ ]
+ },
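+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "editor-sketch-5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Editor's sketch (not part of the original commit): read one split\n",
+ "# back with pandas to confirm the JSONL files round-trip cleanly.\n",
+ "check_df = pd.read_json('train_split.jsonl', orient='records', lines=True)\n",
+ "print(check_df.shape)\n",
+ "print(check_df.head())"
+ ]
+ },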
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c78322ff",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.13"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }