{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "6e84dd0f", "metadata": {}, "outputs": [], "source": [ "import os\n", "from tqdm import tqdm\n", "import glob\n", "from astropy.io import fits\n", "import os\n", "from astropy.io import fits\n", "from astropy.wcs import WCS\n", "from spherical_geometry.polygon import SphericalPolygon\n", "import os\n", "from astropy.io import fits\n", "from astropy.wcs import WCS\n", "from spherical_geometry.polygon import SphericalPolygon\n", "from sklearn.cluster import AgglomerativeClustering\n", "import matplotlib.pyplot as plt\n", "import pandas as pd\n", "from astropy.io import fits\n", "import pandas as pd\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "\n", "\"\"\"\n", "First use hubble_downloading file before using this.\n", "\"\"\"\n", "\n", "def get_all_fits_files(root_dir):\n", " # Use glob to recursively find all .fits files\n", " pattern = os.path.join(root_dir, '**', '*.fits')\n", " fits_files = glob.glob(pattern, recursive=True)\n", " return fits_files" ] }, { "cell_type": "code", "execution_count": 9, "id": "4f34a245", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|███████████████████████████████████████████| 14/14 [02:03<00:00, 8.81s/it]\n" ] } ], "source": [ "dirs = [d for d in os.listdir('.') if os.path.isdir(d) and str(d).startswith(\"MAST\")]\n", "\n", "all_fits = []\n", "\n", "for d in tqdm(dirs):\n", " fits_files = get_all_fits_files(d)\n", " all_fits.extend(fits_files)" ] }, { "cell_type": "code", "execution_count": 30, "id": "51770e43", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ " 43%|███████████████▋ | 10175/23915 [08:12<12:36, 18.16it/s]WARNING: File may have been truncated: actual file length (28813816) is smaller than the expected size (33598080) [astropy.io.fits.file]\n", "100%|█████████████████████████████████████| 23915/23915 [20:00<00:00, 19.92it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "2149\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n" ] } ], "source": [ "ct = 0\n", "\n", "valid_fits_paths = []\n", "\n", "for fits_path in tqdm(all_fits):\n", " with fits.open(fits_path) as hdul:\n", " try:\n", " if hdul[1].data.dtype == np.dtype('uint16'):\n", " #print(hdul.info())\n", " assert hdul[1].data.shape == hdul[4].data.shape\n", " ct += 1\n", " valid_fits_paths.append(fits_path)\n", " except:\n", " continue\n", " \n", "print(ct)" ] }, { "cell_type": "code", "execution_count": 33, "id": "cfad3290", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "File paths saved to valid_fits_paths.txt\n" ] } ], "source": [ "def save_filepaths_to_text(filepaths, output_file):\n", " try:\n", " with open(output_file, 'w') as f:\n", " for filepath in filepaths:\n", " f.write(filepath + '\\n')\n", " print(f\"File paths saved to {output_file}\")\n", " except Exception as e:\n", " print(f\"Error saving file paths: {e}\")\n", "\n", "save_filepaths_to_text(valid_fits_paths, \"valid_fits_paths.txt\")\n" ] }, { "cell_type": "code", "execution_count": 11, "id": "1a460324", "metadata": {}, "outputs": [], "source": [ "valid_fits_paths = os.listdir('./data')" ] }, { "cell_type": "code", "execution_count": 12, "id": "e68b6a9e", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|███████████████████████████████████████| 2149/2149 [02:00<00:00, 17.77it/s]\n" ] } ], "source": [ "# Initialize the list of confirmed FITS paths\n", 
"confirmed_fits_paths = []\n", "\n", "\"\"\"\n", "Because hubble FITS have two images at HDU 1 and 4,\n", "we need to calculate the union of the spherical polygons\n", "for both of these, when storing the total FITS polygons.\n", "\n", "\"\"\"\n", "\n", "all_polys = []\n", "\n", "for i in tqdm(range(len(valid_fits_paths))):\n", "\n", " path1 = os.path.join('data', valid_fits_paths[i])\n", " try:\n", " with fits.open(path1) as hdul1:\n", " wcs1a = WCS(hdul1[1].header)\n", " shape1a = sorted(tuple(wcs1a.pixel_shape))[:2]\n", "\n", " wcs1b = WCS(hdul1[4].header)\n", " shape1b = sorted(tuple(wcs1b.pixel_shape))[:2]\n", "\n", " # Get the footprints of the two WCS frames\n", " footprint1a = wcs1a.calc_footprint(axes=shape1a)\n", " footprint1b = wcs1b.calc_footprint(axes=shape1b)\n", "\n", "\n", " # Define two polygons\n", " poly1a = SphericalPolygon.from_radec(footprint1a[:, 0], footprint1a[:, 1])\n", " poly1b = SphericalPolygon.from_radec(footprint1b[:, 0], footprint1b[:, 1])\n", "\n", " poly1 = poly1a.union(poly1b)\n", "\n", " all_polys.append(poly1)\n", " except:\n", " continue" ] }, { "cell_type": "code", "execution_count": 13, "id": "72347e84", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|████████████████████████████████████| 2148/2148 [00:00<00:00, 77320.99it/s]\n" ] } ], "source": [ "latitudes = []\n", "longitudes = []\n", "\n", "for poly in tqdm(all_polys):\n", " pts = list(poly.to_radec())[0]\n", " ra = pts[0][0]\n", " dec = pts[1][0]\n", " \n", " longitudes.append(ra)\n", " latitudes.append(dec)" ] }, { "cell_type": "code", "execution_count": 14, "id": "a396a37f", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Symmetric?\n", "True\n", "(2148, 2148)\n" ] } ], "source": [ "n_points = len(latitudes)\n", "\n", "# Repeat each point n_points times for lat1, lon1\n", "lat1 = np.repeat(latitudes, n_points)\n", "lon1 = np.repeat(longitudes, n_points)\n", "\n", "# Tile the whole array n_points times for lat2, lon2\n", "lat2 = np.tile(latitudes, n_points)\n", "lon2 = np.tile(longitudes, n_points)\n", "\n", "# Calculates angular separation between two spherical coords\n", "# This can be lat/lon or ra/dec\n", "# Taken from astropy\n", "def angular_separation_deg(lon1, lat1, lon2, lat2):\n", " lon1 = np.deg2rad(lon1)\n", " lon2 = np.deg2rad(lon2)\n", " lat1 = np.deg2rad(lat1)\n", " lat2 = np.deg2rad(lat2)\n", " \n", " sdlon = np.sin(lon2 - lon1)\n", " cdlon = np.cos(lon2 - lon1)\n", " slat1 = np.sin(lat1)\n", " slat2 = np.sin(lat2)\n", " clat1 = np.cos(lat1)\n", " clat2 = np.cos(lat2)\n", "\n", " num1 = clat2 * sdlon\n", " num2 = clat1 * slat2 - slat1 * clat2 * cdlon\n", " denominator = slat1 * slat2 + clat1 * clat2 * cdlon\n", "\n", " return np.rad2deg(np.arctan2(np.hypot(num1, num2), denominator))\n", "\n", "# Compute the pairwise angular separations\n", "angular_separations = angular_separation_deg(lon1, lat1, lon2, lat2)\n", "\n", "# Reshape the result into a matrix form\n", "angular_separations_matrix = angular_separations.reshape(n_points, n_points)\n", "\n", "def check_symmetric(a, rtol=1e-05, atol=1e-07):\n", " return np.allclose(a, a.T, rtol=rtol, atol=atol)\n", "\n", "print(\"Symmetric?\")\n", "print(check_symmetric(angular_separations_matrix))\n", "print(angular_separations_matrix.shape)" ] }, { "cell_type": "code", "execution_count": 15, "id": "ae7ed213", "metadata": {}, "outputs": [], "source": [ "HUBBLE_FOV = 0.057\n", "#JWST_FOV = 0.0366667\n", "\n", "THRESH = HUBBLE_FOV * 3\n", "\n", "clustering = 
"clustering = AgglomerativeClustering(n_clusters=None, metric='precomputed', linkage='single', distance_threshold=THRESH)\n", "labels = clustering.fit_predict(angular_separations_matrix)" ] }, { "cell_type": "code", "execution_count": 17, "id": "bd5cc1db", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ " 0%| | 1/1947 [00:00<03:29, 9.28it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "FAIL 0.2290158291821388\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ " 2%|▋ | 30/1947 [00:19<05:46, 5.54it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "FAIL 0.25478384325067566\n", "FAIL 0.11201573962968173\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\r", " 2%|▋ | 32/1947 [00:20<04:39, 6.86it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "FAIL 0.08182961205102905\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ " 6%|██▏ | 108/1947 [00:47<08:23, 3.65it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "FAIL 0.31680112298937957\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ " 24%|█████████▍ | 470/1947 [00:51<00:06, 231.53it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "FAIL 0.08882975311005689\n", "FAIL 0.008033477806590562\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "100%|███████████████████████████████████████| 1947/1947 [00:51<00:00, 37.70it/s]\n" ] } ], "source": [ "failed_labels = []\n", "failed_paths = []\n", "\n", "# Within each cluster, grow a union polygon exposure by exposure and flag\n", "# any exposure that overlaps the area already covered\n", "for label in tqdm(np.unique(labels)):\n", "    polys = [(all_polys[i], valid_fits_paths[i]) for i in range(len(labels)) if labels[i] == label]\n", "    if len(polys) > 1:\n", "        total_poly = polys[0][0]\n", "        for i in range(1, len(polys)):\n", "            new_poly = polys[i][0]\n", "            new_path = polys[i][1]\n", "            if total_poly.intersects_poly(new_poly):\n", "                # Fraction of the new polygon already covered by the cluster union\n", "                overlap_frac = total_poly.intersection(new_poly).area() / new_poly.area()\n", "                print(f\"FAIL {overlap_frac}\")\n", "                failed_labels.append(label)\n", "                failed_paths.append(new_path)\n", "            else:\n", "                total_poly = total_poly.union(new_poly)\n" ] }, { "cell_type": "code", "execution_count": 18, "id": "7170d0e1", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "['j9l919b6q_raw.fits',\n", " 'je2r07ajq_raw.fits',\n", " 'jcdm56ncq_raw.fits',\n", " 'j9fc0tqaq_raw.fits',\n", " 'jbpk02ioq_raw.fits',\n", " 'jepx44lrq_raw.fits',\n", " 'j9cx01cfq_raw.fits']" ] }, "execution_count": 18, "metadata": {}, "output_type": "execute_result" } ], "source": [ "failed_paths" ] }, { "cell_type": "code", "execution_count": 105, "id": "5baea239", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "22 15\n", "58 12\n", "49 7\n", "55 7\n", "28 6\n", " ..\n", "1493 1\n", "1264 1\n", "1214 1\n", "1387 1\n", "141 1\n", "Name: count, Length: 1946, dtype: int64" ] }, "execution_count": 105, "metadata": {}, "output_type": "execute_result" } ], "source": [ "pd.Series(labels).value_counts()" ] },
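{ "cell_type": "code", "execution_count": null, "id": "9c2d4e6f", "metadata": {}, "outputs": [], "source": [ "# A quick sketch (not part of the original run): list the members of one\n", "# crowded cluster, e.g. label 22 from the value_counts above, to eyeball\n", "# which exposures were grouped onto the same sky region\n", "members = [valid_fits_paths[i] for i in range(len(labels)) if labels[i] == 22]\n", "print(len(members), members[:5])" ] },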
{ "cell_type": "code", "execution_count": null, "id": "cbb7bf27", "metadata": {}, "outputs": [], "source": [ "# Function to plot the rectangle\n", "def plot_rectangle(corners):\n", "    # Close the rectangle by repeating the first point at the end\n", "    closed_corners = np.append(corners, [corners[0]], axis=0)\n", "\n", "    # Plot the rectangle\n", "    plt.plot(closed_corners[:, 0], closed_corners[:, 1], 'b-')\n", "    plt.scatter(corners[:, 0], corners[:, 1], color='red')\n", "\n", "    # Annotate the points\n", "    for i, corner in enumerate(corners):\n", "        plt.annotate(f'P{i+1}', (corner[0], corner[1]), textcoords=\"offset points\", xytext=(5, 5), ha='center')\n", "\n", "    plt.xlabel('Longitude')\n", "    plt.ylabel('Latitude')\n", "    plt.title('Rectangle Plot from Given Corners')\n", "    plt.grid(True)\n", "\n", "# footprint1/footprint2 were never defined in this notebook; as an assumed\n", "# stand-in, recompute them from the sample exposure inspected in the next\n", "# cell so this cell runs\n", "with fits.open('./data/jcuh01euq_raw.fits') as hdul:\n", "    footprint1 = WCS(hdul[1].header).calc_footprint(axes=(4144, 2068))\n", "    footprint2 = WCS(hdul[4].header).calc_footprint(axes=(4144, 2068))\n", "\n", "# Call the function to plot the rectangles\n", "plot_rectangle(footprint1)\n", "plot_rectangle(footprint2)\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": 33, "id": "37557566", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "XTENSION= 'IMAGE ' / extension type BITPIX = 16 / bits per data value NAXIS = 2 / number of data axes NAXIS1 = 4144 / length of first data axis NAXIS2 = 2068 / length of second data axis PCOUNT = 0 / number of group parameters GCOUNT = 1 / number of groups INHERIT = T / inherit the primary header EXTNAME = 'SCI ' / extension name EXTVER = 1 / extension version number ROOTNAME= 'jcuh01euq ' / rootname of the observation setEXPNAME = 'jcuh01euq ' / exposure identifier DATAMIN = 2205. / the minimum value of the data DATAMAX = 51795. / the maximum value of the data BUNIT = 'COUNTS ' / brightness units BSCALE = 1.0 / scale factor for array value to physical value BZERO = 32768.0 / physical value for an array value of zero / WFC CCD CHIP IDENTIFICATION CCDCHIP = 2 / CCD chip (1 or 2) / World Coordinate System and Related Parameters WCSAXES = 2 / number of World Coordinate System axes CRPIX1 = 2124.0 / x-coordinate of reference pixel CRPIX2 = 1024.0 / y-coordinate of reference pixel CRVAL1 = 2.980193405890E+02 / first axis value at reference pixel CRVAL2 = 1.447422452918E+01 / second axis value at reference pixel CTYPE1 = 'RA---TAN' / the coordinate type for the first axis CTYPE2 = 'DEC--TAN' / the coordinate type for the second axis CD1_1 = 1.40038E-06 / partial of first axis coordinate w.r.t. x CD1_2 = 1.39725E-05 / partial of first axis coordinate w.r.t. y CD2_1 = 1.37888E-05 / partial of second axis coordinate w.r.t. x CD2_2 = -4.58499E-07 / partial of second axis coordinate w.r.t. y LTV1 = 24.0 / offset in X to subsection start LTV2 = 0.0 / offset in Y to subsection start RAW_LTV1= 24.0 / original offset in X to subsection start RAW_LTV2= 0.0 / original offset in Y to subsection start LTM1_1 = 1.0 / reciprocal of sampling rate in X LTM2_2 = 1.0 / reciprocal of sampling rate in Y ORIENTAT= 91.8795 / position angle of image y axis (deg. e of n) RA_APER = 2.980491666667E+02 / RA of aperture reference position DEC_APER= 1.447333333333E+01 / Declination of aperture reference position PA_APER = 91.4584 / Position Angle of reference aperture center (deVAFACTOR= 9.999498853766E-01 / velocity aberration plate scale factor / READOUT DEFINITION PARAMETERS CENTERA1= 2073 / subarray axis1 center pt in unbinned dect. pix CENTERA2= 1035 / subarray axis2 center pt in unbinned dect. 
pix SIZAXIS1= 4144 / subarray axis1 size in unbinned detector pixelsSIZAXIS2= 2068 / subarray axis2 size in unbinned detector pixelsBINAXIS1= 1 / axis1 data bin size in unbinned detector pixelsBINAXIS2= 1 / axis2 data bin size in unbinned detector pixels / PHOTOMETRY KEYWORDS PHOTMODE= ' ' / obserPHOTFLAM= 0.000000000000E+00 / inverse sensitivity, ergs/cm2/Ang/electron PHOTZPT = 0.000000 / ST magnitude zero point PHOTPLAM= 0.000000 / Pivot wavelength (Angstroms) PHOTBW = 0.000000 / RMS bandwidth of filter plus detector / REPEATED EXPOSURES INFO NCOMBINE= 1 / number of image sets combined during CR rejecti / DATA PACKET INFORMATION FILLCNT = 0 / number of segments containing fill ERRCNT = 0 / number of segments containing errors PODPSFF = F / podps fill present (T/F) STDCFFF = F / science telemetry fill data present (T=1/F=0) STDCFFP = '0x5569' / science telemetry fill pattern (hex) / ON-BOARD COMPRESSION INFORMATION WFCMPRSD= F / was WFC data compressed? (T/F) CBLKSIZ = 0 / size of compression block in 2-byte words LOSTPIX = 0 / #pixels lost due to buffer overflow COMPTYP = 'None ' / compression type performed (Partial/Full/None) / IMAGE STATISTICS AND DATA QUALITY FLAGS NGOODPIX= 8569792 / number of good pixels SDQFLAGS= 31743 / serious data quality flags GOODMIN = 2205. / minimum value of good pixels GOODMAX = 51795. / maximum value of good pixels GOODMEAN= 2346.49479940703 / mean value of good pixels SOFTERRS= 0 / number of soft error pixels (DQF=1) SNRMIN = 0.000000 / minimum signal to noise of good pixels SNRMAX = 0.000000 / maximum signal to noise of good pixels SNRMEAN = 0.000000 / mean value of signal to noise of good pixels MEANDARK= 0.000000 / average of the dark values subtracted MEANBLEV= 0.000000 / average of all bias levels subtracted MEANFLSH= 0.000000 / Mean number of counts in post flash exposure END \n" ] } ], "source": [ "fitpath = \"./data/jcuh01euq_raw.fits\"\n", "\n", "with fits.open(fitpath) as hdul1:\n", "    print(hdul1[1].header)" ] }, { "cell_type": "code", "execution_count": 22, "id": "da51818b", "metadata": {}, "outputs": [], "source": [ "def get_corners_and_metadata(fits_path):\n", "    # Return the 8 footprint corner coordinates (RA/Dec of the 4 corners)\n", "    # followed by the exposure time, matching the 9-way unpack below\n", "    with fits.open(fits_path) as hdul1:\n", "        wcs1a = WCS(hdul1[1].header)\n", "        shape1a = sorted(tuple(wcs1a.pixel_shape))[:2]\n", "        footprint1a = wcs1a.calc_footprint(axes=shape1a)\n", "        coords = list(footprint1a.flatten())\n", "        exp_time = hdul1[0].header['EXPTIME']\n", "\n", "    return coords + [exp_time]" ] }, { "cell_type": "code", "execution_count": 23, "id": "cac0e38b", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|███████████████████████████████████████| 2142/2142 [00:26<00:00, 80.15it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ " fits_file ra1 dec1 ra2 dec2 \n", "0 jcuh01euq_raw.fits 298.001510 14.445419 298.061288 14.443517 \\\n", "1 jbkh05h9q_raw.fits 287.858027 -60.066247 287.752068 -60.042645 \n", "2 jcnu10r9q_raw.fits 201.025443 -43.460893 201.056301 -43.407484 \n", "3 jdba3qokq_raw.fits 141.681252 -24.804074 141.744131 -24.813923 \n", "4 jdrz77m0q_raw.fits 150.936991 40.747275 151.004173 40.719653 \n", "... ... ... ... ... ... \n",
\n", "2136 jbkz29rzq_raw.fits 138.498478 40.943594 138.437903 40.979121 \n", "2137 jdba3bi3q_raw.fits 131.959813 72.952872 131.893400 73.007432 \n", "2138 jbkz90hxq_raw.fits 46.020489 -44.048250 46.094849 -44.070590 \n", "2139 jcb805vtq_raw.fits 182.552746 49.999954 182.561072 49.942283 \n", "2140 jdba8aw2q_raw.fits 224.393934 -19.196982 224.334389 -19.183120 \n", "\n", " ra3 dec3 ra4 dec4 exposure_time \n", "0 298.064282 14.472018 298.004497 14.473921 580.0 \n", "1 287.725102 -60.067934 287.831124 -60.091557 500.0 \n", "2 201.021070 -43.394620 200.990187 -43.448019 430.0 \n", "3 141.751603 -24.786090 141.688738 -24.776244 348.0 \n", "4 151.024449 40.743832 150.957250 40.771467 390.0 \n", "... ... ... ... ... ... \n", "2136 138.412650 40.957740 138.473220 40.922227 400.0 \n", "2137 131.799083 72.999647 131.865760 72.945118 348.0 \n", "2138 46.112684 -44.044969 46.038349 -44.022640 400.0 \n", "2139 182.605575 49.942966 182.597302 50.000640 659.0 \n", "2140 224.325127 -19.210400 224.384680 -19.224265 348.0 \n", "\n", "[2141 rows x 10 columns]\n" ] } ], "source": [ "# Directory containing the FITS files\n", "data_dir = './data'\n", "\n", "# List to hold the data for the DataFrame\n", "data = []\n", "\n", "# Loop through all FITS files in the \"data\" directory\n", "for fits_file in tqdm(os.listdir(data_dir)):\n", " if fits_file.endswith('.fits'):\n", " file_path = os.path.join(data_dir, fits_file)\n", " ra1, dec1, ra2, dec2, ra3, dec3, ra4, dec4, exposure_time = get_corners_and_metadata(file_path)\n", " data.append([fits_file, ra1, dec1, ra2, dec2, ra3, dec3, ra4, dec4, exposure_time])\n", "\n", "# Create a DataFrame\n", "df = pd.DataFrame(data, columns=['fits_file', 'ra1', 'dec1', 'ra2', 'dec2', 'ra3', 'dec3', 'ra4', 'dec4', 'exposure_time'])\n", "\n", "# Display the DataFrame\n", "print(df)" ] }, { "cell_type": "code", "execution_count": 29, "id": "fc086514", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Train and test datasets have been saved to 'train_data.csv' and 'test_data.csv'.\n" ] } ], "source": [ "import pandas as pd\n", "from sklearn.model_selection import train_test_split\n", "\n", "# Assuming df is your DataFrame\n", "# df = pd.DataFrame(...) 
{ "cell_type": "code", "execution_count": 29, "id": "fc086514", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Train and test datasets have been saved to 'train_split.csv' and 'test_split.csv'.\n" ] } ], "source": [ "from sklearn.model_selection import train_test_split\n", "\n", "# Perform an 85/15 train-test split of the footprint DataFrame built above;\n", "# random_state pins the shuffle so the split is reproducible\n", "train_df, test_df = train_test_split(df, test_size=0.15, random_state=42)\n", "\n", "# Save the train and test DataFrames to CSV files\n", "train_df.to_csv('train_split.csv', index=False)\n", "test_df.to_csv('test_split.csv', index=False)\n", "\n", "print(\"Train and test datasets have been saved to 'train_split.csv' and 'test_split.csv'.\")" ] }, { "cell_type": "code", "execution_count": 28, "id": "ab4a9a6f", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "['README.md',\n", " 'hst_FINAL.csv',\n", " 'hubble_data_filtering.ipynb',\n", " 'data',\n", " 'valid_fits_paths.txt',\n", " 'SBI-16-2D.py',\n", " '.gitattributes',\n", " '.git',\n", " '.ipynb_checkpoints']" ] }, "execution_count": 28, "metadata": {}, "output_type": "execute_result" } ], "source": [ "os.listdir('.')" ] }, { "cell_type": "code", "execution_count": 30, "id": "2a77c29e", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CSV file has been converted and saved as JSONL at test_split.jsonl\n", "CSV file has been converted and saved as JSONL at train_split.jsonl\n" ] } ], "source": [ "names = [\"test_split\", \"train_split\"]\n", "\n", "for name in names:\n", "    # Load the CSV split (use a fresh name so the main df is not clobbered)\n", "    split_df = pd.read_csv(f'{name}.csv')\n", "\n", "    # Save as JSON Lines, one record per row\n", "    jsonl_file_path = f'{name}.jsonl'\n", "    split_df.to_json(jsonl_file_path, orient='records', lines=True)\n", "\n", "    print(f\"CSV file has been converted and saved as JSONL at {jsonl_file_path}\")" ] }, { "cell_type": "code", "execution_count": null, "id": "c78322ff", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.13" } }, "nbformat": 4, "nbformat_minor": 5 }