{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[],"toc_visible":true,"machine_shape":"hm","authorship_tag":"ABX9TyNCjdoNZhish6uzw3kOYuO+"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"}},"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"9m84JFrYlumk","executionInfo":{"status":"ok","timestamp":1708367355834,"user_tz":300,"elapsed":17348,"user":{"displayName":"Johnny Ye","userId":"16268450102215689935"}},"colab":{"base_uri":"https://localhost:8080/"},"outputId":"3825a409-1170-4544-ab94-655a56075d2a"},"outputs":[{"output_type":"stream","name":"stdout","text":["Collecting datasets\n"," Downloading datasets-2.17.1-py3-none-any.whl (536 kB)\n","\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m536.7/536.7 kB\u001b[0m \u001b[31m3.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from datasets) (3.13.1)\n","Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from datasets) (1.25.2)\n","Collecting pyarrow>=12.0.0 (from datasets)\n"," Downloading pyarrow-15.0.0-cp310-cp310-manylinux_2_28_x86_64.whl (38.3 MB)\n","\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m38.3/38.3 MB\u001b[0m \u001b[31m14.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hRequirement already satisfied: pyarrow-hotfix in /usr/local/lib/python3.10/dist-packages (from datasets) (0.6)\n","Collecting dill<0.3.9,>=0.3.0 (from datasets)\n"," Downloading dill-0.3.8-py3-none-any.whl (116 kB)\n","\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m116.3/116.3 kB\u001b[0m \u001b[31m13.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hRequirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from datasets) (1.5.3)\n","Requirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.10/dist-packages (from datasets) (2.31.0)\n","Requirement already satisfied: tqdm>=4.62.1 in /usr/local/lib/python3.10/dist-packages (from datasets) (4.66.2)\n","Requirement already satisfied: xxhash in /usr/local/lib/python3.10/dist-packages (from datasets) (3.4.1)\n","Collecting multiprocess (from datasets)\n"," Downloading multiprocess-0.70.16-py310-none-any.whl (134 kB)\n","\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m134.8/134.8 kB\u001b[0m \u001b[31m13.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hRequirement already satisfied: fsspec[http]<=2023.10.0,>=2023.1.0 in /usr/local/lib/python3.10/dist-packages (from datasets) (2023.6.0)\n","Requirement already satisfied: aiohttp in /usr/local/lib/python3.10/dist-packages (from datasets) (3.9.3)\n","Requirement already satisfied: huggingface-hub>=0.19.4 in /usr/local/lib/python3.10/dist-packages (from datasets) (0.20.3)\n","Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from datasets) (23.2)\n","Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from datasets) (6.0.1)\n","Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (1.3.1)\n","Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (23.2.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) 
(1.4.1)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (6.0.5)\n","Requirement already satisfied: yarl<2.0,>=1.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (1.9.4)\n","Requirement already satisfied: async-timeout<5.0,>=4.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (4.0.3)\n","Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub>=0.19.4->datasets) (4.9.0)\n","Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets) (3.3.2)\n","Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets) (3.6)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets) (2.0.7)\n","Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets) (2024.2.2)\n","Requirement already satisfied: python-dateutil>=2.8.1 in /usr/local/lib/python3.10/dist-packages (from pandas->datasets) (2.8.2)\n","Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas->datasets) (2023.4)\n","Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.8.1->pandas->datasets) (1.16.0)\n","Installing collected packages: pyarrow, dill, multiprocess, datasets\n"," Attempting uninstall: pyarrow\n"," Found existing installation: pyarrow 10.0.1\n"," Uninstalling pyarrow-10.0.1:\n"," Successfully uninstalled pyarrow-10.0.1\n","\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
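The resolver warning above is real but harmless for this notebook, since `ibis-framework` is never imported here. If both packages were needed in the same environment, one possible workaround (a sketch, not a tested pin) is to constrain `pyarrow` to the window both accept, since the log shows `datasets` needs `pyarrow>=12` and `ibis-framework 7.1.0` needs `pyarrow<15`:

```python
# Hypothetical pin satisfying both constraints from the log above;
# version windows drift over time, so verify against `pip check`.
!pip install "datasets==2.17.1" "pyarrow>=12,<15"
```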
The first working version filters the raw Yelp dumps down to restaurant reviews and joins each review to its business record:

```python
import json

import datasets


class YelpDataset(datasets.GeneratorBasedBuilder):
    """Yelp Dataset focusing on restaurant reviews."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="restaurants",
            version=VERSION,
            description="This part of the dataset covers a wide range of restaurants",
        ),
    ]

    DEFAULT_CONFIG_NAME = "restaurants"

    _URL = "https://yelpdata.s3.us-west-2.amazonaws.com/"
    _URLS = {
        "business": _URL + "yelp_academic_dataset_business.json",
        "review": _URL + "yelp_academic_dataset_review.json",
    }

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "business_id": datasets.Value("string"),
                    "name": datasets.Value("string"),
                    "categories": datasets.Value("string"),
                    "review_id": datasets.Value("string"),
                    "user_id": datasets.Value("string"),
                    "stars": datasets.Value("float"),
                    "text": datasets.Value("string"),
                    "date": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://www.yelp.com/dataset/download",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators."""
        downloaded_files = dl_manager.download_and_extract(self._URLS)

        # NOTE: both generators receive the same files and `split` is unused in
        # _generate_examples, so TRAIN and TEST currently yield identical examples.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"business_path": downloaded_files["business"], "review_path": downloaded_files["review"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"business_path": downloaded_files["business"], "review_path": downloaded_files["review"], "split": "test"},
            ),
        ]

    def _generate_examples(self, business_path, review_path, split):
        """Yields examples as (key, example) tuples."""
        # Load businesses and keep only those whose categories mention "Restaurants".
        businesses = {}
        with open(business_path, encoding="utf-8") as f:
            for line in f:
                business = json.loads(line)
                if business.get("categories") and "Restaurants" in business["categories"]:
                    businesses[business["business_id"]] = business

        # Stream the reviews and yield those that belong to a restaurant.
        with open(review_path, encoding="utf-8") as f:
            for line in f:
                review = json.loads(line)
                business_id = review["business_id"]
                if business_id in businesses:
                    yield review["review_id"], {
                        "business_id": business_id,
                        "name": businesses[business_id]["name"],
                        "categories": businesses[business_id]["categories"],
                        "review_id": review["review_id"],
                        "user_id": review["user_id"],
                        "stars": review["stars"],
                        "text": review["text"],
                        "date": review["date"],
                    }
```
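A minimal usage sketch, assuming the cell above is saved next to the notebook as `yelp_dataset.py` (a hypothetical filename; the raw review dump is several gigabytes, so this is illustrative rather than quick):

```python
from datasets import load_dataset

# Hypothetical local script path; the full Yelp files take a while to download.
ds = load_dataset("yelp_dataset.py", "restaurants", split="train")

example = ds[0]
print(example["name"], example["stars"])
print(example["text"][:100])
```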
# New Version

```python
!pip install datasets
```

Output (abridged):

```
Installing collected packages: xxhash, dill, multiprocess, datasets
Successfully installed datasets-2.18.0 dill-0.3.8 multiprocess-0.70.16 xxhash-3.4.1
```
The new version exposes the business and review files as two custom splits with the full feature set. Two bugs in the original cell are fixed inline: `datasets.Split` has no `BUSINESS`/`REVIEW` attributes (custom splits must be constructed by name), and the downloaded files are JSON Lines, not CSV, so `csv.DictReader` would parse them as garbage.

```python
# -*- coding: utf-8 -*-
"""yelp_dataset.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/14UtK4YCjMSx4cVbUb9NBRHviWZg07dtY
"""

# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
# Licensed under the Apache License, Version 2.0; see the full header in the first cell.

import json
import logging
import os
from typing import List

import datasets

# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author = {huggingface, Inc.},
year = {2020}
}
"""

_DESCRIPTION = """\
This dataset encompasses a wealth of information from the Yelp platform,
detailing user reviews, business ratings, and operational specifics across a diverse array of local establishments.
"""

_HOMEPAGE = "https://www.yelp.com/dataset/download"

_LICENSE = "https://s3-media0.fl.yelpcdn.com/assets/srv0/engineering_pages/f64cb2d3efcc/assets/vendor/Dataset_User_Agreement.pdf"

# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
_URL = "https://yelpdata.s3.us-west-2.amazonaws.com/"
_URLS = {
    "business": _URL + "yelp_academic_dataset_business.json",
    "review": _URL + "yelp_academic_dataset_review.json",
}


class YelpDataset(datasets.GeneratorBasedBuilder):
    """Yelp businesses and reviews, exposed as two custom splits."""

    _URLS = _URLS
    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "business_id": datasets.Value("string"),
                    "name": datasets.Value("string"),
                    "address": datasets.Value("string"),
                    "city": datasets.Value("string"),
                    "state": datasets.Value("string"),
                    "postal_code": datasets.Value("string"),
                    "latitude": datasets.Value("float"),
                    "longitude": datasets.Value("float"),
                    # The _x/_y suffixes follow the pandas merge convention:
                    # stars_x is the business-level rating, stars_y the per-review rating.
                    "stars_x": datasets.Value("float"),
                    "review_count": datasets.Value("float"),
                    "is_open": datasets.Value("float"),
                    "categories": datasets.Value("string"),
                    "hours": datasets.Value("string"),
                    "review_id": datasets.Value("string"),
                    "user_id": datasets.Value("string"),
                    "stars_y": datasets.Value("float"),
                    "useful": datasets.Value("float"),
                    "funny": datasets.Value("float"),
                    "cool": datasets.Value("float"),
                    "text": datasets.Value("string"),
                    "date": datasets.Value("string"),
                    "attributes": datasets.Value("string"),
                }
            ),
            # No default supervised_keys.
            supervised_keys=None,
            homepage="https://www.yelp.com/dataset/download",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        urls_to_download = self._URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        # datasets.Split has no BUSINESS/REVIEW attributes; custom split names
        # are created by calling datasets.Split("name") (a plain string also works).
        return [
            datasets.SplitGenerator(name=datasets.Split("business"), gen_kwargs={"filepath": downloaded_files["business"]}),
            datasets.SplitGenerator(name=datasets.Split("review"), gen_kwargs={"filepath": downloaded_files["review"]}),
        ]

    def _generate_examples(self, filepath):
        """Yields examples in their raw (text) form."""
        logging.info("generating examples from = %s", filepath)
        # The Yelp dumps are JSON Lines (one JSON object per line), not CSV,
        # so each line is parsed with json.loads.
        is_business = "business" in os.path.basename(filepath)
        feature_keys = list(self._info().features)
        with open(filepath, encoding="utf-8") as f:
            for i, line in enumerate(f):
                record = json.loads(line)
                # Map the raw "stars" field onto its merge-suffixed feature name.
                if "stars" in record:
                    record["stars_x" if is_business else "stars_y"] = record.pop("stars")
                # hours/attributes are nested objects in the raw dump but are
                # declared as string features, so serialize them.
                for key in ("hours", "attributes"):
                    if isinstance(record.get(key), dict):
                        record[key] = json.dumps(record[key])
                # Missing or empty values become None so float features cast cleanly.
                yield i, {k: (record[k] if record.get(k) not in (None, "") else None) for k in feature_keys}
```

# Old Version

The earlier iteration read pre-split CSV exports (`yelp_train.csv` / `yelp_test.csv`) instead of the raw JSON dumps, so its use of `csv.DictReader` was consistent with its inputs:
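Again as a sketch under the same assumed `yelp_dataset.py` filename: the two custom splits load independently and can be joined on `business_id`, which is exactly the pandas merge whose default `_x`/`_y` suffixes the feature names anticipate.

```python
import pandas as pd
from datasets import load_dataset

# Hypothetical local script path; both files are large downloads.
business = load_dataset("yelp_dataset.py", split="business").to_pandas()
review = load_dataset("yelp_dataset.py", split="review").to_pandas()

# Select the populated columns from each side, then join on business_id.
biz = business[["business_id", "name", "city", "stars_x"]]
rev = review[["business_id", "stars_y", "text", "date"]]
merged = biz.merge(rev, on="business_id")
print(merged.head())
```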
```python
# -*- coding: utf-8 -*-
"""yelp_dataset.ipynb (old version)"""

# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
# Licensed under the Apache License, Version 2.0; see the full header in the first cell.

import csv
import logging
from typing import List

import datasets

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author = {huggingface, Inc.},
year = {2020}
}
"""

# TODO: Add description of the dataset here
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""

_HOMEPAGE = "https://www.yelp.com/dataset/download"

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

_URL = "https://yelpdata.s3.us-west-2.amazonaws.com/"
_URLS = {
    "train": _URL + "yelp_train.csv",
    "test": _URL + "yelp_test.csv",
}


class YelpDataset(datasets.GeneratorBasedBuilder):
    """Yelp businesses merged with their reviews, pre-split into train/test CSVs."""

    _URLS = _URLS
    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "business_id": datasets.Value("string"),
                    "name": datasets.Value("string"),
                    "address": datasets.Value("string"),
                    "city": datasets.Value("string"),
                    "state": datasets.Value("string"),
                    "postal_code": datasets.Value("string"),
                    "latitude": datasets.Value("float"),
                    "longitude": datasets.Value("float"),
                    "stars_x": datasets.Value("float"),
                    "review_count": datasets.Value("float"),
                    "is_open": datasets.Value("float"),
                    "categories": datasets.Value("string"),
                    "hours": datasets.Value("string"),
                    "review_id": datasets.Value("string"),
                    "user_id": datasets.Value("string"),
                    "stars_y": datasets.Value("float"),
                    "useful": datasets.Value("float"),
                    "funny": datasets.Value("float"),
                    "cool": datasets.Value("float"),
                    "text": datasets.Value("string"),
                    "date": datasets.Value("string"),
                    "attributes": datasets.Value("string"),
                }
            ),
            # No default supervised_keys.
            supervised_keys=None,
            homepage="https://www.yelp.com/dataset/download",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        urls_to_download = self._URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logging.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as csv_file:
            reader = csv.DictReader(csv_file)
            for i, row in enumerate(reader):
                # csv.DictReader yields '' for empty cells; map those to None so
                # that float-typed features (stars, useful, ...) cast cleanly.
                for key, value in row.items():
                    if value == "":
                        row[key] = None
                yield i, row
```
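To make the empty-field handling above concrete, here is a tiny self-contained sketch (the sample CSV is invented for illustration):

```python
import csv
import io

# csv.DictReader reads every cell as a string and yields '' for empty ones;
# the loader above maps '' to None so that float features cast without errors.
sample = "business_id,stars_y,text\nabc123,,Great tacos\n"
for row in csv.DictReader(io.StringIO(sample)):
    cleaned = {k: (v if v != "" else None) for k, v in row.items()}
    print(cleaned)  # {'business_id': 'abc123', 'stars_y': None, 'text': 'Great tacos'}
```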