Datasets:
Error loading dataset
I'm getting an error when I try to load the dataset - stack trace below. There's a similar error in the huggingface data preview; see attached image.
from datasets import load_dataset
load_dataset('Salesforce/GiftEval')
Downloading data: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 58/58 [02:14<00:00, 2.32s/files]
Generating train split: 323 examples [00:00, 1688.01 examples/s]Failed to read file '/Users/geoff/.cache/huggingface/hub/datasets--Salesforce--GiftEval/snapshots/930b5513aed532a99dc260c38893223738858044/M_DENSE/D/data-00000-of-00001.arrow' with error <class 'datasets.table.CastError'>: Couldn't cast
item_id: string
start: timestamp[s]
freq: string
target: list<item: float>
child 0, item: float
past_feat_dynamic_real: fixed_size_list<item: list<item: float>>[7]
child 0, item: list<item: float>
child 0, item: float
-- schema metadata --
huggingface: '{"info": {"features": {"item_id": {"dtype": "string", "_typ' + 347
to
{'item_id': Value(dtype='string', id=None), 'start': Value(dtype='timestamp[s]', id=None), 'freq': Value(dtype='string', id=None), 'target': Sequence(feature=Value(dtype='float32', id=None), length=-1, id=None)}
because column names don't match
Generating train split: 969 examples [00:00, 4605.30 examples/s]
CastError Traceback (most recent call last)
File /opt/anaconda3/envs/gifteval/lib/python3.12/site-packages/datasets/builder.py:1854, in ArrowBasedBuilder._prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, job_id)
1853 _time = time.time()
-> 1854 for _, table in generator:
1855 if max_shard_size is not None and writer._num_bytes > max_shard_size:
File /opt/anaconda3/envs/gifteval/lib/python3.12/site-packages/datasets/packaged_modules/arrow/arrow.py:76, in Arrow.generate_tables(self, files)
73 # Uncomment for debugging (will print the Arrow table size and elements)
74 # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
75 # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
---> 76 yield f"{file_idx}{batch_idx}", self._cast_table(pa_table)
77 except ValueError as e:
File /opt/anaconda3/envs/gifteval/lib/python3.12/site-packages/datasets/packaged_modules/arrow/arrow.py:59, in Arrow._cast_table(self, pa_table)
56 if self.info.features is not None:
57 # more expensive cast to support nested features with keys in a different order
58 # allows str <-> int/float or str to Audio for example
---> 59 pa_table = table_cast(pa_table, self.info.features.arrow_schema)
60 return pa_table
File /opt/anaconda3/envs/gifteval/lib/python3.12/site-packages/datasets/table.py:2292, in table_cast(table, schema)
2291 if table.schema != schema:
-> 2292 return cast_table_to_schema(table, schema)
2293 elif table.schema.metadata != schema.metadata:
File /opt/anaconda3/envs/gifteval/lib/python3.12/site-packages/datasets/table.py:2240, in cast_table_to_schema(table, schema)
2239 if not table_column_names <= set(schema.names):
-> 2240 raise CastError(
2241 f"Couldn't cast\n{_short_str(table.schema)}\nto\n{_short_str(features)}\nbecause column names don't match",
2242 table_column_names=table.column_names,
2243 requested_column_names=list(features),
2244 )
2245 arrays = [
2246 cast_array_to_feature(
2247 table[name] if name in table_column_names else pa.array([None] * len(table), type=schema.field(name).type),
(...)
2250 for name, feature in features.items()
2251 ]
CastError: Couldn't cast
item_id: string
start: timestamp[s]
freq: string
target: list<item: float>
child 0, item: float
past_feat_dynamic_real: fixed_size_list<item: list<item: float>>[7]
child 0, item: list<item: float>
child 0, item: float
-- schema metadata --
huggingface: '{"info": {"features": {"item_id": {"dtype": "string", "_typ' + 347
to
{'item_id': Value(dtype='string', id=None), 'start': Value(dtype='timestamp[s]', id=None), 'freq': Value(dtype='string', id=None), 'target': Sequence(feature=Value(dtype='float32', id=None), length=-1, id=None)}
because column names don't match
The above exception was the direct cause of the following exception:
DatasetGenerationError Traceback (most recent call last)
Cell In[6], line 1
----> 1 load_dataset('Salesforce/GiftEval')
File /opt/anaconda3/envs/gifteval/lib/python3.12/site-packages/datasets/load.py:2154, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, keep_in_memory, save_infos, revision, token, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs)
2151 return builder_instance.as_streaming_dataset(split=split)
2153 # Download and prepare data
-> 2154 builder_instance.download_and_prepare(
2155 download_config=download_config,
2156 download_mode=download_mode,
2157 verification_mode=verification_mode,
2158 num_proc=num_proc,
2159 storage_options=storage_options,
2160 )
2162 # Build dataset for splits
2163 keep_in_memory = (
2164 keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
2165 )
File /opt/anaconda3/envs/gifteval/lib/python3.12/site-packages/datasets/builder.py:924, in DatasetBuilder.download_and_prepare(self, output_dir, download_config, download_mode, verification_mode, dl_manager, base_path, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)
922 if num_proc is not None:
923 prepare_split_kwargs["num_proc"] = num_proc
--> 924 self._download_and_prepare(
925 dl_manager=dl_manager,
926 verification_mode=verification_mode,
927 **prepare_split_kwargs,
928 **download_and_prepare_kwargs,
929 )
930 # Sync info
931 self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())
File /opt/anaconda3/envs/gifteval/lib/python3.12/site-packages/datasets/builder.py:1000, in DatasetBuilder._download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs)
996 split_dict.add(split_generator.split_info)
998 try:
999 # Prepare split will record examples associated to the split
-> 1000 self._prepare_split(split_generator, **prepare_split_kwargs)
1001 except OSError as e:
1002 raise OSError(
1003 "Cannot find data file. "
1004 + (self.manual_download_instructions or "")
1005 + "\nOriginal error:\n"
1006 + str(e)
1007 ) from None
File /opt/anaconda3/envs/gifteval/lib/python3.12/site-packages/datasets/builder.py:1741, in ArrowBasedBuilder._prepare_split(self, split_generator, file_format, num_proc, max_shard_size)
1739 job_id = 0
1740 with pbar:
-> 1741 for job_id, done, content in self._prepare_split_single(
1742 gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
1743 ):
1744 if done:
1745 result = content
File /opt/anaconda3/envs/gifteval/lib/python3.12/site-packages/datasets/builder.py:1897, in ArrowBasedBuilder._prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, job_id)
1895 if isinstance(e, DatasetGenerationError):
1896 raise
-> 1897 raise DatasetGenerationError("An error occurred while generating the dataset") from e
1899 yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
DatasetGenerationError: An error occurred while generating the dataset
Hi @geoffdavis , thanks for raising this issue. In fact our dataset should be downloaded locally and loaded from disk to be processed with our code; you can find detailed instructions in the repository. But the following could get you started with downloading the dataset locally:
huggingface-cli download Salesforce/GiftEval --repo-type=dataset --local-dir PATH_TO_SAVE
Thanks for the quick response! I was too eager to explore your raw data to do the full installation of your repository. Your suggested fix works - I appreciate it. I'm looking forward to your pretraining data being available as well.