import json
import os

import datasets
from datasets import DatasetInfo, Features, GeneratorBasedBuilder, Sequence, SplitGenerator, Value


class MultiFileDataset(GeneratorBasedBuilder):
    """Example of a Hugging Face dataset script for handling multiple JSON Lines files per split."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return DatasetInfo(
            description="This dataset stores its multiple-choice question examples across several JSON Lines files per split.",
            features=Features({
                "question": Value("string"),
                "options": Sequence(Value("string")),
                "answer": Value("string"),
                "prompt": Value("string"),
                # Declared as an integer to match the int() cast in _generate_examples.
                "num_options": Value("int32"),
                "question_type": Value("string"),
                "exam_id": Value("string"),
            }),
            homepage="https://www.example.com/mydataset",
            citation="Cite the source here if applicable.",
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        return [
            SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(self.config.data_dir, "train/mcq")},
            ),
            SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": os.path.join(self.config.data_dir, "test/mcq")},
            ),
        ]

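    # Each line of a *.jsonl file is expected to look roughly like the record below
    # (hypothetical values; the field names follow the Features declared above):
    # {"question": "...", "options": ["A", "B", "C", "D"], "answer": "B",
    #  "prompt": "...", "num_options": 4, "question_type": "mcq", "exam_id": "exam-01"}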
    def _generate_examples(self, filepath):
        """Yields examples from every .jsonl file in the split directory."""
        for file in sorted(os.listdir(filepath)):
            full_file_path = os.path.join(filepath, file)
            if full_file_path.endswith(".jsonl"):
                with open(full_file_path, encoding="utf-8") as f:
                    for idx, line in enumerate(f):
                        data = json.loads(line)
                        # Prefix the key with the file name so keys stay unique across files.
                        yield f"{file}_{idx}", {
                            "question": data["question"],
                            "options": data["options"],
                            "answer": data["answer"],
                            "prompt": data["prompt"],
                            "num_options": int(data["num_options"]),
                            "question_type": data["question_type"],
                            "exam_id": data.get("exam_id", ""),
                        }
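
# Usage sketch (hypothetical paths: assumes this file is saved as multi_file_dataset.py,
# that data_dir contains train/mcq/*.jsonl and test/mcq/*.jsonl, and that the installed
# `datasets` version still supports Python loading scripts):
if __name__ == "__main__":
    ds = datasets.load_dataset("multi_file_dataset.py", data_dir="path/to/data")
    print(ds)
    print(ds["train"][0])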