import json
import os

import datasets
from datasets import DatasetInfo, Features, GeneratorBasedBuilder, Sequence, SplitGenerator, Value


class MultiFileDataset(GeneratorBasedBuilder):
    """Example of a Hugging Face dataset script for handling multiple files per split."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return DatasetInfo(
            description="This dataset includes multiple JSON Lines files for text classification.",
            features=Features({
                "question": Value("string"),
                "options": Sequence(Value("string")),
                "answer": Value("string"),
                "prompt": Value("string"),
                "num_options": Value("int32"),  # Stored as an integer; see the cast in _generate_examples
                "question_type": Value("string"),
                "exam_id": Value("string"),  # Optional in the source files; defaults to "" when missing
            }),
            homepage="https://www.example.com/mydataset",
            citation="Cite the source here if applicable.",
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # `data_dir` is the dataset's root directory, passed via load_dataset(..., data_dir=...)
        return [
            SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(self.config.data_dir, "train/mcq")},
            ),
            SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": os.path.join(self.config.data_dir, "test/mcq")},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples from multiple files."""
        for file in sorted(os.listdir(filepath)):  # Sort for a consistent, reproducible order
            full_file_path = os.path.join(filepath, file)
            if not full_file_path.endswith(".jsonl"):  # Process only JSON Lines files
                continue
            with open(full_file_path, encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    data = json.loads(line)
                    yield f"{file}_{idx}", {
                        "question": data["question"],
                        "options": data["options"],
                        "answer": data["answer"],
                        "prompt": data["prompt"],
                        "num_options": int(data["num_options"]),  # Cast in case the JSON stores it as a string
                        "question_type": data["question_type"],
                        "exam_id": data.get("exam_id", ""),  # Optional field; empty string when absent
                    }
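

# A minimal usage sketch, assuming the script above is saved as `multi_file_dataset.py`
# and the data lives under `./data/train/mcq/*.jsonl` and `./data/test/mcq/*.jsonl`.
# The script filename and directory layout are illustrative, not prescribed by the script;
# recent versions of `datasets` may also require `trust_remote_code=True` to run a loading script.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("multi_file_dataset.py", data_dir="./data", trust_remote_code=True)
    print(dataset)                               # DatasetDict with "train" and "test" splits
    print(dataset["train"][0]["question"])       # First question from the first .jsonl file (sorted order)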