# m_qalm / m_qalm.py
import json
import os

import datasets

_DESCRIPTION = """\
The M-QALM Dataset Repository contains Multiple-Choice and Abstractive Questions for evaluating the performance of LLMs in the clinical and biomedical domain.
"""
_HOMEPAGE = "https://huggingface.co/datasets/anand-s/m_qalm"
_LICENSE = "Apache License 2.0"
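
# Example usage (a sketch, not executed by this script): each builder configuration defined
# below maps to exactly one split whose name equals the config name, so both are passed to
# `load_dataset`. Newer `datasets` releases that still support script-based builders may also
# require `trust_remote_code=True` when loading the Hub copy.
#
#     from datasets import load_dataset
#     mcqa_test = load_dataset("anand-s/m_qalm", "test_normal_mcqa", split="test_normal_mcqa")
#     print(mcqa_test[0]["question"], mcqa_test[0]["options"])
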
# Download URLs for each dataset configuration
_URLS = {
"train_normal_mcqa": "https://huggingface.co/datasets/anand-s/m_qalm/resolve/main/train_normal_mcqa.zip",
"val_normal_mcqa": "https://huggingface.co/datasets/anand-s/m_qalm/resolve/main/val_normal_mcqa.zip",
"test_normal_mcqa": "https://huggingface.co/datasets/anand-s/m_qalm/resolve/main/test_normal_mcqa.zip",
"train_context_mcqa": "https://huggingface.co/datasets/anand-s/m_qalm/resolve/main/train_context_mcqa.zip",
"val_context_mcqa": "https://huggingface.co/datasets/anand-s/m_qalm/resolve/main/val_context_mcqa.zip",
"test_context_mcqa": "https://huggingface.co/datasets/anand-s/m_qalm/resolve/main/test_context_mcqa.zip",
"train_aqa": "https://huggingface.co/datasets/anand-s/m_qalm/resolve/main/train_aqa.zip",
"val_aqa": "https://huggingface.co/datasets/anand-s/m_qalm/resolve/main/val_aqa.zip",
"test_aqa": "https://huggingface.co/datasets/anand-s/m_qalm/resolve/main/test_aqa.zip",
}
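
# Layout note (an assumption based on _generate_examples below): each archive extracts to a
# directory of JSONL files, one file per source dataset, and the file name minus ".jsonl" is
# used as the `dataset_name` field of every example read from that file.
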
class MQalm(datasets.GeneratorBasedBuilder):
"""Dataset for multiple choice questions from Test Repo."""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="train_normal_mcqa", version=VERSION, description="Train set MCQA"),
datasets.BuilderConfig(name="val_normal_mcqa", version=VERSION, description="Val set MCQA"),
datasets.BuilderConfig(name="test_normal_mcqa", version=VERSION, description="Test set MCQA"),
datasets.BuilderConfig(name="train_context_mcqa", version=VERSION, description="Train set context MCQA"),
datasets.BuilderConfig(name="val_context_mcqa", version=VERSION, description="Val set context MCQA"),
datasets.BuilderConfig(name="test_context_mcqa", version=VERSION, description="Test set context MCQA"),
datasets.BuilderConfig(name="train_aqa", version=VERSION, description="Train set AQA"),
datasets.BuilderConfig(name="val_aqa", version=VERSION, description="Val set AQA"),
datasets.BuilderConfig(name="test_aqa", version=VERSION, description="Test set AQA"),
]
    def _info(self):
        features_dict = {
            "train_normal_mcqa": datasets.Features({
                "prompt": datasets.Value("string"),
                "question": datasets.Value("string"),
                "options": datasets.Sequence(datasets.Value("string")),
                "answer": datasets.Value("string"),
                "num_options": datasets.Value("string"),
                "question_type": datasets.Value("string"),
                "dataset_name": datasets.Value("string"),
            }),
            "val_normal_mcqa": datasets.Features({
                "prompt": datasets.Value("string"),
                "question": datasets.Value("string"),
                "options": datasets.Sequence(datasets.Value("string")),
                "answer": datasets.Value("string"),
                "num_options": datasets.Value("string"),
                "question_type": datasets.Value("string"),
                "dataset_name": datasets.Value("string"),
                "few_shot_prompt": datasets.Sequence(datasets.Features({
                    "question": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    "options": datasets.Sequence(datasets.Value("string")),
                })),
            }),
            "test_normal_mcqa": datasets.Features({
                "prompt": datasets.Value("string"),
                "question": datasets.Value("string"),
                "options": datasets.Sequence(datasets.Value("string")),
                "answer": datasets.Value("string"),
                "num_options": datasets.Value("string"),
                "question_type": datasets.Value("string"),
                "dataset_name": datasets.Value("string"),
                "few_shot_prompt": datasets.Sequence(datasets.Features({
                    "question": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    "options": datasets.Sequence(datasets.Value("string")),
                })),
            }),
            "train_context_mcqa": datasets.Features({
                "prompt": datasets.Value("string"),
                "question": datasets.Value("string"),
                "options": datasets.Sequence(datasets.Value("string")),
                "answer": datasets.Value("string"),
                "num_options": datasets.Value("string"),
                "question_type": datasets.Value("string"),
                "dataset_name": datasets.Value("string"),
                "context": datasets.Sequence(datasets.Value("string")),
            }),
            "val_context_mcqa": datasets.Features({
                "prompt": datasets.Value("string"),
                "question": datasets.Value("string"),
                "options": datasets.Sequence(datasets.Value("string")),
                "answer": datasets.Value("string"),
                "num_options": datasets.Value("string"),
                "question_type": datasets.Value("string"),
                "dataset_name": datasets.Value("string"),
                "context": datasets.Sequence(datasets.Value("string")),
                "few_shot_prompt": [{
                    "question": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    "options": datasets.Sequence(datasets.Value("string")),
                    "context": datasets.Sequence(datasets.Value("string")),
                }],
            }),
            "test_context_mcqa": datasets.Features({
                "prompt": datasets.Value("string"),
                "question": datasets.Value("string"),
                "options": datasets.Sequence(datasets.Value("string")),
                "answer": datasets.Value("string"),
                "num_options": datasets.Value("string"),
                "question_type": datasets.Value("string"),
                "dataset_name": datasets.Value("string"),
                "context": datasets.Sequence(datasets.Value("string")),
                "few_shot_prompt": datasets.Sequence(datasets.Features({
                    "question": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    "options": datasets.Sequence(datasets.Value("string")),
                    "context": datasets.Sequence(datasets.Value("string")),
                })),
            }),
            "train_aqa": datasets.Features({
                "question": datasets.Value("string"),
                "answer": datasets.Value("string"),
                "prompt": datasets.Value("string"),
                "dataset_name": datasets.Value("string"),
            }),
            "val_aqa": datasets.Features({
                "question": datasets.Value("string"),
                "answer": datasets.Sequence(datasets.Value("string")),
                "prompt": datasets.Value("string"),
                "dataset_name": datasets.Value("string"),
            }),
            "test_aqa": datasets.Features({
                "question": datasets.Value("string"),
                "answer": datasets.Sequence(datasets.Value("string")),
                "prompt": datasets.Value("string"),
                "dataset_name": datasets.Value("string"),
            }),
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features_dict[self.config.name],
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )
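
    # Illustrative JSONL row (hypothetical values, not taken from the released data) matching
    # the "train_normal_mcqa" schema above; _generate_examples assumes each row carries these
    # keys, while `dataset_name` is filled in from the file name rather than read from the row:
    #
    #     {"prompt": "Answer the following multiple-choice question.",
    #      "question": "Deficiency of which vitamin causes scurvy?",
    #      "options": ["Vitamin A", "Vitamin B12", "Vitamin C", "Vitamin D"],
    #      "answer": "Vitamin C",
    #      "num_options": "4",
    #      "question_type": "multiple_choice"}
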
    def _split_generators(self, dl_manager):
        # Download and extract the archive for the selected configuration
        data_dir = dl_manager.download_and_extract(_URLS[self.config.name])
        return [
            datasets.SplitGenerator(
                name=self.config.name,
                gen_kwargs={"directory": data_dir, "split": self.config.name},
            )
        ]

    def _generate_examples(self, directory, split):
        # Iterate over each JSONL file in the extracted directory
        key_idx = 0
        for filename in os.listdir(directory):
            filepath = os.path.join(directory, filename)
            if filepath.endswith(".jsonl"):
                dataset_name = os.path.basename(filepath).replace(".jsonl", "")
                with open(filepath, encoding="utf-8") as f:
                    for row in f:
                        data = json.loads(row)
                        if split == "train_normal_mcqa":
                            yield key_idx, {
                                "prompt": data["prompt"],
                                "question": data["question"],
                                "options": data["options"],
                                "answer": data["answer"],
                                "num_options": data["num_options"],
                                "question_type": data["question_type"],
                                "dataset_name": dataset_name,
                            }
                            key_idx += 1
                        elif split in ["val_normal_mcqa", "test_normal_mcqa"]:
                            yield key_idx, {
                                "prompt": data["prompt"],
                                "question": data["question"],
                                "options": data["options"],
                                "answer": data["answer"],
                                "num_options": data["num_options"],
                                "question_type": data["question_type"],
                                "dataset_name": dataset_name,
                                "few_shot_prompt": [{
                                    "question": item["question"],
                                    "answer": item["answer"],
                                    "options": item["options"],
                                } for item in data["few_shot_prompt"]],
                            }
                            key_idx += 1
                        elif split == "train_context_mcqa":
                            yield key_idx, {
                                "prompt": data["prompt"],
                                "question": data["question"],
                                "options": data["options"],
                                "answer": data["answer"],
                                "num_options": data["num_options"],
                                "context": data["context"],
                                "question_type": data["question_type"],
                                "dataset_name": dataset_name,
                            }
                            key_idx += 1
                        elif split in ["val_context_mcqa", "test_context_mcqa"]:
                            yield key_idx, {
                                "prompt": data["prompt"],
                                "question": data["question"],
                                "options": data["options"],
                                "answer": data["answer"],
                                "num_options": data["num_options"],
                                "context": data["context"],
                                "question_type": data["question_type"],
                                "dataset_name": dataset_name,
                                "few_shot_prompt": data["few_shot_prompt"],
                            }
                            key_idx += 1
                        elif split in ["train_aqa", "val_aqa", "test_aqa"]:
                            yield key_idx, {
                                "prompt": data["prompt"],
                                "question": data["question"],
                                "answer": data["answer"],
                                "dataset_name": dataset_name,
                            }
                            key_idx += 1
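

if __name__ == "__main__":
    # Minimal local smoke test: a sketch only, assuming a `datasets` version that still
    # supports script-based builders and that the archives listed in _URLS are reachable.
    # The config "test_normal_mcqa" is picked arbitrarily; any name from BUILDER_CONFIGS works.
    ds = datasets.load_dataset(__file__, name="test_normal_mcqa", split="test_normal_mcqa")
    print(ds)
    print(ds[0])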