indo_story_cloze / indo_story_cloze.py
import csv

import datasets

_URLs = {
    "train": "https://huggingface.co/datasets/indolem/indo_story_cloze/resolve/main/train.csv",
    "validation": "https://huggingface.co/datasets/indolem/indo_story_cloze/resolve/main/dev.csv",
    "test": "https://huggingface.co/datasets/indolem/indo_story_cloze/resolve/main/test.csv",
}

_CITATION = """\
@inproceedings{koto-etal-2022-cloze,
    title = "Cloze Evaluation for Deeper Understanding of Commonsense Stories in {I}ndonesian",
    author = "Koto, Fajri and
        Baldwin, Timothy and
        Lau, Jey Han",
    booktitle = "Proceedings of the First Workshop on Commonsense Representation and Reasoning (CSRR 2022)",
    month = may,
    year = "2022",
    address = "Dublin, Ireland",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.csrr-1.2",
    doi = "10.18653/v1/2022.csrr-1.2",
    pages = "8--16",
}"""


class IndoStoryClozeConfig(datasets.BuilderConfig):
    """BuilderConfig for IndoStoryCloze."""

    def __init__(self, **kwargs):
        """BuilderConfig for IndoStoryCloze.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        # Version history:
        # 1.0.0: Release version
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        # English feature names; the Indonesian CSV columns are mapped to these in _generate_examples.
        self.features = ["sentence-1", "sentence-2", "sentence-3", "sentence-4", "correct_ending", "incorrect_ending"]


class IndoStoryCloze(datasets.GeneratorBasedBuilder):
    """The IndoStoryCloze dataset: a Story Cloze Test for Indonesian commonsense stories."""

    BUILDER_CONFIGS = [IndoStoryClozeConfig()]

    def _info(self):
        # Every feature is a plain string column.
        features = {feature: datasets.Value("string") for feature in self.config.features}
        return datasets.DatasetInfo(
            description="indo_story_cloze",
            features=datasets.Features(features),
            homepage="https://github.com/fajri91/IndoCloze",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the three CSV splits and pass each local path to _generate_examples.
        downloaded_files = dl_manager.download_and_extract(_URLs)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_file": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"data_file": downloaded_files["validation"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"data_file": downloaded_files["test"]}),
        ]

    def _generate_examples(self, data_file):
        # Read the CSV and map its Indonesian column headers to the English feature names.
        with open(data_file, encoding="utf-8", newline="") as f:
            for i, row in enumerate(csv.DictReader(f)):
                yield i, {
                    "sentence-1": row["Kalimat-1"],
                    "sentence-2": row["Kalimat-2"],
                    "sentence-3": row["Kalimat-3"],
                    "sentence-4": row["Kalimat-4"],
                    "correct_ending": row["Correct Ending"],
                    "incorrect_ending": row["Incorrect Ending"],
                }