# paraqa-sparqltotext / paraqa-sparqltotext.py
import json

import datasets
_CITATION = """\
@inproceedings{lecorve2022sparql2text,
title={Coqar: Question rewriting on coqa},
author={Lecorv\'e, Gw\'enol\'e and Veyret, Morgan and Brabant, Quentin and Rojas-Barahona, Lina M.},
journal={Proceedings of the Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the International Joint Conference on Natural Language Processing (AACL-IJCNLP)},
year={2022}
}
"""
_HOMEPAGE = ""
_URLS = {
"train": "json/train.json",
"valid": "json/valid.json",
"test": "json/test.json"
}
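# NOTE: the paths above are relative to the dataset repository; the download
# manager resolves them against the script's base path.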
_DESCRIPTION = """\
Special version of ParaQA for the SPARQL-to-Text task
"""

class ParaQA_SPARQL2Text(datasets.GeneratorBasedBuilder):
    """
    ParaQA-SPARQL2Text: Special version of ParaQA for the SPARQL-to-Text task
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "uid": datasets.Value('string'),
                    "query": datasets.Value('string'),
                    "question": datasets.Value('string'),
                    "simplified_query": datasets.Value('string'),
                    "answer": datasets.Value('string'),
                    "verbalized_answer": datasets.Value('string'),
                    "verbalized_answer_2": datasets.Value('string'),
                    "verbalized_answer_3": datasets.Value('string'),
                    "verbalized_answer_4": datasets.Value('string'),
                    "verbalized_answer_5": datasets.Value('string'),
                    "verbalized_answer_6": datasets.Value('string'),
                    "verbalized_answer_7": datasets.Value('string'),
                    "verbalized_answer_8": datasets.Value('string')
                }
            ),
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=("simplified_query", "question"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
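
    # Illustration (hypothetical values, not taken from the data): with
    # as_supervised=True, builder.as_dataset() yields (input, target) pairs
    # built from the supervised_keys above, e.g.
    #   ("select distinct ?uri where { ... }", "What is ... ?")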

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Downloads the data and defines the splits. dl_manager is a
        # datasets.download.DownloadManager that can be used to download
        # and extract URLs.
        paths = dl_manager.download_and_extract(_URLS)
        # download_and_extract already returns extracted local paths, so no
        # further dl_manager.extract call is needed per split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": paths['train'], "split": "train"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": paths['valid'], "split": "valid"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": paths['test'], "split": "test"}
            )
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples."""

        def transform_sample(original_sample):
            # Default every field to an empty string so that records with
            # fewer paraphrased answers still match the declared features.
            transformed_sample = {
                "uid": "",
                "query": "",
                "question": "",
                "simplified_query": "",
                "answer": "",
                "verbalized_answer": "",
                "verbalized_answer_2": "",
                "verbalized_answer_3": "",
                "verbalized_answer_4": "",
                "verbalized_answer_5": "",
                "verbalized_answer_6": "",
                "verbalized_answer_7": "",
                "verbalized_answer_8": ""
            }
            transformed_sample.update(original_sample)
            return transformed_sample

        # Yields (key, example) tuples from the dataset
        print("Opening %s" % filepath)
        with open(filepath, 'r', encoding='utf-8') as f:
            data = json.load(f)
        for key, it in enumerate(data):
            yield key, transform_sample(it)
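

# ---------------------------------------------------------------------------
# Usage sketch (hypothetical; not part of the loading script itself). It
# assumes this file is available locally under the name used below.
if __name__ == "__main__":
    ds = datasets.load_dataset("paraqa-sparqltotext.py", split="test")
    print(ds[0]["simplified_query"])
    print(ds[0]["question"])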