"""Data Loader for SIMPITIKI Dataset with challenge splits""" |

import json

import datasets


_CITATION = """\
@article{tonelli2016simpitiki,
    title={SIMPITIKI: a Simplification corpus for Italian},
    author={Tonelli, Sara and Aprosio, Alessio Palmero and Saltori, Francesca},
    journal={Proceedings of CLiC-it},
    year={2016}
}
"""

_DESCRIPTION = """\
SIMPITIKI is a simplification corpus for Italian. It consists of two sets of simplified pairs: the first is harvested semi-automatically from the Italian Wikipedia; the second is manually annotated, sentence by sentence, from documents in the administrative domain.
"""

_HOMEPAGE = "https://github.com/dhfbk/simpitiki"

_LICENSE = "CC-BY 4.0"

_URLs = {
    "v1": {
        "random": {
            "train": "./v1/random_split/train.jsonl",
            "val": "./v1/random_split/val.jsonl",
            "test": "./v1/random_split/test.jsonl",
        },
        "transformations": {
            "train": "./v1/transformations_split/train.jsonl",
            "val": "./v1/transformations_split/val.jsonl",
            "seen_transformations_test": "./v1/transformations_split/seen_transformations_test.jsonl",
            "unseen_transformations_test": "./v1/transformations_split/unseen_transformations_test.jsonl",
        },
        "source_dataset": {
            "itwiki_train": "./v1/source_dataset_split/itwiki_train.jsonl",
            "itwiki_val": "./v1/source_dataset_split/itwiki_val.jsonl",
            "itwiki_test": "./v1/source_dataset_split/itwiki_test.jsonl",
            "tn_test": "./v1/source_dataset_split/tn_test.jsonl",
        },
    },
    "v2": {
        "random": {
            "train": "./v2/random_split/train.jsonl",
            "val": "./v2/random_split/val.jsonl",
            "test": "./v2/random_split/test.jsonl",
        },
        "transformations": {
            "train": "./v2/transformations_split/train.jsonl",
            "val": "./v2/transformations_split/val.jsonl",
            "seen_transformations_test": "./v2/transformations_split/seen_transformations_test.jsonl",
            "unseen_transformations_test": "./v2/transformations_split/unseen_transformations_test.jsonl",
        },
        "source_dataset": {
            "itwiki_train": "./v2/source_dataset_split/itwiki_train.jsonl",
            "itwiki_val": "./v2/source_dataset_split/itwiki_val.jsonl",
            "itwiki_test": "./v2/source_dataset_split/itwiki_test.jsonl",
            "tn_test": "./v2/source_dataset_split/tn_test.jsonl",
        },
    },
} |
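
# For reference, each JSONL file above stores one record per line with the
# four fields read back in `_generate_examples` below. An illustrative
# (made-up) record shape, not an actual corpus entry:
#
#   {"text": "...", "simplified_text": "...",
#    "transformation_type": "...", "source_dataset": "..."}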


class SIMPITIKI(datasets.GeneratorBasedBuilder):
    """SIMPITIKI is a dataset built for the sentence simplification task. It provides complex-to-simple sentence pairs."""

    VERSION_1 = datasets.Version("1.0.0")
    VERSION_2 = datasets.Version("2.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="v1", version=VERSION_1, description="First version"),
        datasets.BuilderConfig(name="v2", version=VERSION_2, description="Second version with better sentence boundaries."),
    ]

    DEFAULT_CONFIG_NAME = "v2"

    def _info(self):
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "text": datasets.Value("string"),
                "simplified_text": datasets.Value("string"),
                "transformation_type": datasets.Value("string"),
                "source_dataset": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        my_urls = _URLs[self.config.name]
        downloaded_files = dl_manager.download_and_extract(my_urls)
        return [
            # Standard random splits.
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": downloaded_files["random"]["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": downloaded_files["random"]["val"],
                    "split": "val",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": downloaded_files["random"]["test"],
                    "split": "test",
                },
            ),
            # Challenge splits by transformation type: one test set with
            # transformation types seen in training, one with held-out types.
            datasets.SplitGenerator(
                name="challenge_seen_transformations_train",
                gen_kwargs={
                    "filepath": downloaded_files["transformations"]["train"],
                    "split": "challenge_seen_transformations_train",
                },
            ),
            datasets.SplitGenerator(
                name="challenge_seen_transformations_val",
                gen_kwargs={
                    "filepath": downloaded_files["transformations"]["val"],
                    "split": "challenge_seen_transformations_val",
                },
            ),
            datasets.SplitGenerator(
                name="challenge_seen_transformations_test",
                gen_kwargs={
                    "filepath": downloaded_files["transformations"]["seen_transformations_test"],
                    "split": "challenge_seen_transformations_test",
                },
            ),
            datasets.SplitGenerator(
                name="challenge_unseen_transformations_test",
                gen_kwargs={
                    "filepath": downloaded_files["transformations"]["unseen_transformations_test"],
                    "split": "challenge_unseen_transformations_test",
                },
            ),
            # Challenge splits by source dataset: train/val/test on the Italian
            # Wikipedia portion (itwiki) plus an administrative-domain test set (tn).
            datasets.SplitGenerator(
                name="challenge_itwiki_train",
                gen_kwargs={
                    "filepath": downloaded_files["source_dataset"]["itwiki_train"],
                    "split": "challenge_itwiki_train",
                },
            ),
            datasets.SplitGenerator(
                name="challenge_itwiki_val",
                gen_kwargs={
                    "filepath": downloaded_files["source_dataset"]["itwiki_val"],
                    "split": "challenge_itwiki_val",
                },
            ),
            datasets.SplitGenerator(
                name="challenge_itwiki_test",
                gen_kwargs={
                    "filepath": downloaded_files["source_dataset"]["itwiki_test"],
                    "split": "challenge_itwiki_test",
                },
            ),
            datasets.SplitGenerator(
                name="challenge_tn_test",
                gen_kwargs={
                    "filepath": downloaded_files["source_dataset"]["tn_test"],
                    "split": "challenge_tn_test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            # One JSON record per line; the line index doubles as the example key.
            for id_, row in enumerate(f):
                data = json.loads(row)
                yield id_, {
                    "text": data["text"],
                    "simplified_text": data["simplified_text"],
                    "transformation_type": data["transformation_type"],
                    "source_dataset": data["source_dataset"],
                    "gem_id": f"gem-SIMPITIKI-{split}-{id_}",
                }
|
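
# The challenge splits are exposed alongside the standard ones and can be
# requested directly. An illustrative call (script path as in the usage note
# at the top of this file):
#
#   tn = datasets.load_dataset("simpitiki.py", "v2", split="challenge_tn_test")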