|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""MENYO-20k: A Multi-domain English - Yorùbá Corpus for Machine Translations""" |
|
|
|
|
|
import csv |
|
|
|
import datasets |
|
|
|
|
|
|
|
# BibTeX entry users should cite when publishing results based on this corpus.
_CITATION = """\
@dataset{david_ifeoluwa_adelani_2020_4297448,
  author       = {David Ifeoluwa Adelani and
                  Jesujoba O. Alabi and
                  Damilola Adebonojo and
                  Adesina Ayeni and
                  Mofe Adeyemi and
                  Ayodele Awokoya},
  title        = {MENYO-20k: A Multi-domain English - Yorùbá Corpus
                  for Machine Translation},
  month        = nov,
  year         = 2020,
  publisher    = {Zenodo},
  version      = {1.0},
  doi          = {10.5281/zenodo.4297448},
  url          = {https://doi.org/10.5281/zenodo.4297448}
}
"""

# Human-readable summary surfaced on the dataset hub page.
_DESCRIPTION = """\
MENYO-20k is a multi-domain parallel dataset with texts obtained from news articles, ted talks, movie transcripts, radio transcripts, science and technology texts, and other short articles curated from the web and professional translators. The dataset has 20,100 parallel sentences split into 10,070 training sentences, 3,397 development sentences, and 6,633 test sentences (3,419 multi-domain, 1,714 news domain, and 1,500 ted talks speech transcript domain). The development and test sets are available upon request.
"""

# Zenodo record page for the corpus.
_HOMEPAGE = "https://zenodo.org/record/4297448#.X81G7s0zZPY"

# Usage restriction inherited from some of the underlying text sources.
_LICENSE = "For non-commercial use because some of the data sources like Ted talks and JW news requires permission for commercial use."

# Only the training split is hosted publicly; dev/test are available on request
# (see _DESCRIPTION), so this is the sole download URL.
_URL = "https://raw.githubusercontent.com/uds-lsv/menyo-20k_MT/master/data/train.tsv"
|
|
|
|
|
class Menyo20kMt(datasets.GeneratorBasedBuilder):
    """MENYO-20k: A Multi-domain English - Yorùbá Corpus for Machine Translations"""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="menyo20k_mt",
            version=VERSION,
            description="MENYO-20k: A Multi-domain English - Yorùbá Corpus for Machine Translations",
        )
    ]

    def _info(self):
        """Return the dataset metadata: a single en/yo translation feature."""
        translation_feature = datasets.features.Translation(languages=("en", "yo"))
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"translation": translation_feature}),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the training TSV and expose it as the only (TRAIN) split."""
        data_path = dl_manager.download_and_extract(_URL)
        train_split = datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"filepath": data_path},
        )
        return [train_split]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from the tab-separated parallel file."""
        with open(filepath, encoding="utf-8") as tsv_file:
            # QUOTE_NONE: quote characters inside the sentences are literal
            # text, not CSV quoting.
            rows = csv.DictReader(tsv_file, delimiter="\t", quoting=csv.QUOTE_NONE)
            for key, row in enumerate(rows):
                yield key, {"translation": {"en": row["English"], "yo": row["Yoruba"]}}
|
|