# parallel_data/parallel_data.py
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""macocu_parallel"""
import csv
import os

import datasets

_CITATION = """\
@inproceedings{banon2022macocu,
title={MaCoCu: Massive collection and curation of monolingual and bilingual data: focus on under-resourced languages},
author={Ba{\~n}{\'o}n, Marta and Espl{\`a}-Gomis, Miquel and Forcada, Mikel L and Garc{\'\i}a-Romero, Cristian and Kuzman, Taja and Ljube{\v{s}}i{\'c}, Nikola and van Noord, Rik and Sempere, Leopoldo Pla and Ram{\'\i}rez-S{\'a}nchez, Gema and Rupnik, Peter and others},
booktitle={23rd Annual Conference of the European Association for Machine Translation, EAMT 2022},
pages={303--304},
year={2022},
organization={European Association for Machine Translation}
}
"""
_DESCRIPTION = """\
The MaCoCu parallel dataset is an English-centric collection of 11
parallel corpora including the following languages: Albanian,
Bulgarian, Bosnian, Croatian, Icelandic, Macedonian, Maltese,
Montenegrin, Serbian, Slovenian, and Turkish. These corpora have
been automatically crawled from national and generic top-level
domains (for example, ".hr" for croatian, or ".is" for icelandic);
then, a parallel curation pipeline has been applied to produce
the final data (see https://github.com/bitextor/bitextor).
"""
# NOTE: these Tatoeba-Challenge URLs are not used anywhere in this script;
# the data is read from the repository's local data/ directory instead.
_URL = {
    "evaluation": "https://object.pouta.csc.fi/Tatoeba-Challenge-devtest/test.tar",
    "development": "https://object.pouta.csc.fi/Tatoeba-Challenge-devtest/dev.tar",
}
_LanguagePairs = [ "en-is" ]
#_LanguagePairs = [ "en-bs", "en-bg", "en-is", "en-hr", "en-sq", "en-mt", "en-mk", "en-cnr", "en-sr", "en-sl", "en-tr" ]
_LICENSE = "cc0"
_HOMEPAGE = "https://macocu.eu"


class macocuConfig(datasets.BuilderConfig):
    """BuilderConfig for macocu_parallel."""

    def __init__(self, language_pair, **kwargs):
        """
        Args:
            language_pair: language pair to be loaded.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.language_pair = language_pair


class MaCoCu_parallel(datasets.GeneratorBasedBuilder):
    """Dataset builder for the MaCoCu parallel corpora."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = macocuConfig
    BUILDER_CONFIGS = [
        macocuConfig(name=pair, description=_DESCRIPTION, language_pair=pair)
        for pair in _LanguagePairs
    ]

def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({
"sourceString": datasets.Value("string"),
"targetString": datasets.Value("string")
}),
homepage=_HOMEPAGE,
citation=_CITATION,
license=_LICENSE
)

    def _split_generators(self, dl_manager):
        lang_pair = self.config.language_pair
        path = os.path.join("data", f"{lang_pair}.macocuv2.tsv")
        data_file = dl_manager.download_and_extract(path)
        # The gen_kwargs key must match the parameter name of _generate_examples.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_file})
        ]

    def _generate_examples(self, filepath):
        """Yields examples from the tab-separated file."""
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t", quotechar='"')
            for id_, row in enumerate(reader):
                # Skip the header row.
                if id_ == 0:
                    continue
                # Columns: source language, target language, source text, target text;
                # only the text columns are exposed, matching the features in _info().
                yield id_, {
                    "sourceString": row[2],
                    "targetString": row[3],
                }
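

# A minimal usage sketch (not part of the loader itself), assuming this script is
# stored as parallel_data.py next to a data/ directory containing en-is.macocuv2.tsv.
# Depending on the datasets version, trust_remote_code=True may also be required
# when loading script-based datasets.
if __name__ == "__main__":
    # Load the "en-is" configuration defined above directly from this script file.
    dataset = datasets.load_dataset("parallel_data.py", "en-is", split="train")
    # Each example exposes the two text fields declared in _info().
    print(dataset[0]["sourceString"], "->", dataset[0]["targetString"])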