"""No Language Left Behind (NLLB)""" |

import datasets

from .nllb_lang_pairs import LANG_PAIRS as _LANGUAGE_PAIRS

_CITATION = """\
@article{nllb2022,
    title={No Language Left Behind: Scaling Human-Centered Machine Translation},
    author={{NLLB Team} and others},
    journal={arXiv preprint arXiv:2207.04672},
    year={2022}
}
"""

_DESCRIPTION = (
    "Sentence pairs mined for the No Language Left Behind (NLLB) project. "
    "Each pair carries a LASER alignment score, a language-identification "
    "(LID) confidence for both sentences, and the provenance and URL of "
    "each sentence."
)

_HOMEPAGE = ""

_LICENSE = ""

_URL_BASE = "https://storage.googleapis.com/allennlp-data-bucket/nllb/"

# One gzipped TSV file per language pair.
_URLs = {
    f"{src_lg}-{tgt_lg}": f"{_URL_BASE}{src_lg}-{tgt_lg}.gz"
    for src_lg, tgt_lg in _LANGUAGE_PAIRS
}
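# Illustrative example (hypothetical pair): for ("eng_Latn", "fra_Latn") the
# mapping above would yield
#     _URLs["eng_Latn-fra_Latn"]
#     == "https://storage.googleapis.com/allennlp-data-bucket/nllb/eng_Latn-fra_Latn.gz"
# The real set of pairs is defined in nllb_lang_pairs.LANG_PAIRS.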


class NLLBTaskConfig(datasets.BuilderConfig):
    """BuilderConfig for the No Language Left Behind dataset."""

    def __init__(self, src_lg, tgt_lg, **kwargs):
        super().__init__(**kwargs)
        self.src_lg = src_lg
        self.tgt_lg = tgt_lg


class NLLB(datasets.GeneratorBasedBuilder): |
    """No Language Left Behind Dataset."""

    BUILDER_CONFIGS = [
        NLLBTaskConfig(
            name=f"{src_lg}-{tgt_lg}",
            version=datasets.Version("1.0.0"),
            description=f"No Language Left Behind (NLLB): {src_lg} - {tgt_lg}",
            src_lg=src_lg,
            tgt_lg=tgt_lg,
        )
        for (src_lg, tgt_lg) in _LANGUAGE_PAIRS
    ]
    BUILDER_CONFIG_CLASS = NLLBTaskConfig

    def _info(self):
        features = datasets.Features(
            {
                "translation": datasets.Translation(
                    languages=(self.config.src_lg, self.config.tgt_lg)
                ),
                "laser_score": datasets.Value("float32"),
                "source_sentence_lid": datasets.Value("float32"),
                "target_sentence_lid": datasets.Value("float32"),
                "source_sentence_source": datasets.Value("string"),
                "source_sentence_url": datasets.Value("string"),
                "target_sentence_source": datasets.Value("string"),
                "target_sentence_url": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns a single TRAIN split; the mined bitext is not pre-split."""
        pair = f"{self.config.src_lg}-{self.config.tgt_lg}"
        data_file = dl_manager.download_and_extract(_URLs[pair])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_file,
                    "source_lg": self.config.src_lg,
                    "target_lg": self.config.tgt_lg,
                },
            )
        ]

    def _generate_examples(self, filepath, source_lg, target_lg):
        # Each line of the extracted file is a tab-separated record with nine
        # columns: source text, target text, LASER alignment score, source LID
        # confidence, target LID confidence, source provenance, source URL,
        # target provenance, target URL.
        with open(filepath, encoding="utf-8") as f:
            for id_, example in enumerate(f):
                try:
                    # Strip the trailing newline so the last field stays clean.
                    datarow = example.rstrip("\n").split("\t")
                    row = {
                        "translation": {
                            source_lg: datarow[0],
                            target_lg: datarow[1],
                        },
                        "laser_score": float(datarow[2]),
                        "source_sentence_lid": float(datarow[3]),
                        "target_sentence_lid": float(datarow[4]),
                        "source_sentence_source": datarow[5],
                        "source_sentence_url": datarow[6],
                        "target_sentence_source": datarow[7],
                        "target_sentence_url": datarow[8],
                    }
                    # Normalize empty metadata fields to None.
                    row = {k: v if v else None for k, v in row.items()}
                except (IndexError, ValueError):
                    print(f"Malformed line {id_}: {datarow!r}")
                    raise
                yield id_, row
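

# Usage sketch (illustrative only, not part of the loading script): a single
# language pair can be loaded by pointing `datasets.load_dataset` at this
# file. "path/to/nllb.py" is a placeholder, and the pair name below is a
# hypothetical example; any name built from nllb_lang_pairs.LANG_PAIRS is
# valid. Newer versions of `datasets` may require trust_remote_code=True for
# script-based datasets.
#
#     from datasets import load_dataset
#
#     ds = load_dataset("path/to/nllb.py", "eng_Latn-fra_Latn", split="train")
#     print(ds[0]["translation"])   # {'eng_Latn': '...', 'fra_Latn': '...'}
#     print(ds[0]["laser_score"])   # alignment score as a float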