# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""No Language Left Behind (NLLB)"""
import datasets
import csv
import json
_CITATION = "" # TODO
_DESCRIPTION = "" # TODO
_HOMEPAGE = "" # TODO
_LICENSE = "" # TODO
from .nllb_lang_pairs import LANG_PAIRS as _LANGUAGE_PAIRS
_URL_BASE = "https://storage.googleapis.com/allennlp-data-bucket/nllb/"
_URLs = {
f"{src_lg}-{trg_lg}": f"{_URL_BASE}{src_lg}-{trg_lg}.gz"
for src_lg, trg_lg in _LANGUAGE_PAIRS
}
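
# For illustration (the pair name here is hypothetical; valid keys are the
# "{src}-{tgt}" pairs defined in nllb_lang_pairs.LANG_PAIRS):
#   _URLs["eng_Latn-fra_Latn"]
#   -> "https://storage.googleapis.com/allennlp-data-bucket/nllb/eng_Latn-fra_Latn.gz"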
class NLLBTaskConfig(datasets.BuilderConfig):
    """BuilderConfig for No Language Left Behind Dataset."""

    def __init__(self, src_lg, tgt_lg, **kwargs):
        super(NLLBTaskConfig, self).__init__(**kwargs)
        self.src_lg = src_lg
        self.tgt_lg = tgt_lg
class NLLB(datasets.GeneratorBasedBuilder):
    """No Language Left Behind Dataset."""

    BUILDER_CONFIGS = [
        NLLBTaskConfig(
            name=f"{src_lg}-{tgt_lg}",
            version=datasets.Version("1.0.0"),
            description=f"No Language Left Behind (NLLB): {src_lg} - {tgt_lg}",
            src_lg=src_lg,
            tgt_lg=tgt_lg,
        )
        for (src_lg, tgt_lg) in _LANGUAGE_PAIRS
    ]
    BUILDER_CONFIG_CLASS = NLLBTaskConfig
    def _info(self):
        # Define feature types: the parallel text plus mining metadata
        # (LASER alignment score, LID confidences, and provenance fields).
        features = datasets.Features(
            {
                "translation": datasets.Translation(
                    languages=(self.config.src_lg, self.config.tgt_lg)
                ),
                "laser_score": datasets.Value("float32"),
                "source_sentence_lid": datasets.Value("float32"),
                "target_sentence_lid": datasets.Value("float32"),
                "source_sentence_source": datasets.Value("string"),
                "source_sentence_url": datasets.Value("string"),
                "target_sentence_source": datasets.Value("string"),
                "target_sentence_url": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
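
    # For illustration, a yielded example has roughly this shape (language
    # codes and all values below are made up, not real data):
    #   {
    #       "translation": {"eng_Latn": "Hello.", "fra_Latn": "Bonjour."},
    #       "laser_score": 1.07,
    #       "source_sentence_lid": 0.99,
    #       "target_sentence_lid": 0.98,
    #       "source_sentence_source": "some_crawl_source",
    #       "source_sentence_url": "https://example.com/page",
    #       "target_sentence_source": "some_crawl_source",
    #       "target_sentence_url": "https://example.com/page",
    #   }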
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        pair = f"{self.config.src_lg}-{self.config.tgt_lg}"  # string identifier for the language pair
        url = _URLs[pair]  # URL of the pair-specific file
        data_file = dl_manager.download_and_extract(
            url
        )  # download and decompress; data_file is the local path to the TSV
        # The mined data ships as a single file per pair, so there is only a
        # TRAIN split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_file,
                    "source_lg": self.config.src_lg,
                    "target_lg": self.config.tgt_lg,
                },
            )
        ]
    def _generate_examples(self, filepath, source_lg, target_lg):
        with open(filepath, encoding="utf-8") as f:
            for id_, example in enumerate(f):
                try:
                    # Each line is a 9-column tab-separated record:
                    # source text, target text, LASER score, source LID,
                    # target LID, then source/target provenance and URLs.
                    # Strip the trailing newline so it does not end up in
                    # the last field.
                    datarow = example.rstrip("\n").split("\t")
                    row = {}
                    row["translation"] = {
                        source_lg: datarow[0],
                        target_lg: datarow[1],
                    }  # create translation json
                    row["laser_score"] = float(datarow[2])
                    row["source_sentence_lid"] = float(datarow[3])
                    row["target_sentence_lid"] = float(datarow[4])
                    row["source_sentence_source"] = datarow[5]
                    row["source_sentence_url"] = datarow[6]
                    row["target_sentence_source"] = datarow[7]
                    row["target_sentence_url"] = datarow[8]
                    # Replace empty string fields with None; compare against
                    # "" explicitly so a legitimate 0.0 score is preserved.
                    row = {k: (None if v == "" else v) for k, v in row.items()}
                except Exception:
                    # Surface the offending row before re-raising.
                    print(datarow)
                    raise
                yield id_, row
# to test the script, go to the root folder of the repo (nllb) and run:
# datasets-cli test nllb --save_infos --all_configs
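
# Example usage from the repo root (the pair name below is illustrative; any
# "{src}-{tgt}" pair defined in nllb_lang_pairs.LANG_PAIRS is a valid config):
#
#   import datasets
#   ds = datasets.load_dataset("nllb", "eng_Latn-fra_Latn", split="train")
#   print(ds[0]["translation"])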