|
|
|
"""Hugging Face `datasets` loading script for the Vuk'uzenzele South African multilingual corpus: sentence-aligned language pairs with alignment scores."""
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
|
|
import datasets |
|
|
|
from itertools import combinations |
|
|
|
LANGUAGES = ['afr', 'eng', 'nbl', 'nso', 'sot', 'ssw', 'tsn', 'tso', 'ven', 'xho', 'zul'] |
|
LANGUAGE_PAIRS = list(combinations(LANGUAGES, 2)) |
|
|
|
_CITATION = """\ |
|
@dataset{marivate_vukosi_2023_7598540, author = {Marivate, Vukosi and Njini, Daniel and Madodonga, Andani and Lastrucci, Richard and Dzingirai, Isheanesu Rajab, Jenalea}, title = {The Vuk'uzenzele South African Multilingual Corpus}, month = feb, year = 2023, publisher = {Zenodo}, doi = {10.5281/zenodo.7598539}, url = {https://doi.org/10.5281/zenodo.7598539} } |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
The dataset contains editions from the South African government magazine Vuk'uzenzele. Data was scraped from PDFs that have been placed in the data/raw folder. The PDFS were obtained from the Vuk'uzenzele website. |
|
""" |
|
|
|
_HOMEPAGE = "https://arxiv.org/abs/2303.03750" |
|
|
|
_LICENSE = "CC 4.0 BY" |
|
|
|
_URL = "https://raw.githubusercontent.com/dsfsi/vukuzenzele-nlp/master/data/opt_aligned_out/" |
|
|
|
class VukuzenzeleMonolingualConfig(datasets.BuilderConfig):
    """BuilderConfig for the VukuzenzeleMonolingual dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for VukuzenzeleMonolingual.

        (The original docstring said "Masakhaner" — a copy-paste leftover
        from another dataset loading script.)

        Args:
            **kwargs: keyword arguments forwarded to datasets.BuilderConfig.
        """
        # Zero-argument super() (PEP 3135) replaces the legacy explicit
        # super(VukuzenzeleMonolingualConfig, self) form; behavior identical.
        super().__init__(**kwargs)
|
|
|
|
|
|
|
class VukuzenzeleMonolingual(datasets.GeneratorBasedBuilder):
    """Builder for sentence-aligned Vuk'uzenzele language-pair files.

    One builder config exists per unordered language pair; the config name
    (e.g. "aligned-afr-eng.jsonl") is also the remote file name appended to
    _URL. Each example is a source/target sentence pair plus a float
    alignment score, read from a JSON-lines file.
    """

    VERSION = datasets.Version("1.0.0")

    # One BuilderConfig per language pair. A plain loop is used rather than a
    # comprehension because class-scope names (VERSION) are not visible
    # inside comprehension scopes at class level.
    BUILDER_CONFIGS = []
    for pair in LANGUAGE_PAIRS:
        name = "aligned-{}-{}.jsonl".format(pair[0], pair[1])
        description = "Vukuzenzele {}-{} aligned dataset".format(pair[0], pair[1])
        # name/description are already strings; the original's f"{name}"
        # wrappers and stray trailing comma inside append() were redundant.
        BUILDER_CONFIGS.append(
            datasets.BuilderConfig(name=name, version=VERSION, description=description)
        )

    def _info(self):
        """Return dataset metadata.

        Each record carries the aligned source/target sentences ("src",
        "tgt") and a float alignment score ("score").
        """
        features = datasets.Features(
            {
                "src": datasets.Value("string"),
                "tgt": datasets.Value("string"),
                "score": datasets.Value("float"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the single pair file named by self.config.name.

        The corpus ships no validation/test files, so everything is exposed
        as the TRAIN split.
        """
        urls = {
            "train": f"{_URL}{self.config.name}",
        }
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir["train"],
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs from a JSON-lines file.

        Args:
            filepath: local path to the downloaded .jsonl file, one JSON
                object per line with "src", "tgt" and "score" keys.
            split: split name (unused; kept because it is passed via
                gen_kwargs from _split_generators).
        """
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                yield key, {
                    "src": data["src"],
                    "tgt": data["tgt"],
                    "score": data["score"],
                }
|
|