import json
from dataclasses import dataclass
from string import Template

import datasets
from datasets.download.download_manager import DownloadManager

_CITATION = ""
_DESCRIPTION = (
    "Wura is a large-scale pretraining dataset for 20 languages widely spoken "
    "in Africa."
)
_HOMEPAGE = "https://github.com/castorini/AfriTeVa-keji"
_LICENSE = "Apache License 2.0"
_DOCUMENT_DATASET_VERSION = "1.0.0"
_PASSAGE_DATASET_VERSION = "1.0.0"
_LANGUAGES = {
    "Afrikaans": "afr",
    "Amharic": "amh",
    "Egyptian Arabic": "arz",
    "English": "eng",
    "French": "fra",
    "Hausa": "hau",
    "Igbo": "ibo",
    "Gahuza": "kin",
    "Malagasy": "mlg",
    "Chichewa": "nya",
    "Afaan Oromoo": "orm",
    # "Nigerian Pidgin": "pcm",
    "Portuguese": "por",
    "Shona": "sna",
    "Somali": "som",
    "Sesotho": "sot",
    "Swahili": "swa",
    "Tigrinya": "tir",
    "Xhosa": "xho",
    "Yoruba": "yor",
    "Zulu": "zul"
}
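# The three-letter codes above double as the builder config names; "pcm" is
# commented out and therefore not currently exposed as a config.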
_DOCUMENT_DATASET_URL = Template("./documents-v1.0/${split}/${language}.jsonl")
_PASSAGE_DATASET_URL = Template("./passages-v1.0/${split}/${language}.txt")
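# The templates resolve to repository-relative paths, e.g.
# "./documents-v1.0/train/yor.jsonl" or "./passages-v1.0/eval/yor.txt".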

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


@dataclass
class WuraConfig(datasets.BuilderConfig):
    """BuilderConfig for Wura; `level` selects document- or passage-level data."""

    level: str = "document"


class WuraDataset(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        WuraConfig(
            name=language,
            version=datasets.Version(_DOCUMENT_DATASET_VERSION),
            description=f"Wura dataset for language: {language}\n{_DESCRIPTION}",
        ) for language in _LANGUAGES.values()
    ]

    DEFAULT_CONFIG_NAME = "afr"
    
    def _info(self):
        if self.config.level == "document":
            features = ["id", "headline", "content", "category", "url"]
        elif self.config.level == "passage":
            features = ["id", "text"]
        else:
            raise ValueError("level can only be one of `document` or `passage`")
        
        features = {feature: datasets.Value("string") for feature in features}

        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE
        )
    
    def _split_generators(self, dl_manager: DownloadManager):
        # Both levels share the same split layout; only the path template differs.
        if self.config.level == "document":
            url_template = _DOCUMENT_DATASET_URL
        elif self.config.level == "passage":
            url_template = _PASSAGE_DATASET_URL
        else:
            raise ValueError("level can only be one of `document` or `passage`")

        data_files = {
            split: url_template.substitute(split=split, language=self.config.name)
            for split in ["train", "eval"]
        }
        language_files = dl_manager.download_and_extract(data_files)
        
        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": language_files["train"]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": language_files["eval"]}
            )
        ]
        return splits
    
    def _generate_examples(self, filepath: str):
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                if self.config.level == "document":
                    # Document-level files are JSONL; each line is one document.
                    data = json.loads(line)
                    data["id"] = str(idx)  # `id` is declared as a string feature
                else:
                    # Passage-level files are plain text, one passage per line.
                    data = {"id": str(idx), "text": line.strip()}

                yield idx, data
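

# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not part of the loader). Assumes the script
# is saved as `wura.py` next to the `documents-v1.0/` and `passages-v1.0/`
# directories it references, and a `datasets` version that supports loading
# from a local script.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from datasets import load_dataset

    # The config name is the three-letter language code; `level` is forwarded
    # to WuraConfig and switches between document- and passage-level data.
    docs = load_dataset("wura.py", name="yor", level="document")
    print(docs["train"][0])

    passages = load_dataset("wura.py", name="yor", level="passage")
    print(passages["validation"][0])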