Datasets:

Modalities:
Text
Formats:
parquet
ArXiv:
Libraries:
Datasets
pandas
License:
lastrucci01 committed on
Commit
4a1bbb8
1 Parent(s): 61d62b8

updating vukuzenzele monolingual to use a dataset generator

Browse files
afr.jsonl.zip DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:a5af3a771b3344ccceb11e185716deded04a957d963b54d6515402115bc82ca6
3
- size 249645
 
 
 
 
eng.jsonl.zip DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:d28f8da74961aa98a00bcfcfa0155a72d8ff36eba357753c38ef8eaf7b873ba5
3
- size 191987
 
 
 
 
nbl.jsonl.zip DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:a8a748e1957dd484615983a25cef59a0ca29b569c33f03ca614871ade8c58687
3
- size 275532
 
 
 
 
nso.jsonl.zip DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:6a70167a06503f79a0efc641fb8d7850d0936aa3557840437de50679caaaf13e
3
- size 255078
 
 
 
 
sot.jsonl.zip DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:486089fdb22a6330a533e0981739ee0c9b4cde1adeebefa5671b31d42933365d
3
- size 250336
 
 
 
 
ssw.jsonl.zip DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:2007f35b3b28fd11b89207be5ec90f7c573a71f40af43d8150d084b8a55ac045
3
- size 261417
 
 
 
 
tsn.jsonl.zip DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:d7ce237bc9821289c9dd91a1b43fbf0cd00deb76b8f4d2c965742e30a79267e1
3
- size 273197
 
 
 
 
tso.jsonl.zip DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:5e99dc959fd6e4a95cdb00adb48cd18d3ba718f70672c33c01c6e27429c45ec1
3
- size 243815
 
 
 
 
ven.jsonl.zip DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:67499af808c6c43d41c8b6dca4bba286945a43256ee77c1f042de96886977c5e
3
- size 256295
 
 
 
 
vukuzenzele-monolingual.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Loading script for the monolingual splits of the Vuk'uzenzele South African
Multilingual Corpus, one config per language edition."""


import csv
import json
import os

import datasets


# Citation for the Zenodo record backing this corpus.
_CITATION = """\
@dataset{marivate_vukosi_2023_7598540, author = {Marivate, Vukosi and Njini, Daniel and Madodonga, Andani and Lastrucci, Richard and Dzingirai, Isheanesu Rajab, Jenalea}, title = {The Vuk'uzenzele South African Multilingual Corpus}, month = feb, year = 2023, publisher = {Zenodo}, doi = {10.5281/zenodo.7598539}, url = {https://doi.org/10.5281/zenodo.7598539} }
"""

_DESCRIPTION = """\
The dataset contains editions from the South African government magazine Vuk'uzenzele. Data was scraped from PDFs that have been placed in the data/raw folder. The PDFS were obtained from the Vuk'uzenzele website.
"""

# Paper describing the corpus.
_HOMEPAGE = "https://arxiv.org/abs/2303.03750"

_LICENSE = "CC 4.0 BY"

# Per-language data files live under <_URL><lang_code>/<_DATAFILE> on GitHub raw content.
_URL = "https://raw.githubusercontent.com/dsfsi/vukuzenzele-nlp/master/data/huggingface/"
_DATAFILE = "data.jsonl"
31
class VukuzenzeleMonolingualConfig(datasets.BuilderConfig):
    """BuilderConfig for the Vuk'uzenzele monolingual datasets."""

    def __init__(self, **kwargs):
        """BuilderConfig for VukuzenzeleMonolingual.

        Args:
            **kwargs: keyword arguments forwarded to super
                (``datasets.BuilderConfig``).
        """
        super(VukuzenzeleMonolingualConfig, self).__init__(**kwargs)
40
+
41
+
42
class VukuzenzeleMonolingual(datasets.GeneratorBasedBuilder):
    """Monolingual articles from the South African government magazine Vuk'uzenzele.

    Each builder config selects one language edition; every example carries
    the article title, body text, language code, and magazine edition.
    """

    VERSION = datasets.Version("1.0.0")

    # One config per language, keyed by ISO 639-2 language code.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="afr", version=VERSION, description="Vukuzenzele Afrikaans Dataset"),
        datasets.BuilderConfig(name="eng", version=VERSION, description="Vukuzenzele English Dataset"),
        datasets.BuilderConfig(name="nbl", version=VERSION, description="Vukuzenzele Ndebele Dataset"),
        datasets.BuilderConfig(name="nso", version=VERSION, description="Vukuzenzele Sepedi Dataset"),
        datasets.BuilderConfig(name="sot", version=VERSION, description="Vukuzenzele Sesotho Dataset"),
        datasets.BuilderConfig(name="ssw", version=VERSION, description="Vukuzenzele siSwati Dataset"),
        datasets.BuilderConfig(name="tsn", version=VERSION, description="Vukuzenzele Setswana Dataset"),
        datasets.BuilderConfig(name="tso", version=VERSION, description="Vukuzenzele Xitsonga Dataset"),
        datasets.BuilderConfig(name="ven", version=VERSION, description="Vukuzenzele Tshivenda Dataset"),
        datasets.BuilderConfig(name="xho", version=VERSION, description="Vukuzenzele isiXhosa Dataset"),
        datasets.BuilderConfig(name="zul", version=VERSION, description="Vukuzenzele isiZulu Dataset"),
    ]

    def _info(self):
        """Return the DatasetInfo describing the example schema and metadata."""
        features = datasets.Features(
            {
                "title": datasets.Value("string"),
                "text": datasets.Value("string"),
                "language_code": datasets.Value("string"),
                "edition": datasets.Value("string")
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download this config's JSONL file and expose it as a single train split."""
        # Data file path is derived from the selected language config name.
        urls = {
            "train": f"{_URL}{self.config.name}/{_DATAFILE}"
        }
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir["train"],
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs from a JSONL file.

        Args:
            filepath: local path to the downloaded JSONL file.
            split: split name (unused; kept for the SplitGenerator contract).
        """
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                # Some scraped rows lack a title; skip them rather than fail.
                if "title" not in data:
                    continue
                yield key, {
                    "title": data["title"],
                    "text": data["text"],
                    "edition": data["edition"],
                    "language_code": data["language_code"],
                }
xho.jsonl.zip DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:1da7270ae308e7e9458cd55daf1f3f10d648dda7f8ba1614d4d62d55a78babf5
3
- size 265230
 
 
 
 
zul.jsonl.zip DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:c963ce535b21131cc66136d921f9b652ba8851ed409c69e433e3dddab4127ac7
3
- size 255139