holylovenia committed
Commit
a7243b5
1 Parent(s): 56ff802

Upload wili_2018.py with huggingface_hub

Files changed (1)
  1. wili_2018.py +359 -0
wili_2018.py ADDED
@@ -0,0 +1,359 @@
from pathlib import Path

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """
@article{thoma2018wili,
  title={The WiLI benchmark dataset for written language identification},
  author={Thoma, Martin},
  journal={arXiv preprint arXiv:1801.07779},
  year={2018}
}
"""

_DATASETNAME = "wili_2018"

_DESCRIPTION = """
WiLI-2018 is a Wikipedia language identification benchmark dataset. It contains 235000 paragraphs from 235 languages.
The dataset is balanced, and a train-test split is provided.
"""

_HOMEPAGE = "https://zenodo.org/records/841984"

_LANGUAGES = ["nrm", "jav", "min", "lao", "mya", "pag", "ind", "cbk", "tet", "tha", "ceb", "tgl", "bjn", "bcl", "vie"]

_LICENSE = Licenses.ODBL.value

_LOCAL = False

_URLS = {
    _DATASETNAME: {"train": "https://drive.google.com/uc?export=download&id=1ZzlIQvw1KNBG97QQCfdatvVrrbeLaM1u", "test": "https://drive.google.com/uc?export=download&id=1Xx4kFc1Xdzz8AhDasxZ0cSa-a35EQSDZ"},
}

_SUPPORTED_TASKS = [Tasks.LANGUAGE_IDENTIFICATION]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


_CLASSES = [
    "cdo",
    "glk",
    "jam",
    "lug",
    "san",
    "rue",
    "wol",
    "new",
    "mwl",
    "bre",
    "ara",
    "hye",
    "xmf",
    "ext",
    "cor",
    "yor",
    "div",
    "asm",
    "lat",
    "cym",
    "hif",
    "ace",
    "kbd",
    "tgk",
    "rus",
    "nso",
    "mya",
    "msa",
    "ava",
    "cbk",
    "urd",
    "deu",
    "swa",
    "pus",
    "bxr",
    "udm",
    "csb",
    "yid",
    "vro",
    "por",
    "pdc",
    "eng",
    "tha",
    "hat",
    "lmo",
    "pag",
    "jav",
    "chv",
    "nan",
    "sco",
    "kat",
    "bho",
    "bos",
    "kok",
    "oss",
    "mri",
    "fry",
    "cat",
    "azb",
    "kin",
    "hin",
    "sna",
    "dan",
    "egl",
    "mkd",
    "ron",
    "bul",
    "hrv",
    "som",
    "pam",
    "nav",
    "ksh",
    "nci",
    "khm",
    "sgs",
    "srn",
    "bar",
    "cos",
    "ckb",
    "pfl",
    "arz",
    "roa-tara",
    "fra",
    "mai",
    "zh-yue",
    "guj",
    "fin",
    "kir",
    "vol",
    "hau",
    "afr",
    "uig",
    "lao",
    "swe",
    "slv",
    "kor",
    "szl",
    "srp",
    "dty",
    "nrm",
    "dsb",
    "ind",
    "wln",
    "pnb",
    "ukr",
    "bpy",
    "vie",
    "tur",
    "aym",
    "lit",
    "zea",
    "pol",
    "est",
    "scn",
    "vls",
    "stq",
    "gag",
    "grn",
    "kaz",
    "ben",
    "pcd",
    "bjn",
    "krc",
    "amh",
    "diq",
    "ltz",
    "ita",
    "kab",
    "bel",
    "ang",
    "mhr",
    "che",
    "koi",
    "glv",
    "ido",
    "fao",
    "bak",
    "isl",
    "bcl",
    "tet",
    "jpn",
    "kur",
    "map-bms",
    "tyv",
    "olo",
    "arg",
    "ori",
    "lim",
    "tel",
    "lin",
    "roh",
    "sqi",
    "xho",
    "mlg",
    "fas",
    "hbs",
    "tam",
    "aze",
    "lad",
    "nob",
    "sin",
    "gla",
    "nap",
    "snd",
    "ast",
    "mal",
    "mdf",
    "tsn",
    "nds",
    "tgl",
    "nno",
    "sun",
    "lzh",
    "jbo",
    "crh",
    "pap",
    "oci",
    "hak",
    "uzb",
    "zho",
    "hsb",
    "sme",
    "mlt",
    "vep",
    "lez",
    "nld",
    "nds-nl",
    "mrj",
    "spa",
    "ceb",
    "ina",
    "heb",
    "hun",
    "que",
    "kaa",
    "mar",
    "vec",
    "frp",
    "ell",
    "sah",
    "eus",
    "ces",
    "slk",
    "chr",
    "lij",
    "nep",
    "srd",
    "ilo",
    "be-tarask",
    "bod",
    "orm",
    "war",
    "glg",
    "mon",
    "gle",
    "min",
    "ibo",
    "ile",
    "epo",
    "lav",
    "lrc",
    "als",
    "mzn",
    "rup",
    "fur",
    "tat",
    "myv",
    "pan",
    "ton",
    "kom",
    "wuu",
    "tcy",
    "tuk",
    "kan",
    "ltg",
]


class Wili2018Dataset(datasets.GeneratorBasedBuilder):
    """A benchmark dataset for language identification containing 235000 paragraphs in 235 languages."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_text",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_text",
            subset_id=_DATASETNAME,
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "sentence": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=_CLASSES),
                }
            )

        elif self.config.schema == "seacrowd_text":
            features = schemas.text_features(_CLASSES)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> list[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_dir, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_dir, "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> tuple[int, dict]:
        if self.config.schema == "source":
            with open(filepath[split], encoding="utf-8") as f:
                for i, line in enumerate(f):
                    text, label = line.rsplit(",", 1)
                    text = text.strip('"')
                    label = int(label.strip())
                    yield i, {"sentence": text, "label": _CLASSES[label - 1]}

        elif self.config.schema == "seacrowd_text":
            with open(filepath[split], encoding="utf-8") as f:
                for i, line in enumerate(f):
                    text, label = line.rsplit(",", 1)
                    text = text.strip('"')
                    label = int(label.strip())
                    yield i, {"id": str(i), "text": text, "label": _CLASSES[label - 1]}
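For reference, a minimal usage sketch (not part of the commit). It assumes the script above is saved locally as wili_2018.py, that the seacrowd package it imports is installed alongside the datasets library, and that the installed datasets version accepts the trust_remote_code argument; the config name wili_2018_source comes from BUILDER_CONFIGS above.

# Hypothetical local usage of the loader script defined in this commit.
import datasets

# Load the source-schema config; "wili_2018_seacrowd_text" would select the SEACrowd text schema instead.
wili = datasets.load_dataset(
    "wili_2018.py",              # assumed local path to the script above
    name="wili_2018_source",
    trust_remote_code=True,      # may be required by newer datasets versions
)

# Each example is a paragraph plus its language label, e.g. {"sentence": "...", "label": "eng"}.
print(wili["train"][0])
print(wili["test"].features["label"].names[:5])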