Convert dataset to Parquet

#4 opened by albertvillanova (HF staff)
README.md CHANGED
@@ -258,6 +258,7 @@ language_bcp47:
 tags:
 - language-identification
 dataset_info:
+  config_name: WiLI-2018 dataset
   features:
   - name: sentence
     dtype: string
@@ -500,16 +501,23 @@ dataset_info:
           '232': tuk
           '233': kan
           '234': ltg
-  config_name: WiLI-2018 dataset
   splits:
   - name: train
-    num_bytes: 65408201
+    num_bytes: 65408153
     num_examples: 117500
   - name: test
-    num_bytes: 66491260
+    num_bytes: 66491212
     num_examples: 117500
-  download_size: 130516351
-  dataset_size: 131899461
+  download_size: 91718265
+  dataset_size: 131899365
+configs:
+- config_name: WiLI-2018 dataset
+  data_files:
+  - split: train
+    path: WiLI-2018 dataset/train-*
+  - split: test
+    path: WiLI-2018 dataset/test-*
+  default: true
 ---
 
 # Dataset Card for wili_2018
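The new `configs` block wires each split to its Parquet shards, so `datasets` can load the data without a loading script. A minimal sketch, assuming the dataset is reachable under the `wili_2018` repository id (an assumption based on the dataset card title); the config name comes from the README above and can be omitted since it is marked as the default:

```python
from datasets import load_dataset

# "wili_2018" as repository id is an assumption; adjust to the actual Hub path.
ds = load_dataset("wili_2018", "WiLI-2018 dataset")

print(ds)              # DatasetDict with "train" and "test" splits, 117500 rows each
print(ds["train"][0])  # {"sentence": ..., "label": ...}
```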
WiLI-2018 dataset/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a1b582dbc8fc71d6baabc9574835d4a5d925b21f9ab2fcea49c7c4e86acc0df
+size 46000315
WiLI-2018 dataset/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:816e63cff8d7d3da5d9a2aaab68527f9d28e9efd22dab45ca0a9b9517c52ecea
+size 45717950
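The two entries above are Git LFS pointers; the actual shards are plain Parquet files that can also be read directly once downloaded. A minimal sketch with pandas, assuming local copies under the same relative paths as in the repository:

```python
import pandas as pd

# Paths mirror the repository layout; adjust to wherever the shards were downloaded.
train_df = pd.read_parquet("WiLI-2018 dataset/train-00000-of-00001.parquet")
test_df = pd.read_parquet("WiLI-2018 dataset/test-00000-of-00001.parquet")

print(len(train_df), len(test_df))  # 117500 rows per split, per the README metadata
print(train_df.columns.tolist())    # expected: ["sentence", "label"]
```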
wili_2018.py DELETED
@@ -1,334 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""WiLI-2018, the Wikipedia language identification benchmark dataset"""
-
-
-import datasets
-from datasets.tasks import TextClassification
-
-
-_CITATION = """\
-@dataset{thoma_martin_2018_841984,
-  author = {Thoma, Martin},
-  title = {{WiLI-2018 - Wikipedia Language Identification database}},
-  month = jan,
-  year = 2018,
-  publisher = {Zenodo},
-  version = {1.0.0},
-  doi = {10.5281/zenodo.841984},
-  url = {https://doi.org/10.5281/zenodo.841984}
-}
-"""
-
-_DESCRIPTION = """\
-It is a benchmark dataset for language identification and contains 235000 paragraphs of 235 languages
-"""
-
-# TODO: Add a link to an official homepage for the dataset here
-_HOMEPAGE = "https://zenodo.org/record/841984"
-
-# TODO: Add the licence for the dataset here if you can find it
-_LICENSE = "ODC Open Database License v1.0"
-
-
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_TRAIN_DOWNLOAD_URL = "https://drive.google.com/uc?export=download&id=1ZzlIQvw1KNBG97QQCfdatvVrrbeLaM1u"
-_TEST_DOWNLOAD_URL = "https://drive.google.com/uc?export=download&id=1Xx4kFc1Xdzz8AhDasxZ0cSa-a35EQSDZ"
-
-_CLASSES = [
-    "cdo",
-    "glk",
-    "jam",
-    "lug",
-    "san",
-    "rue",
-    "wol",
-    "new",
-    "mwl",
-    "bre",
-    "ara",
-    "hye",
-    "xmf",
-    "ext",
-    "cor",
-    "yor",
-    "div",
-    "asm",
-    "lat",
-    "cym",
-    "hif",
-    "ace",
-    "kbd",
-    "tgk",
-    "rus",
-    "nso",
-    "mya",
-    "msa",
-    "ava",
-    "cbk",
-    "urd",
-    "deu",
-    "swa",
-    "pus",
-    "bxr",
-    "udm",
-    "csb",
-    "yid",
-    "vro",
-    "por",
-    "pdc",
-    "eng",
-    "tha",
-    "hat",
-    "lmo",
-    "pag",
-    "jav",
-    "chv",
-    "nan",
-    "sco",
-    "kat",
-    "bho",
-    "bos",
-    "kok",
-    "oss",
-    "mri",
-    "fry",
-    "cat",
-    "azb",
-    "kin",
-    "hin",
-    "sna",
-    "dan",
-    "egl",
-    "mkd",
-    "ron",
-    "bul",
-    "hrv",
-    "som",
-    "pam",
-    "nav",
-    "ksh",
-    "nci",
-    "khm",
-    "sgs",
-    "srn",
-    "bar",
-    "cos",
-    "ckb",
-    "pfl",
-    "arz",
-    "roa-tara",
-    "fra",
-    "mai",
-    "zh-yue",
-    "guj",
-    "fin",
-    "kir",
-    "vol",
-    "hau",
-    "afr",
-    "uig",
-    "lao",
-    "swe",
-    "slv",
-    "kor",
-    "szl",
-    "srp",
-    "dty",
-    "nrm",
-    "dsb",
-    "ind",
-    "wln",
-    "pnb",
-    "ukr",
-    "bpy",
-    "vie",
-    "tur",
-    "aym",
-    "lit",
-    "zea",
-    "pol",
-    "est",
-    "scn",
-    "vls",
-    "stq",
-    "gag",
-    "grn",
-    "kaz",
-    "ben",
-    "pcd",
-    "bjn",
-    "krc",
-    "amh",
-    "diq",
-    "ltz",
-    "ita",
-    "kab",
-    "bel",
-    "ang",
-    "mhr",
-    "che",
-    "koi",
-    "glv",
-    "ido",
-    "fao",
-    "bak",
-    "isl",
-    "bcl",
-    "tet",
-    "jpn",
-    "kur",
-    "map-bms",
-    "tyv",
-    "olo",
-    "arg",
-    "ori",
-    "lim",
-    "tel",
-    "lin",
-    "roh",
-    "sqi",
-    "xho",
-    "mlg",
-    "fas",
-    "hbs",
-    "tam",
-    "aze",
-    "lad",
-    "nob",
-    "sin",
-    "gla",
-    "nap",
-    "snd",
-    "ast",
-    "mal",
-    "mdf",
-    "tsn",
-    "nds",
-    "tgl",
-    "nno",
-    "sun",
-    "lzh",
-    "jbo",
-    "crh",
-    "pap",
-    "oci",
-    "hak",
-    "uzb",
-    "zho",
-    "hsb",
-    "sme",
-    "mlt",
-    "vep",
-    "lez",
-    "nld",
-    "nds-nl",
-    "mrj",
-    "spa",
-    "ceb",
-    "ina",
-    "heb",
-    "hun",
-    "que",
-    "kaa",
-    "mar",
-    "vec",
-    "frp",
-    "ell",
-    "sah",
-    "eus",
-    "ces",
-    "slk",
-    "chr",
-    "lij",
-    "nep",
-    "srd",
-    "ilo",
-    "be-tarask",
-    "bod",
-    "orm",
-    "war",
-    "glg",
-    "mon",
-    "gle",
-    "min",
-    "ibo",
-    "ile",
-    "epo",
-    "lav",
-    "lrc",
-    "als",
-    "mzn",
-    "rup",
-    "fur",
-    "tat",
-    "myv",
-    "pan",
-    "ton",
-    "kom",
-    "wuu",
-    "tcy",
-    "tuk",
-    "kan",
-    "ltg",
-]
-
-
-class Wili_2018(datasets.GeneratorBasedBuilder):
-    """WiLI Language Identification Dataset"""
-
-    VERSION = datasets.Version("1.1.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="WiLI-2018 dataset",
-            version=VERSION,
-            description="Plain text of import of WiLI-2018",
-        )
-    ]
-
-    def _info(self):
-
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=datasets.Features(
-                {"sentence": datasets.Value("string"), "label": datasets.features.ClassLabel(names=_CLASSES)}
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-            task_templates=[TextClassification(text_column="sentence", label_column="label")],
-        )
-
-    def _split_generators(self, dl_manager):
-        train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
-        test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
-        ]
-
-    def _generate_examples(self, filepath):
-
-        with open(filepath, encoding="utf-8") as f:
-            for id_, line in enumerate(f):
-                text, label = line.rsplit(",", 1)
-                text = text.strip('"')
-                label = int(label.strip())
-                yield id_, {"sentence": text, "label": label - 1}
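The deleted script's `_generate_examples` shows how the original comma-separated dumps were parsed (a quoted sentence plus a 1-based label). For reference, a minimal sketch of how an equivalent Parquet conversion could be reproduced locally with the same parsing logic; the input file names are assumptions, and this is not necessarily the exact procedure used for this PR:

```python
import os

import datasets

# Hypothetical local copies of the original comma-separated files; adjust paths as needed.
_FILES = {"train": "train.csv", "test": "test.csv"}


def _read_split(path):
    """Parse one split with the same logic as the deleted _generate_examples."""
    sentences, labels = [], []
    with open(path, encoding="utf-8") as f:
        for line in f:
            text, label = line.rsplit(",", 1)
            sentences.append(text.strip('"'))
            labels.append(int(label.strip()) - 1)  # labels in the source file are 1-based
    return {"sentence": sentences, "label": labels}


os.makedirs("WiLI-2018 dataset", exist_ok=True)
for split, path in _FILES.items():
    # A full conversion would also attach the ClassLabel feature built from _CLASSES above.
    ds = datasets.Dataset.from_dict(_read_split(path))
    # One shard per split, mirroring the layout added in this PR.
    ds.to_parquet(f"WiLI-2018 dataset/{split}-00000-of-00001.parquet")
```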