Add the BIO format for the named entities.

Files changed:
- MASSIVE.py +54 -0
- README.md +31 -36
- test_MASSIVE.py +10 -5
MASSIVE.py
CHANGED
@@ -285,6 +285,10 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
                     "intent": datasets.features.ClassLabel(names=_INTENTS),
                     "utt": datasets.Value("string"),
                     "annot_utt": datasets.Value("string"),
+                    "annot_utt_bio": datasets.Sequence({
+                        "word": datasets.Value("string"),
+                        "tag": datasets.Value("string"),
+                    }),
                     "worker_id": datasets.Value("string"),
                     "slot_method": datasets.Sequence({
                         "slot": datasets.Value("string"),
@@ -337,6 +341,55 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
             ),
         ]
 
+    def _getBioFormat(self, text):
+
+        tags, words = [], []
+
+        bio_mode = False
+        cpt_bio = 0
+        current_tag = None
+
+        split_iter = iter(text.split(" "))
+
+        for s in split_iter:
+
+            if s.startswith("["):
+                current_tag = s.strip("[")
+                bio_mode = True
+                cpt_bio += 1
+                next(split_iter)
+                continue
+
+            elif s.endswith("]"):
+                bio_mode = False
+                if cpt_bio == 1:
+                    prefix = "B-"
+                else:
+                    prefix = "I-"
+                token = prefix + current_tag
+                word = s.strip("]")
+                current_tag = None
+                cpt_bio = 0
+
+            else:
+
+                if bio_mode == True:
+                    if cpt_bio == 1:
+                        prefix = "B-"
+                    else:
+                        prefix = "I-"
+                    token = prefix + current_tag
+                    word = s
+                    cpt_bio += 1
+                else:
+                    token = "O"
+                    word = s
+
+            tags.append(token)
+            words.append(word)
+
+        return [{"word": w, "tag": t} for w, t in zip(words, tags)]
+
     def _generate_examples(self, filepath, split, lang):
 
         filepath = filepath + "/1.0/data/" + lang + ".jsonl"
@@ -391,6 +444,7 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
                     "intent": data["intent"],
                     "utt": data["utt"],
                     "annot_utt": data["annot_utt"],
+                    "annot_utt_bio": self._getBioFormat(data["annot_utt"]),
                     "worker_id": data["worker_id"],
                     "slot_method": slot_method,
                     "judgments": judgments,
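For reference, below is a minimal standalone sketch of the bracket-to-BIO conversion that `_getBioFormat` performs, written as a module-level function so it can be tried without instantiating the builder. The name `annot_utt_to_bio` is made up for illustration; it assumes MASSIVE's `annot_utt` pattern `[slot : value]` with single-space tokenisation, as in the example call.

```python
def annot_utt_to_bio(text):
    """Sketch of the bracket-annotation -> BIO conversion (hypothetical helper)."""
    words, tags = [], []
    current_tag = None   # slot name of the span we are currently inside, if any
    position = 0         # 0 -> next in-span word gets "B-", later ones get "I-"
    tokens = iter(text.split(" "))
    for tok in tokens:
        if tok.startswith("["):      # "[time" opens a span and names the slot
            current_tag = tok.strip("[")
            next(tokens)             # skip the ":" separator token
            position = 0
            continue
        if current_tag is not None:  # word inside an annotated span
            prefix = "B-" if position == 0 else "I-"
            tags.append(prefix + current_tag)
            words.append(tok.rstrip("]"))
            position += 1
            if tok.endswith("]"):    # "matin]" closes the span
                current_tag = None
        else:                        # word outside any span
            tags.append("O")
            words.append(tok)
    return [{"word": w, "tag": t} for w, t in zip(words, tags)]


print(annot_utt_to_bio(
    "réveille-moi à [time : neuf heures du matin] le [date : vendredi]"
))
# [{'word': 'réveille-moi', 'tag': 'O'}, {'word': 'à', 'tag': 'O'},
#  {'word': 'neuf', 'tag': 'B-time'}, ..., {'word': 'vendredi', 'tag': 'B-date'}]
```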
README.md
CHANGED
@@ -258,44 +258,39 @@ print(dataset[0])
 
 ```json
 {
-    "id": "
+    "id": "1",
     "locale": "fr-FR",
-    "partition": "
-    "scenario":
-    "intent":
-    "utt": "réveille-moi à
-    "annot_utt": "réveille-moi à [time :
+    "partition": "train",
+    "scenario": 16,
+    "intent": 48,
+    "utt": "réveille-moi à neuf heures du matin le vendredi",
+    "annot_utt": "réveille-moi à [time : neuf heures du matin] le [date : vendredi]",
+    "annot_utt_bio": {
+        "word": [
+            "réveille-moi",
+            "à",
+            "neuf",
+            "heures",
+            "du",
+            "matin",
+            "le",
+            "vendredi"
+        ],
+        "tag": ["O", "O", "B-time", "I-time", "I-time", "I-time", "O", "B-date"]
+    },
     "worker_id": "22",
-    "slot_method":
-    "judgments":
-        },
-        {
-            "worker_id": "8",
-            "intent_score": 1,
-            "slots_score": 1,
-            "grammar_score": 4,
-            "spelling_score": 2,
-            "language_identification": "target"
-        },
-        {
-            "worker_id": "0",
-            "intent_score": 1,
-            "slots_score": 1,
-            "grammar_score": 4,
-            "spelling_score": 2,
-            "language_identification": "target"
-        }
-    ]
+    "slot_method": {
+        "slot": ["time", "date"],
+        "method": ["translation", "translation"]
+    },
+    "judgments": {
+        "worker_id": ["11", "22", "0"],
+        "intent_score": [2, 1, 1],
+        "slots_score": [1, 1, 1],
+        "grammar_score": [3, 4, 4],
+        "spelling_score": [2, 2, 2],
+        "language_identification": ["target", "target", "target"]
+    }
 }
 ```
 
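As a quick illustration of how the new field reads back after loading, something like the sketch below could be used. The source string is an assumption (the local MASSIVE.py script from this repository, or qanastek/MASSIVE on the Hub once it carries this change); the rest follows the card example above.

```python
from datasets import load_dataset

# Hypothetical usage sketch: assumes the loaded script already contains the
# "annot_utt_bio" feature introduced by this commit.
dataset = load_dataset("qanastek/MASSIVE", "fr-FR", split="train")
example = dataset[0]

# The Sequence feature is surfaced column-wise: one list of words, one of tags.
for word, tag in zip(example["annot_utt_bio"]["word"],
                     example["annot_utt_bio"]["tag"]):
    print(f"{word}\t{tag}")
```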
test_MASSIVE.py
CHANGED
@@ -1,14 +1,19 @@
-from datasets import load_dataset
+from datasets import load_dataset, set_caching_enabled
 
-
-source = "qanastek/MASSIVE"
+set_caching_enabled(False)
 
+source = "MASSIVE.py"
+# source = "qanastek/MASSIVE"
+
+dataset = load_dataset(source, "fr-FR")
 # dataset = load_dataset(source, "fr-FR", download_mode="force_redownload")
 # print(dataset)
 
-dataset = load_dataset(source, "en-US"
+# dataset = load_dataset(source, "en-US")
+# dataset = load_dataset(source, "en-US", download_mode="force_redownload")
 # , split="train"
 print(dataset)
 
 # print(dataset[0])
-
+f = dataset["train"][0]
+print(f)