# coding=utf-8
"""MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages"""
import json
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """
@misc{fitzgerald2022massive,
title={MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages},
author={Jack FitzGerald and Christopher Hench and Charith Peris and Scott Mackie and Kay Rottmann and Ana Sanchez and Aaron Nash and Liam Urbach and Vishesh Kakarala and Richa Singh and Swetha Ranganath and Laurie Crist and Misha Britan and Wouter Leeuwis and Gokhan Tur and Prem Natarajan},
year={2022},
eprint={2204.08582},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
@inproceedings{bastianelli-etal-2020-slurp,
title = "{SLURP}: A Spoken Language Understanding Resource Package",
author = "Bastianelli, Emanuele and
Vanzo, Andrea and
Swietojanski, Pawel and
Rieser, Verena",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.emnlp-main.588",
doi = "10.18653/v1/2020.emnlp-main.588",
pages = "7252--7262",
abstract = "Spoken Language Understanding infers semantic meaning directly from audio data, and thus promises to reduce error propagation and misunderstandings in end-user applications. However, publicly available SLU resources are limited. In this paper, we release SLURP, a new SLU package containing the following: (1) A new challenging dataset in English spanning 18 domains, which is substantially bigger and linguistically more diverse than existing datasets; (2) Competitive baselines based on state-of-the-art NLU and ASR systems; (3) A new transparent metric for entity labelling which enables a detailed error analysis for identifying potential areas of improvement. SLURP is available at https://github.com/pswietojanski/slurp."
}
"""
_LANGUAGE_PAIRS = ['af-ZA', 'am-ET', 'ar-SA', 'az-AZ', 'bn-BD', 'cy-GB', 'da-DK', 'de-DE', 'el-GR', 'en-US', 'es-ES', 'fa-IR', 'fi-FI', 'fr-FR', 'he-IL', 'hi-IN', 'hu-HU', 'hy-AM', 'id-ID', 'is-IS', 'it-IT', 'ja-JP', 'jv-ID', 'ka-GE', 'km-KH', 'kn-IN', 'ko-KR', 'lv-LV', 'ml-IN', 'mn-MN', 'ms-MY', 'my-MM', 'nb-NO', 'nl-NL', 'pl-PL', 'pt-PT', 'ro-RO', 'ru-RU', 'sl-SL', 'sq-AL', 'sv-SE', 'sw-KE', 'ta-IN', 'te-IN', 'th-TH', 'tl-PH', 'tr-TR', 'ur-PK', 'vi-VN', 'zh-CN', 'zh-TW']
_LICENSE = "cc-by-4-0"
_DESCRIPTION = """
MASSIVE is a parallel dataset of > 1M utterances across 51 languages with annotations
for the Natural Language Understanding tasks of intent prediction and slot annotation.
Utterances span 60 intents and include 55 slot types. MASSIVE was created by localizing
the SLURP dataset, composed of general Intelligent Voice Assistant single-shot interactions.
"""
_URL = "https://amazon-massive-nlu-dataset.s3.amazonaws.com/amazon-massive-dataset-1.0.tar.gz"
_SCENARIOS = ['calendar', 'recommendation', 'social', 'general', 'news', 'cooking', 'iot', 'email', 'weather', 'alarm', 'transport', 'lists', 'takeaway', 'play', 'audio', 'music', 'qa', 'datetime']
_INTENTS = ['audio_volume_other', 'play_music', 'iot_hue_lighton', 'general_greet', 'calendar_set', 'audio_volume_down', 'social_query', 'audio_volume_mute', 'iot_wemo_on', 'iot_hue_lightup', 'audio_volume_up', 'iot_coffee', 'takeaway_query', 'qa_maths', 'play_game', 'cooking_query', 'iot_hue_lightdim', 'iot_wemo_off', 'music_settings', 'weather_query', 'news_query', 'alarm_remove', 'social_post', 'recommendation_events', 'transport_taxi', 'takeaway_order', 'music_query', 'calendar_query', 'lists_query', 'qa_currency', 'recommendation_movies', 'general_joke', 'recommendation_locations', 'email_querycontact', 'lists_remove', 'play_audiobook', 'email_addcontact', 'lists_createoradd', 'play_radio', 'qa_stock', 'alarm_query', 'email_sendemail', 'general_quirky', 'music_likeness', 'cooking_recipe', 'email_query', 'datetime_query', 'transport_traffic', 'play_podcasts', 'iot_hue_lightchange', 'calendar_remove', 'transport_query', 'transport_ticket', 'qa_factoid', 'iot_cleaning', 'alarm_set', 'datetime_convert', 'iot_hue_lightoff', 'qa_definition', 'music_dislikeness']
_TAGS = ['O', 'B-food_type', 'B-movie_type', 'B-person', 'B-change_amount', 'I-relation', 'I-game_name', 'B-date', 'B-movie_name', 'I-person', 'I-place_name', 'I-podcast_descriptor', 'I-audiobook_name', 'B-email_folder', 'B-coffee_type', 'B-app_name', 'I-time', 'I-coffee_type', 'B-transport_agency', 'B-podcast_descriptor', 'I-playlist_name', 'B-media_type', 'B-song_name', 'I-music_descriptor', 'I-song_name', 'B-event_name', 'I-timeofday', 'B-alarm_type', 'B-cooking_type', 'I-business_name', 'I-color_type', 'B-podcast_name', 'I-personal_info', 'B-weather_descriptor', 'I-list_name', 'B-transport_descriptor', 'I-game_type', 'I-date', 'B-place_name', 'B-color_type', 'B-game_name', 'I-artist_name', 'I-drink_type', 'B-business_name', 'B-timeofday', 'B-sport_type', 'I-player_setting', 'I-transport_agency', 'B-game_type', 'B-player_setting', 'I-music_album', 'I-event_name', 'I-general_frequency', 'I-podcast_name', 'I-cooking_type', 'I-radio_name', 'I-joke_type', 'I-meal_type', 'I-transport_type', 'B-joke_type', 'B-time', 'B-order_type', 'B-business_type', 'B-general_frequency', 'I-food_type', 'I-time_zone', 'B-currency_name', 'B-time_zone', 'B-ingredient', 'B-house_place', 'B-audiobook_name', 'I-ingredient', 'I-media_type', 'I-news_topic', 'B-music_genre', 'I-definition_word', 'B-list_name', 'B-playlist_name', 'B-email_address', 'I-currency_name', 'I-movie_name', 'I-device_type', 'I-weather_descriptor', 'B-audiobook_author', 'I-audiobook_author', 'I-app_name', 'I-order_type', 'I-transport_name', 'B-radio_name', 'I-business_type', 'B-definition_word', 'B-artist_name', 'I-movie_type', 'B-transport_name', 'I-email_folder', 'B-music_album', 'I-house_place', 'I-music_genre', 'B-drink_type', 'I-alarm_type', 'B-music_descriptor', 'B-news_topic', 'B-meal_type', 'I-transport_descriptor', 'I-email_address', 'I-change_amount', 'B-device_type', 'B-transport_type', 'B-relation', 'I-sport_type', 'B-personal_info']
_ALL = "all"
class MASSIVE(datasets.GeneratorBasedBuilder):
"""MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages"""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name = name,
version = datasets.Version("1.0.0"),
description = f"The MASSIVE corpora for {name}",
) for name in _LANGUAGE_PAIRS
]
BUILDER_CONFIGS.append(datasets.BuilderConfig(
name = _ALL,
version = datasets.Version("1.0.0"),
description = f"The MASSIVE corpora for entire corpus",
))
DEFAULT_CONFIG_NAME = _ALL
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"locale": datasets.Value("string"),
"partition": datasets.Value("string"),
"scenario": datasets.features.ClassLabel(names=_SCENARIOS),
"intent": datasets.features.ClassLabel(names=_INTENTS),
"utt": datasets.Value("string"),
"annot_utt": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names = _TAGS
)
),
"worker_id": datasets.Value("string"),
"slot_method": datasets.Sequence({
"slot": datasets.Value("string"),
"method": datasets.Value("string"),
}),
"judgments": datasets.Sequence({
"worker_id": datasets.Value("string"),
"intent_score": datasets.Value("int8"), # [0, 1, 2]
"slots_score": datasets.Value("int8"), # [0, 1, 2]
"grammar_score": datasets.Value("int8"), # [0, 1, 2, 3, 4]
"spelling_score": datasets.Value("int8"), # [0, 1, 2]
"language_identification": datasets.Value("string"),
}),
},
),
supervised_keys=None,
homepage="https://github.com/alexa/massive",
citation=_CITATION,
license=_LICENSE,
)
def _split_generators(self, dl_manager):
archive = dl_manager.download(_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"files": dl_manager.iter_archive(archive),
"split": "train",
"lang": self.config.name,
}
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"files": dl_manager.iter_archive(archive),
"split": "dev",
"lang": self.config.name,
}
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"files": dl_manager.iter_archive(archive),
"split": "test",
"lang": self.config.name,
}
),
]
    def _getBioFormat(self, text):
        """Convert an annotated utterance, e.g. "wake me up at [time : nine am]",
        into parallel lists of whitespace tokens and their BIO slot tags."""
        tags, tokens = [], []
        bio_mode = False
        cpt_bio = 0
        current_tag = None
        split_iter = iter(text.split(" "))
        for s in split_iter:
            if s.startswith("["):
                # Opening bracket of a slot annotation: remember the slot name.
                current_tag = s.strip("[")
                bio_mode = True
                cpt_bio += 1
                next(split_iter)  # skip the ":" separator inside the brackets
                continue
elif s.endswith("]"):
bio_mode = False
if cpt_bio == 1:
prefix = "B-"
else:
prefix = "I-"
token = prefix + current_tag
word = s.strip("]")
current_tag = None
cpt_bio = 0
            else:
                if bio_mode:
                    # Word inside a multi-word slot annotation.
                    prefix = "B-" if cpt_bio == 1 else "I-"
                    token = prefix + current_tag
                    word = s
                    cpt_bio += 1
                else:
                    # Regular word outside any slot annotation.
                    token = "O"
                    word = s
tags.append(token)
tokens.append(word)
return tokens, tags
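    # Illustrative example (the utterance is an assumption for illustration, not
    # taken from the data files): for an annotated utterance such as
    #     "wake me up at [time : nine am] [date : friday]"
    # _getBioFormat returns
    #     tokens = ["wake", "me", "up", "at", "nine", "am", "friday"]
    #     tags   = ["O", "O", "O", "O", "B-time", "I-time", "B-date"]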
def _generate_examples(self, files, split, lang):
key_ = 0
if lang == "all":
lang = _LANGUAGE_PAIRS.copy()
else:
lang = [lang]
logger.info("⏳ Generating examples from = %s", ", ".join(lang))
        for path, f in files:
            # Map the archive path to its locale, e.g. ".../1.0/data/en-US.jsonl" -> "en-US".
            file_lang = path.split("1.0/data/")[-1].split(".jsonl")[0]
            if not lang:
                # All requested locales have been read; stop scanning the archive.
                break
            elif file_lang in lang:
                lang.remove(file_lang)
            else:
                continue
# Read the file
lines = f.read().decode(encoding="utf-8").split("\n")
for line in lines:
data = json.loads(line)
if data["partition"] != split:
continue
# Slot method
if "slot_method" in data:
slot_method = [
{
"slot": s["slot"],
"method": s["method"],
} for s in data["slot_method"]
]
else:
slot_method = []
# Judgments
if "judgments" in data:
judgments = [
{
"worker_id": j["worker_id"],
"intent_score": j["intent_score"],
"slots_score": j["slots_score"],
"grammar_score": j["grammar_score"],
"spelling_score": j["spelling_score"],
"language_identification": j["language_identification"] if "language_identification" in j else "target",
} for j in data["judgments"]
]
else:
judgments = []
tokens, tags = self._getBioFormat(data["annot_utt"])
yield key_, {
"id": data["id"],
"locale": data["locale"],
"partition": data["partition"],
"scenario": data["scenario"],
"intent": data["intent"],
"utt": data["utt"],
"annot_utt": data["annot_utt"],
"tokens": tokens,
"ner_tags": tags,
"worker_id": data["worker_id"],
"slot_method": slot_method,
"judgments": judgments,
}
key_ += 1
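
# Usage sketch (hedged): once this script is saved locally, e.g. as "massive.py"
# (the filename is an assumption), it can be loaded through the `datasets` library;
# newer `datasets` releases may additionally require trust_remote_code=True:
#
#   from datasets import load_dataset
#
#   massive = load_dataset("massive.py", "en-US")  # or "all" for every locale
#   print(massive["train"][0]["utt"])
#   print(massive["train"][0]["ner_tags"])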