TheTung committed f61b365 (verified) · Parent: dd3d96f

Add main file


This file is modified from the original mlqa.py script so that the data format matches that of other QA datasets.
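For context, a minimal usage sketch (the script path below is a placeholder for wherever this file is hosted, e.g. a local copy or a Hub dataset repo id; it is not fixed by this commit). The config names come from the script itself: mlqa.<context_lang>.<question_lang> for the original dev/test data, plus mlqa-translate-train.<lang> and mlqa-translate-test.<lang> for the machine-translated variants:

import datasets

# Placeholder path: point this at the local mlqa.py or at the dataset repo id.
ds = datasets.load_dataset("path/to/mlqa.py", "mlqa.en.de")
print(ds)                        # splits: validation, test
print(ds["test"][0]["answers"])  # {"text": [...], "answer_start": [...]}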

Files changed (1)
  1. mlqa.py +206 -0
mlqa.py ADDED
@@ -0,0 +1,206 @@
"""Dataset loading script for MLQA (MultiLingual Question Answering), a cross-lingual extractive QA benchmark."""


import json
import os

import datasets


# BibTeX citation for the MLQA paper
_CITATION = """\
@article{lewis2019mlqa,
  title={MLQA: Evaluating Cross-lingual Extractive Question Answering},
  author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},
  journal={arXiv preprint arXiv:1910.07475},
  year={2019}
}
"""

_DESCRIPTION = """\
MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.
MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,
German, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between
4 different languages on average.
"""
_URL = "https://dl.fbaipublicfiles.com/MLQA/"
_DEV_TEST_URL = "MLQA_V1.zip"
_TRANSLATE_TEST_URL = "mlqa-translate-test.tar.gz"
_TRANSLATE_TRAIN_URL = "mlqa-translate-train.tar.gz"
_LANG = ["ar", "de", "vi", "zh", "en", "es", "hi"]
_TRANSLATE_LANG = ["ar", "de", "vi", "zh", "es", "hi"]


class MlqaConfig(datasets.BuilderConfig):
    def __init__(self, data_url, **kwargs):
        """BuilderConfig for MLQA

        Args:
            data_url: `string`, url to the dataset
            **kwargs: keyword arguments forwarded to super.
        """
        super(MlqaConfig, self).__init__(
            version=datasets.Version(
                "1.0.0",
            ),
            **kwargs,
        )
        self.data_url = data_url


class Mlqa(datasets.GeneratorBasedBuilder):
    """MLQA: cross-lingual extractive question answering."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = (
        [
            MlqaConfig(
                name="mlqa-translate-train." + lang,
                data_url=_URL + _TRANSLATE_TRAIN_URL,
                description="Machine-translated data for Translate-train (SQuAD Train and Dev sets machine-translated into "
                "Arabic, German, Hindi, Vietnamese, Simplified Chinese and Spanish)",
            )
            for lang in _LANG
            if lang != "en"
        ]
        + [
            MlqaConfig(
                name="mlqa-translate-test." + lang,
                data_url=_URL + _TRANSLATE_TEST_URL,
                description="Machine-translated data for Translate-Test (MLQA-test set machine-translated into English)",
            )
            for lang in _LANG
            if lang != "en"
        ]
        + [
            MlqaConfig(
                name="mlqa." + lang1 + "." + lang2,
                data_url=_URL + _DEV_TEST_URL,
                description="development and test splits",
            )
            for lang1 in _LANG
            for lang2 in _LANG
        ]
    )

    def _info(self):
        # Specifies the datasets.DatasetInfo object
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": datasets.features.Sequence(
                        {"text": datasets.Value("string"), "answer_start": datasets.Value("int32")}
                    ),
                    "id": datasets.Value("string"),
                }
            ),
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage="https://github.com/facebookresearch/MLQA",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Downloads the data and defines the splits.
        # dl_manager is a datasets.download.DownloadManager that can be used to
        # download and extract URLs.
        if self.config.name.startswith("mlqa-translate-train"):
            archive = dl_manager.download(self.config.data_url)
            lang = self.config.name.split(".")[-1]
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "filepath": f"mlqa-translate-train/{lang}_squad-translate-train-train-v1.1.json",
                        "files": dl_manager.iter_archive(archive),
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "filepath": f"mlqa-translate-train/{lang}_squad-translate-train-dev-v1.1.json",
                        "files": dl_manager.iter_archive(archive),
                    },
                ),
            ]

        else:
            if self.config.name.startswith("mlqa."):
                dl_file = dl_manager.download_and_extract(self.config.data_url)
                name = self.config.name.split(".")
                l1, l2 = name[1:]
                return [
                    datasets.SplitGenerator(
                        name=datasets.Split.TEST,
                        # These kwargs will be passed to _generate_examples
                        gen_kwargs={
                            "filepath": os.path.join(
                                os.path.join(dl_file, "MLQA_V1/test"),
                                f"test-context-{l1}-question-{l2}.json",
                            )
                        },
                    ),
                    datasets.SplitGenerator(
                        name=datasets.Split.VALIDATION,
                        # These kwargs will be passed to _generate_examples
                        gen_kwargs={
                            "filepath": os.path.join(
                                os.path.join(dl_file, "MLQA_V1/dev"), f"dev-context-{l1}-question-{l2}.json"
                            )
                        },
                    ),
                ]
            else:
                if self.config.name.startswith("mlqa-translate-test"):
                    archive = dl_manager.download(self.config.data_url)
                    lang = self.config.name.split(".")[-1]
                    return [
                        datasets.SplitGenerator(
                            name=datasets.Split.TEST,
                            # These kwargs will be passed to _generate_examples
                            gen_kwargs={
                                "filepath": f"mlqa-translate-test/translate-test-context-{lang}-question-{lang}.json",
                                "files": dl_manager.iter_archive(archive),
                            },
                        ),
                    ]

    def _generate_examples(self, filepath, files=None):
        """Yields examples."""
        if self.config.name.startswith("mlqa-translate"):
            for path, f in files:
                if path == filepath:
                    data = json.loads(f.read().decode("utf-8"))
                    break
        else:
            with open(filepath, encoding="utf-8") as f:
                data = json.load(f)
        for examples in data["data"]:
            for example in examples["paragraphs"]:
                context = example["context"]
                for qa in example["qas"]:
                    question = qa["question"]
                    id_ = qa["id"]
                    answers = qa["answers"]
                    answers_start = [answer["answer_start"] for answer in answers]
                    answers_text = [answer["text"] for answer in answers]
                    yield id_, {
                        "context": context,
                        "question": question,
                        "answers": {"answer_start": answers_start, "text": answers_text},
                        "id": id_,
                    }
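
For reference, a sketch of the data flow in _generate_examples: it walks the SQuAD-style JSON shipped in the MLQA archives and flattens each question into one record matching the features declared in _info. The literal values below are illustrative, not taken from the dataset:

# SQuAD-style input as read from the JSON files (illustrative values).
raw = {
    "data": [
        {
            "paragraphs": [
                {
                    "context": "Example context passage.",
                    "qas": [
                        {
                            "id": "example-id-0",
                            "question": "Example question?",
                            "answers": [{"text": "context passage", "answer_start": 8}],
                        }
                    ],
                }
            ]
        }
    ]
}

# One flat record is yielded per question, keyed by its id:
expected = (
    "example-id-0",
    {
        "context": "Example context passage.",
        "question": "Example question?",
        "answers": {"answer_start": [8], "text": ["context passage"]},
        "id": "example-id-0",
    },
)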