baber committed
Commit b4e5dbc
1 Parent(s): 1e90bce

Create headqa.py

Files changed (1)
headqa.py +165 -0
headqa.py ADDED
@@ -0,0 +1,165 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# NOTE: This is an exact copy of
+# https://github.com/huggingface/datasets/blob/3804442bb7cfcb9d52044d92688115cfdc69c2da/datasets/head_qa/head_qa.py
+# with the exception of the `image` feature. This is to avoid adding `Pillow`
+# as a dependency.
+"""HEAD-QA: A Healthcare Dataset for Complex Reasoning."""
+
+
+import json
+import os
+
+import datasets
+
+
+_CITATION = """\
+@inproceedings{vilares-gomez-rodriguez-2019-head,
+    title = "{HEAD}-{QA}: A Healthcare Dataset for Complex Reasoning",
+    author = "Vilares, David and
+      G{\'o}mez-Rodr{\'i}guez, Carlos",
+    booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
+    month = jul,
+    year = "2019",
+    address = "Florence, Italy",
+    publisher = "Association for Computational Linguistics",
+    url = "https://www.aclweb.org/anthology/P19-1092",
+    doi = "10.18653/v1/P19-1092",
+    pages = "960--966",
+    abstract = "We present HEAD-QA, a multi-choice question answering testbed to encourage research on complex reasoning. The questions come from exams to access a specialized position in the Spanish healthcare system, and are challenging even for highly specialized humans. We then consider monolingual (Spanish) and cross-lingual (to English) experiments with information retrieval and neural techniques. We show that: (i) HEAD-QA challenges current methods, and (ii) the results lag well behind human performance, demonstrating its usefulness as a benchmark for future work.",
+}
+"""
+
+_DESCRIPTION = """\
+HEAD-QA is a multi-choice HEAlthcare Dataset. The questions come from exams to access a specialized position in the
+Spanish healthcare system, and are challenging even for highly specialized humans. They are designed by the Ministerio
+de Sanidad, Consumo y Bienestar Social.
+The dataset contains questions about the following topics: medicine, nursing, psychology, chemistry, pharmacology and biology.
+"""
+
+_HOMEPAGE = "https://aghie.github.io/head-qa/"
+
+# The Spanish data comes from the "Ministerio de Sanidad, Consumo y Bienestar Social", as indicated here: https://github.com/aghie/head-qa
+# This Spanish data seems to follow the intellectual property rights stated here: https://www.sanidad.gob.es/avisoLegal/home.htm
+# The English data was translated by the authors of head-qa (https://arxiv.org/pdf/1906.04701.pdf).
+_LICENSE = "Custom license"
+
+_URL = "https://drive.google.com/uc?export=download&confirm=t&id=1a_95N5zQQoUCq8IBNVZgziHbeM-QxG2t"
+
+_DIRS = {"es": "HEAD", "en": "HEAD_EN"}
+
+
+class HeadQA(datasets.GeneratorBasedBuilder):
+    """HEAD-QA: A Healthcare Dataset for Complex Reasoning"""
+
+    VERSION = datasets.Version("1.1.0")
+
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name="es", version=VERSION, description="Spanish HEAD dataset"
+        ),
+        datasets.BuilderConfig(
+            name="en", version=VERSION, description="English HEAD dataset"
+        ),
+    ]
+
+    DEFAULT_CONFIG_NAME = "es"
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "name": datasets.Value("string"),
+                    "year": datasets.Value("string"),
+                    "category": datasets.Value("string"),
+                    "qid": datasets.Value("int32"),
+                    "qtext": datasets.Value("string"),
+                    "ra": datasets.Value("int32"),
+                    "answers": [
+                        {
+                            "aid": datasets.Value("int32"),
+                            "atext": datasets.Value("string"),
+                        }
+                    ],
+                }
+            ),
+            supervised_keys=None,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        data_dir = dl_manager.download_and_extract(_URL)
+
+        dir = _DIRS[self.config.name]
+        data_lang_dir = os.path.join(data_dir, dir)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "data_dir": data_dir,
+                    "filepath": os.path.join(data_lang_dir, f"train_{dir}.json"),
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "data_dir": data_dir,
+                    "filepath": os.path.join(data_lang_dir, f"test_{dir}.json"),
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "data_dir": data_dir,
+                    "filepath": os.path.join(data_lang_dir, f"dev_{dir}.json"),
+                },
+            ),
+        ]
+
+    def _generate_examples(self, data_dir, filepath):
+        """Yields examples."""
+        with open(filepath, encoding="utf-8") as f:
+            head_qa = json.load(f)
+            for exam_id, exam in enumerate(head_qa["exams"]):
+                content = head_qa["exams"][exam]
+                name = content["name"].strip()
+                year = content["year"].strip()
+                category = content["category"].strip()
+                for question in content["data"]:
+                    qid = int(question["qid"].strip())
+                    qtext = question["qtext"].strip()
+                    ra = int(question["ra"].strip())
+
+                    aids = [answer["aid"] for answer in question["answers"]]
+                    atexts = [answer["atext"].strip() for answer in question["answers"]]
+                    answers = [
+                        {"aid": aid, "atext": atext} for aid, atext in zip(aids, atexts)
+                    ]
+
+                    id_ = f"{exam_id}_{qid}"
+                    yield id_, {
+                        "name": name,
+                        "year": year,
+                        "category": category,
+                        "qid": qid,
+                        "qtext": qtext,
+                        "ra": ra,
+                        "answers": answers,
+                    }
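
For reference, a minimal usage sketch (not part of the commit). It assumes this script is saved locally as headqa.py, a `datasets` version that still executes dataset loading scripts (recent releases require `trust_remote_code=True`; on releases that predate that argument, drop it), and network access to the Google Drive URL in `_URL`. The builder exposes train, validation, and test splits per `_split_generators`.

# Minimal usage sketch, not part of the commit. Assumes a local copy of
# this script saved as "headqa.py" and a `datasets` version that still
# executes loading scripts.
from datasets import load_dataset

# "en" selects the English config; the default config is "es" (Spanish).
head_qa = load_dataset("headqa.py", "en", trust_remote_code=True)

example = head_qa["train"][0]
print(example["qtext"])                    # question text
for answer in example["answers"]:          # candidate answers
    print(answer["aid"], answer["atext"])
print("correct answer id:", example["ra"])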