Modalities: Text
Languages: English
Libraries: Datasets
License: Apache 2.0
gabrielaltay committed
Commit a54c3a2 · 1 Parent(s): 00e441e

Delete tmp-scitail.py

Files changed (1):
  1. tmp-scitail.py +0 -189
tmp-scitail.py DELETED
@@ -1,189 +0,0 @@
- # coding=utf-8
- # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- """
- The SciTail dataset is an entailment dataset created from multiple-choice science exams and
- web sentences. Each question and the correct answer choice are converted into an assertive
- statement to form the hypothesis. We use information retrieval to obtain relevant text from
- a large text corpus of web sentences, and use these sentences as a premise P. We crowdsource
- the annotation of such premise-hypothesis pair as supports (entails) or not (neutral), in order
- to create the SciTail dataset. The dataset contains 27,026 examples with 10,101 examples with
- entails label and 16,925 examples with neutral label.
- """
- from dataclasses import dataclass
- import os
-
- import datasets
- import pandas as pd
-
-
- @dataclass
- class BigBioConfig(datasets.BuilderConfig):
-     """BuilderConfig for BigBio."""
-
-     name: str = None
-     version: datasets.Version = None
-     description: str = None
-     schema: str = None
-     subset_id: str = None
-
- _LANGUAGES = ["EN"]
- _PUBMED = False
- _LOCAL = False
- _CITATION = """\
- @inproceedings{scitail,
-     author = {Tushar Khot and Ashish Sabharwal and Peter Clark},
-     booktitle = {AAAI},
-     title = {SciTail: A Textual Entailment Dataset from Science Question Answering},
-     year = {2018}
- }
- """
-
- _DATASETNAME = "scitail"
- _DISPLAYNAME = "SciTail"
-
- _DESCRIPTION = """\
- The SciTail dataset is an entailment dataset created from multiple-choice science exams and
- web sentences. Each question and the correct answer choice are converted into an assertive
- statement to form the hypothesis. We use information retrieval to obtain relevant text from
- a large text corpus of web sentences, and use these sentences as a premise P. We crowdsource
- the annotation of such premise-hypothesis pair as supports (entails) or not (neutral), in order
- to create the SciTail dataset. The dataset contains 27,026 examples with 10,101 examples with
- entails label and 16,925 examples with neutral label.
- """
-
- _HOMEPAGE = "https://allenai.org/data/scitail"
-
- _LICENSE = "Apache 2.0"
-
- _URLS = {
-     _DATASETNAME: "https://ai2-public-datasets.s3.amazonaws.com/scitail/SciTailV1.1.zip",
- }
-
- _SUPPORTED_TASKS = ["TEXTUAL_ENTAILMENT"]
-
- _SOURCE_VERSION = "1.1.0"
-
- _BIGBIO_VERSION = "1.0.0"
-
-
- LABEL_MAP = {"entails": "entailment", "neutral": "neutral"}
-
- entailment_features = datasets.Features(
-     {
-         "id": datasets.Value("string"),
-         "premise": datasets.Value("string"),
-         "hypothesis": datasets.Value("string"),
-         "label": datasets.Value("string"),
-     }
- )
-
- class SciTailDataset(datasets.GeneratorBasedBuilder):
-     """SciTail textual entailment dataset loader (source and bigbio_te schemas)."""
-
-     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
-     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
-
-     BUILDER_CONFIGS = [
-         BigBioConfig(
-             name="scitail_source",
-             version=SOURCE_VERSION,
-             description="SciTail source schema",
-             schema="source",
-             subset_id="scitail",
-         ),
-         BigBioConfig(
-             name="scitail_bigbio_te",
-             version=BIGBIO_VERSION,
-             description="SciTail BigBio schema",
-             schema="bigbio_te",
-             subset_id="scitail",
-         ),
-     ]
-
-     DEFAULT_CONFIG_NAME = "scitail_source"
-
-     def _info(self):
-
-         if self.config.schema == "source":
-             features = datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "premise": datasets.Value("string"),
-                     "hypothesis": datasets.Value("string"),
-                     "label": datasets.Value("string"),
-                 }
-             )
-
-         elif self.config.schema == "bigbio_te":
-             features = entailment_features
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             license=str(_LICENSE),
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-
-         urls = _URLS[_DATASETNAME]
-         data_dir = dl_manager.download_and_extract(urls)
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "filepath": os.path.join(
-                         data_dir, "SciTailV1.1", "tsv_format", "scitail_1.0_train.tsv"
-                     ),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "filepath": os.path.join(
-                         data_dir, "SciTailV1.1", "tsv_format", "scitail_1.0_test.tsv"
-                     ),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "filepath": os.path.join(
-                         data_dir, "SciTailV1.1", "tsv_format", "scitail_1.0_dev.tsv"
-                     ),
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepath):
-         # Since examples can contain quotes mid-text, set quoting to QUOTE_NONE (3) when reading the TSV,
-         # e.g.: ... and apply specific "tools" to examples and ...
-         data = pd.read_csv(
-             filepath, sep="\t", names=["premise", "hypothesis", "label"], quoting=3
-         )
-         data["id"] = data.index
-
-         if self.config.schema == "source":
-             for _, row in data.iterrows():
-                 yield row["id"], row.to_dict()
-
-         elif self.config.schema == "bigbio_te":
-             # normalize labels
-             data["label"] = data["label"].apply(lambda x: LABEL_MAP[x])
-             for _, row in data.iterrows():
-                 yield row["id"], row.to_dict()
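
For context, a minimal usage sketch of the loader deleted in this commit. It assumes the script is still available locally (saved as, say, scitail.py; that filename is an assumption), and the two config names come directly from BUILDER_CONFIGS in the file above.

from datasets import load_dataset

# Source schema: labels keep the raw SciTail strings ("entails" / "neutral").
source = load_dataset("scitail.py", name="scitail_source")  # path assumed

# BigBio textual-entailment schema: labels go through LABEL_MAP,
# so "entails" becomes "entailment" and "neutral" stays "neutral".
bigbio = load_dataset("scitail.py", name="scitail_bigbio_te")

print(source)              # DatasetDict with train / validation / test splits
print(bigbio["train"][0])  # {"id": ..., "premise": ..., "hypothesis": ..., "label": ...}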
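The quoting detail in _generate_examples is easy to miss, so here is a standalone sketch of the same TSV parse. The local file path is an assumption (it mirrors the layout used in _split_generators), and csv.QUOTE_NONE is simply the named constant behind quoting=3.

import csv

import pandas as pd

# Assumed path: where SciTailV1.1.zip would have been extracted.
filepath = "SciTailV1.1/tsv_format/scitail_1.0_dev.tsv"

# Premises can contain stray double quotes mid-sentence, so disable
# quote handling entirely; otherwise pandas may merge fields or rows.
data = pd.read_csv(
    filepath,
    sep="\t",
    names=["premise", "hypothesis", "label"],
    quoting=csv.QUOTE_NONE,  # equivalent to quoting=3 in the deleted script
)

# Only two gold labels should appear: "entails" and "neutral".
print(data["label"].value_counts())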