Commit 0ac4ef8 (1 parent: 731e223)
lovodkin93 committed: updated the script

Files changed (1):
  1. Controlled-Text-Reduction-dataset.py +328 -157
Controlled-Text-Reduction-dataset.py CHANGED
@@ -1,3 +1,242 @@
 # coding=utf-8
 # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
 #
@@ -12,131 +251,74 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
- """A Dataset loading script for the Controlled Text Reduction dataset."""


 import datasets
- from dataclasses import dataclass
 from pathlib import Path
- from typing import List, Tuple
 import pandas as pd
- import json
- import gzip
- import itertools
-
-
- _CITATION = """"""
- # _CITATION = """\
- # @inproceedings{roit2020controlled,
- # title={Controlled Crowdsourcing for High-Quality QA-SRL Annotation},
- # author={Roit, Paul and Klein, Ayal and Stepanov, Daniela and Mamou, Jonathan and Michael, Julian and Stanovsky, Gabriel and Zettlemoyer, Luke and Dagan, Ido},
- # booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
- # pages={7008--7013},
- # year={2020}
- # }
- # """


 _DESCRIPTION = """\
- The dataset contains document-summary pairs with document spans (referred to as "highlights"), indicating the "pre-selected" spans that lead to the creation of the summary.
- The evaluation and test datasets were constructed via controlled crowdsourcing.
- The train datasets were automatically generated using the summary-source proposition-level alignment model SuperPAL (Ernst et al., 2021).
 """

- _HOMEPAGE = "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main"
-
- _LICENSE = """MIT License
- Copyright (c) 2022 lovodkin93
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE."""


- # _URLs = {
- # "csv": {
- # "sentences": {
- # "wikinews.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikinews.dev.full.csv",
- # "wikinews.test": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikinews.test.full.csv",
- # "wikipedia.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikipedia.dev.full.csv",
- # "wikipedia.test": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikipedia.test.full.csv",
- # },
- # "qasrl-annotations": {
- # "wikinews.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikinews.dev.gold.csv",
- # "wikinews.test": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikinews.test.gold.csv",
- # "wikipedia.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikipedia.dev.gold.csv",
- # "wikipedia.test": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikipedia.test.gold.csv",
- # },
- # },
- # "jsonl": "https://qasrl.org/data/qasrl-gs.tar"
- # }

 _URLs = {
- "DUC-2001-2002": {
- "dev": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/dev_DUC-2001-2002.csv",
- "test": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/test_DUC-2001-2002.csv",
- "train": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/train_DUC-2001-2002.csv"
- },
- "CNN-DM": {
- "train": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/train_CNNDM.csv",
- "dev": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/dev_DUC-2001-2002.csv",
- "test": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/test_DUC-2001-2002.csv",
- },
 }

-
- @dataclass
- class ControlledTextReductionConfig(datasets.BuilderConfig):
- """ Allow the loader to re-distribute the original dev and test splits between train, dev and test. """
- data_source: str = "DUC-2001-2002" # "DUC-2001-2002" or "CNN-DM"
-


- class ControlledTextReduction(datasets.GeneratorBasedBuilder):
- """Controlled Text Reduction: dataset for the Controlled Text Reduction task ().
- Each data point consists of a document, a summary, and a list of spans of the document that are the pre-selected content whose summary is the summary"""

-
- VERSION = datasets.Version("1.0.0")
-
- BUILDER_CONFIG_CLASS = ControlledTextReductionConfig

 BUILDER_CONFIGS = [
- ControlledTextReductionConfig(
- name="DUC-2001-2002",
- version=VERSION,
- description="This provides the Controlled Text Reduction dataset extracted from the DUC 2001-2002 Single Document Summarization benchmark",
- data_source="DUC-2001-2002"
 ),
- ControlledTextReductionConfig(
- name="CNN-DM",
- version=VERSION,
- description="This provides the Controlled Text Reduction dataset extracted from the CNN-DM dataset (the train split)",
- data_source="CNN-DM"
- )
 ]

 DEFAULT_CONFIG_NAME = (
- "default" # It's not mandatory to have a default configuration. Just use one if it make sense.
 )

 def _info(self):
 features = datasets.Features(
 {
- "doc_text": datasets.Value("string"),
- "summary_text": datasets.Value("string"),
- "highlight_spans": datasets.Value("string")
 }
 )
 return datasets.DatasetInfo(
@@ -159,69 +341,58 @@ class ControlledTextReduction(datasets.GeneratorBasedBuilder):
 def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
 """Returns SplitGenerators."""

- URLs = _URLs[self.config.data_source]
- # Download and prepare all files - keep same structure as URLs
- corpora = {section: Path(dl_manager.download_and_extract(URLs[section]))
- for section in URLs}
-
- if self.config.data_source=="CNN-DM":
- return [
- datasets.SplitGenerator(
- name=datasets.Split.TRAIN,
- # These kwargs will be passed to _generate_examples
- gen_kwargs={
- "filepath": corpora["train"]
- },
- ),
- datasets.SplitGenerator(
- name=datasets.Split.VALIDATION,
- # These kwargs will be passed to _generate_examples
- gen_kwargs={
- "filepath": corpora["dev"]
- },
- ),
- datasets.SplitGenerator(
- name=datasets.Split.TEST,
- # These kwargs will be passed to _generate_examples
- gen_kwargs={
- "filepath": corpora["test"]
- },
- ),
- ]
-
- else:
- return [
- datasets.SplitGenerator(
- name=datasets.Split.TRAIN,
- # These kwargs will be passed to _generate_examples
- gen_kwargs={
- "filepath": corpora["train"]
- },
- ),
- datasets.SplitGenerator(
- name=datasets.Split.VALIDATION,
- # These kwargs will be passed to _generate_examples
- gen_kwargs={
- "filepath": corpora["dev"]
- },
- ),
- datasets.SplitGenerator(
- name=datasets.Split.TEST,
- # These kwargs will be passed to _generate_examples
- gen_kwargs={
- "filepath": corpora["test"]
- },
- ),
- ]

- def _generate_examples(self, filepath: List[str]):

- """ Yields Controlled Text Reduction examples from a csv file. Each instance contains the document, the summary and the pre-selected spans."""

 # merge annotations from sections
- df = pd.read_csv(filepath, index_col=False)
- for counter, dic in enumerate(df.to_dict('records')):
- columns_to_load_into_object = ["doc_text", "summary_text", "highlight_spans"]
- for key in columns_to_load_into_object:
- dic[key] = eval(dic[key])
- yield counter, dic
+ # # coding=utf-8
+ # # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ # #
+ # # Licensed under the Apache License, Version 2.0 (the "License");
+ # # you may not use this file except in compliance with the License.
+ # # You may obtain a copy of the License at
+ # #
+ # # http://www.apache.org/licenses/LICENSE-2.0
+ # #
+ # # Unless required by applicable law or agreed to in writing, software
+ # # distributed under the License is distributed on an "AS IS" BASIS,
+ # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # # See the License for the specific language governing permissions and
+ # # limitations under the License.
+ # """A Dataset loading script for the Controlled Text Reduction dataset."""
+
+
+ # import datasets
+ # from dataclasses import dataclass
+ # from pathlib import Path
+ # from typing import List, Tuple
+ # import pandas as pd
+ # import json
+ # import gzip
+ # import itertools
+
+
+ # _CITATION = """"""
+ # # _CITATION = """\
+ # # @inproceedings{roit2020controlled,
+ # # title={Controlled Crowdsourcing for High-Quality QA-SRL Annotation},
+ # # author={Roit, Paul and Klein, Ayal and Stepanov, Daniela and Mamou, Jonathan and Michael, Julian and Stanovsky, Gabriel and Zettlemoyer, Luke and Dagan, Ido},
+ # # booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
+ # # pages={7008--7013},
+ # # year={2020}
+ # # }
+ # # """
+
+
+ # _DESCRIPTION = """\
+ # The dataset contains document-summary pairs with document spans (referred to as "highlights"), indicating the "pre-selected" spans that lead to the creation of the summary.
+ # The evaluation and test datasets were constructed via controlled crowdsourcing.
+ # The train datasets were automatically generated using the summary-source proposition-level alignment model SuperPAL (Ernst et al., 2021).
+ # """
+
+ # _HOMEPAGE = "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main"
+
+ # _LICENSE = """MIT License
+ # Copyright (c) 2022 lovodkin93
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE."""
+
+
+ # # _URLs = {
+ # # "csv": {
+ # # "sentences": {
+ # # "wikinews.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikinews.dev.full.csv",
+ # # "wikinews.test": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikinews.test.full.csv",
+ # # "wikipedia.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikipedia.dev.full.csv",
+ # # "wikipedia.test": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikipedia.test.full.csv",
+ # # },
+ # # "qasrl-annotations": {
+ # # "wikinews.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikinews.dev.gold.csv",
+ # # "wikinews.test": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikinews.test.gold.csv",
+ # # "wikipedia.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikipedia.dev.gold.csv",
+ # # "wikipedia.test": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikipedia.test.gold.csv",
+ # # },
+ # # },
+ # # "jsonl": "https://qasrl.org/data/qasrl-gs.tar"
+ # # }
+
+ # _URLs = {
+ # "DUC-2001-2002": {
+ # "dev": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/dev_DUC-2001-2002.csv",
+ # "test": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/test_DUC-2001-2002.csv",
+ # "train": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/train_DUC-2001-2002.csv"
+ # },
+ # "CNN-DM": {
+ # "train": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/train_CNNDM.csv",
+ # "dev": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/dev_DUC-2001-2002.csv",
+ # "test": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/test_DUC-2001-2002.csv",
+ # },
+ # }
+
+
+ # @dataclass
+ # class ControlledTextReductionConfig(datasets.BuilderConfig):
+ # """ Allow the loader to re-distribute the original dev and test splits between train, dev and test. """
+ # data_source: str = "DUC-2001-2002" # "DUC-2001-2002" or "CNN-DM"
+
+
+
+ # class ControlledTextReduction(datasets.GeneratorBasedBuilder):
+ # """Controlled Text Reduction: dataset for the Controlled Text Reduction task ().
+ # Each data point consists of a document, a summary, and a list of spans of the document that are the pre-selected content whose summary is the summary"""
+
+
+ # VERSION = datasets.Version("1.0.0")
+
+ # BUILDER_CONFIG_CLASS = ControlledTextReductionConfig
+
+ # BUILDER_CONFIGS = [
+ # ControlledTextReductionConfig(
+ # name="DUC-2001-2002",
+ # version=VERSION,
+ # description="This provides the Controlled Text Reduction dataset extracted from the DUC 2001-2002 Single Document Summarization benchmark",
+ # data_source="DUC-2001-2002"
+ # ),
+ # ControlledTextReductionConfig(
+ # name="CNN-DM",
+ # version=VERSION,
+ # description="This provides the Controlled Text Reduction dataset extracted from the CNN-DM dataset (the train split)",
+ # data_source="CNN-DM"
+ # )
+ # ]
+
+ # DEFAULT_CONFIG_NAME = (
+ # "default" # It's not mandatory to have a default configuration. Just use one if it make sense.
+ # )
+
+ # def _info(self):
+ # features = datasets.Features(
+ # {
+ # "doc_text": datasets.Value("string"),
+ # "summary_text": datasets.Value("string"),
+ # "highlight_spans": datasets.Value("string")
+ # }
+ # )
+ # return datasets.DatasetInfo(
+ # # This is the description that will appear on the datasets page.
+ # description=_DESCRIPTION,
+ # # This defines the different columns of the dataset and their types
+ # features=features, # Here we define them above because they are different between the two configurations
+ # # If there's a common (input, target) tuple from the features,
+ # # specify them here. They'll be used if as_supervised=True in
+ # # builder.as_dataset.
+ # supervised_keys=None,
+ # # Homepage of the dataset for documentation
+ # homepage=_HOMEPAGE,
+ # # License for the dataset if available
+ # license=_LICENSE,
+ # # Citation for the dataset
+ # citation=_CITATION,
+ # )
+
+ # def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
+ # """Returns SplitGenerators."""
+
+ # URLs = _URLs[self.config.data_source]
+ # # Download and prepare all files - keep same structure as URLs
+ # corpora = {section: Path(dl_manager.download_and_extract(URLs[section]))
+ # for section in URLs}
+
+ # if self.config.data_source=="CNN-DM":
+ # return [
+ # datasets.SplitGenerator(
+ # name=datasets.Split.TRAIN,
+ # # These kwargs will be passed to _generate_examples
+ # gen_kwargs={
+ # "filepath": corpora["train"]
+ # },
+ # ),
+ # datasets.SplitGenerator(
+ # name=datasets.Split.VALIDATION,
+ # # These kwargs will be passed to _generate_examples
+ # gen_kwargs={
+ # "filepath": corpora["dev"]
+ # },
+ # ),
+ # datasets.SplitGenerator(
+ # name=datasets.Split.TEST,
+ # # These kwargs will be passed to _generate_examples
+ # gen_kwargs={
+ # "filepath": corpora["test"]
+ # },
+ # ),
+ # ]
+
+ # else:
+ # return [
+ # datasets.SplitGenerator(
+ # name=datasets.Split.TRAIN,
+ # # These kwargs will be passed to _generate_examples
+ # gen_kwargs={
+ # "filepath": corpora["train"]
+ # },
+ # ),
+ # datasets.SplitGenerator(
+ # name=datasets.Split.VALIDATION,
+ # # These kwargs will be passed to _generate_examples
+ # gen_kwargs={
+ # "filepath": corpora["dev"]
+ # },
+ # ),
+ # datasets.SplitGenerator(
+ # name=datasets.Split.TEST,
+ # # These kwargs will be passed to _generate_examples
+ # gen_kwargs={
+ # "filepath": corpora["test"]
+ # },
+ # ),
+ # ]
+
+ # def _generate_examples(self, filepath: List[str]):
+
+ # """ Yields Controlled Text Reduction examples from a csv file. Each instance contains the document, the summary and the pre-selected spans."""
+
+ # # merge annotations from sections
+ # df = pd.read_csv(filepath, index_col=False)
+ # for counter, dic in enumerate(df.to_dict('records')):
+ # columns_to_load_into_object = ["doc_text", "summary_text", "highlight_spans"]
+ # for key in columns_to_load_into_object:
+ # dic[key] = eval(dic[key])
+ # yield counter, dic
+
+
+
+
+
+ #################################################################################################################################################
+
+
+
+
+
+
+
 # coding=utf-8
 # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
 #

 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+ """A Dataset loading script for the QA-Discourse dataset (Pyatkin et. al., ACL 2020)."""


 import datasets
 from pathlib import Path
+ from typing import List
 import pandas as pd
+
+
+ _CITATION = """\
+ @inproceedings{pyatkin2020qadiscourse,
+ title={QADiscourse-Discourse Relations as QA Pairs: Representation, Crowdsourcing and Baselines},
+ author={Pyatkin, Valentina and Klein, Ayal and Tsarfaty, Reut and Dagan, Ido},
+ booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
+ pages={2804--2819},
+ year={2020}
+ }"""


 _DESCRIPTION = """\
+ The dataset contains question-answer pairs to model discourse relations.
+ While answers roughly correspond to spans of the sentence, these spans could have been freely adjusted by annotators to grammaticaly fit the question;
+ Therefore, answers are given just as text and not as identified spans of the original sentence.
+ See the paper for details: QADiscourse - Discourse Relations as QA Pairs: Representation, Crowdsourcing and Baselines, Pyatkin et. al., 2020
 """

+ _HOMEPAGE = "https://github.com/ValentinaPy/QADiscourse"

+ _LICENSE = """Resources on this page are licensed CC-BY 4.0, a Creative Commons license requiring Attribution (https://creativecommons.org/licenses/by/4.0/)."""

 _URLs = {
+ "wikinews.train": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_train.tsv",
+ "wikinews.dev": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_dev.tsv",
+ "wikinews.test": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_test.tsv",
+ "wikipedia.train": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_train.tsv",
+ "wikipedia.dev": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_dev.tsv",
+ "wikipedia.test": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_test.tsv",
 }

+ COLUMNS = ['qasrl_id', 'sentence', 'worker_id', 'full_question', 'full_answer',
+ 'question_start', 'question_aux', 'question_body', 'answer',
+ 'untokenized sentence', 'target indices for untok sent']


+ # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
+ class QaDiscourse(datasets.GeneratorBasedBuilder):
+ """QA-Discourse: Discourse Relations as Question-Answer Pairs. """

+ VERSION = datasets.Version("1.0.2")

 BUILDER_CONFIGS = [
+ datasets.BuilderConfig(
+ name="plain_text", version=VERSION, description="This provides the QA-Discourse dataset"
 ),
 ]

 DEFAULT_CONFIG_NAME = (
+ "plain_text" # It's not mandatory to have a default configuration. Just use one if it make sense.
 )

 def _info(self):
 features = datasets.Features(
 {
+ "sentence": datasets.Value("string"),
+ "sent_id": datasets.Value("string"),
+ "question": datasets.Sequence(datasets.Value("string")),
+ "answers": datasets.Sequence(datasets.Value("string")),
 }
 )
 return datasets.DatasetInfo(

 def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
 """Returns SplitGenerators."""

+ # Download and prepare all files - keep same structure as _URLs
+ corpora = {section: Path(dl_manager.download_and_extract(_URLs[section]))
+ for section in _URLs}
+
+ return [
+ datasets.SplitGenerator(
+ name=datasets.Split.TRAIN,
+ # These kwargs will be passed to _generate_examples
+ gen_kwargs={
+ "filepaths": [corpora["wikinews.train"],
+ corpora["wikipedia.train"]],
+ },
+ ),
+ datasets.SplitGenerator(
+ name=datasets.Split.VALIDATION,
+ # These kwargs will be passed to _generate_examples
+ gen_kwargs={
+ "filepaths": [corpora["wikinews.dev"],
+ corpora["wikipedia.dev"]],
+ },
+ ),
+ datasets.SplitGenerator(
+ name=datasets.Split.TEST,
+ # These kwargs will be passed to _generate_examples
+ gen_kwargs={
+ "filepaths": [corpora["wikinews.test"],
+ corpora["wikipedia.test"]],
+ },
+ ),
+ ]

+ def _generate_examples(self, filepaths: List[str]):

+ """
+ Yields QA-Discourse examples from a tsv file.
+ Sentences with no QAs will yield an ``empty QA'' record, where both 'question' and 'answers' are empty lists.
+ """

 # merge annotations from sections
+ df = pd.concat([pd.read_csv(fn, sep='\t', error_bad_lines=False) for fn in filepaths]).reset_index(drop=True)
+ df = df.applymap(str) # must turn all values to strings explicitly to avoid type errors
+ for counter, row in df.iterrows():
+ # Prepare question (3 "slots" and question mark)
+ question = [row.question_start, row.question_aux, row.question_body.rstrip('?'), '?']
+ answer = [row.answer]
+ if row.question_start == "_": # sentence has no QAs
+ question = []
+ answer = []
+
+ yield counter, {
+ "sentence": row.sentence,
+ "sent_id": row.qasrl_id,
+ "question": question,
+ "answers": answer,
+ }
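
For reference, a dataset loading script like this one is typically exercised by pointing datasets.load_dataset at the script file (or at the dataset repository that hosts it). The snippet below is a minimal sketch, assuming the script is saved locally under its repository filename, that it defines a default configuration, and that the installed datasets release still supports loading from a local script; it makes no assumption about which configuration names or features the committed version ultimately exposes.

import datasets

# Minimal sketch (assumptions: the loading script is available locally as
# "Controlled-Text-Reduction-dataset.py", it defines a default configuration,
# and the installed `datasets` release still supports script-based loading).
dataset = datasets.load_dataset("./Controlled-Text-Reduction-dataset.py")

# Inspect the splits and the feature schema declared in _info(),
# then look at the first generated example of the train split.
print(dataset)
print(dataset["train"].features)
print(dataset["train"][0])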