Controlled-Text-Reduction-dataset / Controlled-Text-Reduction-dataset.py
# # coding=utf-8
# # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# """A Dataset loading script for the Controlled Text Reduction dataset."""
# import datasets
# from dataclasses import dataclass
# from pathlib import Path
# from typing import List, Tuple
# import pandas as pd
# import json
# import gzip
# import itertools
# _CITATION = """"""
# # _CITATION = """\
# # @inproceedings{roit2020controlled,
# # title={Controlled Crowdsourcing for High-Quality QA-SRL Annotation},
# # author={Roit, Paul and Klein, Ayal and Stepanov, Daniela and Mamou, Jonathan and Michael, Julian and Stanovsky, Gabriel and Zettlemoyer, Luke and Dagan, Ido},
# # booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
# # pages={7008--7013},
# # year={2020}
# # }
# # """
# _DESCRIPTION = """\
# The dataset contains document-summary pairs with document spans (referred to as "highlights"), indicating the "pre-selected" spans that lead to the creation of the summary.
# The evaluation and test datasets were constructed via controlled crowdsourcing.
# The train datasets were automatically generated using the summary-source proposition-level alignment model SuperPAL (Ernst et al., 2021).
# """
# _HOMEPAGE = "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main"
# _LICENSE = """MIT License
# Copyright (c) 2022 lovodkin93
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE."""
# # _URLs = {
# # "csv": {
# # "sentences": {
# # "wikinews.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikinews.dev.full.csv",
# # "wikinews.test": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikinews.test.full.csv",
# # "wikipedia.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikipedia.dev.full.csv",
# # "wikipedia.test": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikipedia.test.full.csv",
# # },
# # "qasrl-annotations": {
# # "wikinews.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikinews.dev.gold.csv",
# # "wikinews.test": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikinews.test.gold.csv",
# # "wikipedia.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikipedia.dev.gold.csv",
# # "wikipedia.test": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikipedia.test.gold.csv",
# # },
# # },
# # "jsonl": "https://qasrl.org/data/qasrl-gs.tar"
# # }
# # Note: use GitHub's raw-content URLs ("/raw/main/") so that dl_manager
# # downloads the CSV files themselves rather than the HTML pages served
# # under "/tree/main/".
# _URLs = {
# "DUC-2001-2002": {
# "dev": "https://github.com/lovodkin93/Controlled_Text_Reduction/raw/main/data/dev_DUC-2001-2002.csv",
# "test": "https://github.com/lovodkin93/Controlled_Text_Reduction/raw/main/data/test_DUC-2001-2002.csv",
# "train": "https://github.com/lovodkin93/Controlled_Text_Reduction/raw/main/data/train_DUC-2001-2002.csv"
# },
# "CNN-DM": {
# "train": "https://github.com/lovodkin93/Controlled_Text_Reduction/raw/main/data/train_CNNDM.csv",
# "dev": "https://github.com/lovodkin93/Controlled_Text_Reduction/raw/main/data/dev_DUC-2001-2002.csv",
# "test": "https://github.com/lovodkin93/Controlled_Text_Reduction/raw/main/data/test_DUC-2001-2002.csv",
# },
# }
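# # The CNN-DM configuration only contributes its own train split; its "dev"
# # and "test" entries point back to the DUC-2001-2002 files.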
# @dataclass
# class ControlledTextReductionConfig(datasets.BuilderConfig):
# """ Configuration that selects which data source ("DUC-2001-2002" or "CNN-DM") to load. """
# data_source: str = "DUC-2001-2002" # "DUC-2001-2002" or "CNN-DM"
# class ControlledTextReduction(datasets.GeneratorBasedBuilder):
# """Controlled Text Reduction: dataset for the Controlled Text Reduction task.
# Each data point consists of a document, a summary, and a list of document spans ("highlights") marking the pre-selected content that the summary covers"""
# VERSION = datasets.Version("1.0.0")
# BUILDER_CONFIG_CLASS = ControlledTextReductionConfig
# BUILDER_CONFIGS = [
# ControlledTextReductionConfig(
# name="DUC-2001-2002",
# version=VERSION,
# description="This provides the Controlled Text Reduction dataset extracted from the DUC 2001-2002 Single Document Summarization benchmark",
# data_source="DUC-2001-2002"
# ),
# ControlledTextReductionConfig(
# name="CNN-DM",
# version=VERSION,
# description="This provides the Controlled Text Reduction dataset extracted from the CNN-DM dataset (the train split)",
# data_source="CNN-DM"
# )
# ]
# DEFAULT_CONFIG_NAME = (
# "DUC-2001-2002" # It's not mandatory to have a default configuration. Just use one if it makes sense.
# )
# def _info(self):
# features = datasets.Features(
# {
# "doc_text": datasets.Value("string"),
# "summary_text": datasets.Value("string"),
# "highlight_spans": datasets.Value("string")
# }
# )
# return datasets.DatasetInfo(
# # This is the description that will appear on the datasets page.
# description=_DESCRIPTION,
# # This defines the different columns of the dataset and their types
# features=features, # Here we define them above because they are different between the two configurations
# # If there's a common (input, target) tuple from the features,
# # specify them here. They'll be used if as_supervised=True in
# # builder.as_dataset.
# supervised_keys=None,
# # Homepage of the dataset for documentation
# homepage=_HOMEPAGE,
# # License for the dataset if available
# license=_LICENSE,
# # Citation for the dataset
# citation=_CITATION,
# )
# def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
# """Returns SplitGenerators."""
# URLs = _URLs[self.config.data_source]
# # Download and prepare all files - keep same structure as URLs
# corpora = {section: Path(dl_manager.download_and_extract(URLs[section]))
# for section in URLs}
# # Both configurations expose the same train/validation/test structure,
# # so a single list of split generators covers them.
# return [
# datasets.SplitGenerator(
# name=datasets.Split.TRAIN,
# # These kwargs will be passed to _generate_examples
# gen_kwargs={
# "filepath": corpora["train"]
# },
# ),
# datasets.SplitGenerator(
# name=datasets.Split.VALIDATION,
# # These kwargs will be passed to _generate_examples
# gen_kwargs={
# "filepath": corpora["dev"]
# },
# ),
# datasets.SplitGenerator(
# name=datasets.Split.TEST,
# # These kwargs will be passed to _generate_examples
# gen_kwargs={
# "filepath": corpora["test"]
# },
# ),
# ]
# def _generate_examples(self, filepath: List[str]):
# """ Yields Controlled Text Reduction examples from a csv file. Each instance contains the document, the summary and the pre-selected spans."""
# # merge annotations from sections
# df = pd.read_csv(filepath, index_col=False)
# for counter, dic in enumerate(df.to_dict('records')):
# columns_to_load_into_object = ["doc_text", "summary_text", "highlight_spans"]
# for key in columns_to_load_into_object:
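# # NOTE: eval assumes each CSV cell stores a Python literal (e.g. a quoted
# # string); ast.literal_eval would be a safer choice for untrusted input.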
# dic[key] = eval(dic[key])
# yield counter, dic
#################################################################################################################################################
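# The block above is an earlier, commented-out version of this loading script;
# the active script begins below.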
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Dataset loading script for the QA-Discourse dataset (Pyatkin et al., EMNLP 2020)."""
import datasets
from pathlib import Path
from typing import List
import pandas as pd
_CITATION = """\
@inproceedings{pyatkin2020qadiscourse,
title={QADiscourse-Discourse Relations as QA Pairs: Representation, Crowdsourcing and Baselines},
author={Pyatkin, Valentina and Klein, Ayal and Tsarfaty, Reut and Dagan, Ido},
booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
pages={2804--2819},
year={2020}
}"""
_DESCRIPTION = """\
The dataset contains question-answer pairs to model discourse relations.
While answers roughly correspond to spans of the sentence, these spans could have been freely adjusted by annotators to grammatically fit the question;
therefore, answers are given only as text and not as identified spans of the original sentence.
See the paper for details: QADiscourse - Discourse Relations as QA Pairs: Representation, Crowdsourcing and Baselines, Pyatkin et al., 2020
"""
_HOMEPAGE = "https://github.com/ValentinaPy/QADiscourse"
_LICENSE = """Resources on this page are licensed CC-BY 4.0, a Creative Commons license requiring Attribution (https://creativecommons.org/licenses/by/4.0/)."""
_URLs = {
"wikinews.train": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_train.tsv",
"wikinews.dev": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_dev.tsv",
"wikinews.test": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_test.tsv",
"wikipedia.train": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_train.tsv",
"wikipedia.dev": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_dev.tsv",
"wikipedia.test": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_test.tsv",
}
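# The corpus spans two source domains (Wikinews and Wikipedia); the split
# generators below merge the two files that belong to each split.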
COLUMNS = ['qasrl_id', 'sentence', 'worker_id', 'full_question', 'full_answer',
'question_start', 'question_aux', 'question_body', 'answer',
'untokenized sentence', 'target indices for untok sent']
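# Expected TSV header; _generate_examples reads a subset of these fields
# (qasrl_id, sentence, question_start, question_aux, question_body, answer).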
# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
class QaDiscourse(datasets.GeneratorBasedBuilder):
"""QA-Discourse: Discourse Relations as Question-Answer Pairs. """
VERSION = datasets.Version("1.0.2")
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text", version=VERSION, description="This provides the QA-Discourse dataset"
),
]
DEFAULT_CONFIG_NAME = (
"plain_text" # It's not mandatory to have a default configuration. Just use one if it makes sense.
)
def _info(self):
features = datasets.Features(
{
"sentence": datasets.Value("string"),
"sent_id": datasets.Value("string"),
"question": datasets.Sequence(datasets.Value("string")),
"answers": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=features, # The single feature schema defined above
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager: datasets.DownloadManager):
"""Returns SplitGenerators."""
# Download and prepare all files - keep same structure as _URLs
corpora = {section: Path(dl_manager.download_and_extract(_URLs[section]))
for section in _URLs}
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepaths": [corpora["wikinews.train"],
corpora["wikipedia.train"]],
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepaths": [corpora["wikinews.dev"],
corpora["wikipedia.dev"]],
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepaths": [corpora["wikinews.test"],
corpora["wikipedia.test"]],
},
),
]
def _generate_examples(self, filepaths: List[str]):
"""
Yields QA-Discourse examples from a tsv file.
Sentences with no QAs will yield an "empty QA" record, where both 'question' and 'answers' are empty lists.
"""
# merge annotations from sections
# on_bad_lines replaces the error_bad_lines flag deprecated in pandas 1.3
df = pd.concat([pd.read_csv(fn, sep='\t', on_bad_lines='skip') for fn in filepaths]).reset_index(drop=True)
df = df.applymap(str) # must turn all values to strings explicitly to avoid type errors
for counter, row in df.iterrows():
# Prepare question (3 "slots" and question mark)
question = [row.question_start, row.question_aux, row.question_body.rstrip('?'), '?']
answer = [row.answer]
if row.question_start == "_": # sentence has no QAs
question = []
answer = []
yield counter, {
"sentence": row.sentence,
"sent_id": row.qasrl_id,
"question": question,
"answers": answer,
}
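# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the builder): load this
# script locally with the `datasets` library and print one example. Recent
# `datasets` releases may additionally require trust_remote_code=True for
# script-based datasets.
if __name__ == "__main__":
    qa_discourse = datasets.load_dataset(__file__, "plain_text")
    # Each record carries the sentence, its id, and the question/answer lists.
    print(qa_discourse["train"][0])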