|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""A Dataset loading script for the Controlled Text Reduction dataset.""" |
|
|
|
|
|
import datasets |
|
from dataclasses import dataclass |
|
from pathlib import Path |
|
from typing import List, Tuple |
|
import pandas as pd |
|
import json |
|
import gzip |
|
import itertools |
|
|
|
|
|
_CITATION = """""" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
The dataset contains document-summary pairs with document spans (referred to as "highlights"), indicating the "pre-selected" spans that lead to the creation of the summary. |
|
The evaluation and test datasets were constructed via controlled crowdsourcing. |
|
The train datasets were automatically generated using the summary-source proposition-level alignment model SuperPAL (Ernst et al., 2021). |
|
""" |
|
|
|
_HOMEPAGE = "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main" |
|
|
|
_LICENSE = """MIT License |
|
Copyright (c) 2022 lovodkin93 |
|
Permission is hereby granted, free of charge, to any person obtaining a copy |
|
of this software and associated documentation files (the "Software"), to deal |
|
in the Software without restriction, including without limitation the rights |
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
|
copies of the Software, and to permit persons to whom the Software is |
|
furnished to do so, subject to the following conditions: |
|
The above copyright notice and this permission notice shall be included in all |
|
copies or substantial portions of the Software. |
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
|
SOFTWARE.""" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
_URLs = { |
|
"DUC-2001-2002": { |
|
"dev": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/dev_DUC-2001-2002.csv", |
|
"test": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/test_DUC-2001-2002.csv", |
|
"train": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/train_DUC-2001-2002.csv" |
|
}, |
|
"CNN-DM": { |
|
"train": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/train_CNNDM.csv" |
|
}, |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
class ControlledTextReduction(datasets.GeneratorBasedBuilder): |
|
"""Controlled Text Reduction: dataset for the Controlled Text Reduction task (). |
|
Each data point consists of a document, a summary, and a list of spans of the document that are the pre-selected content whose summary is the summary""" |
|
|
|
|
|
VERSION = datasets.Version("1.0.0") |
|
|
|
BUILDER_CONFIGS = [ |
|
datasets.BuilderConfig( |
|
name="DUC-2001-2002", |
|
version=VERSION, |
|
description="This provides the Controlled Text Reduction dataset extracted from the DUC 2001-2002 Single Document Summarization benchmark" |
|
), |
|
datasets.BuilderConfig( |
|
name="CNN-DM", |
|
version=VERSION, |
|
description="This provides the Controlled Text Reduction dataset extracted from the CNN-DM dataset (the train split)" |
|
) |
|
] |
|
|
|
DEFAULT_CONFIG_NAME = ( |
|
"default" |
|
) |
|
|
|
def _info(self): |
|
features = datasets.Features( |
|
{ |
|
"doc_text": datasets.Value("string"), |
|
"summary_text": datasets.Value("string"), |
|
"highlight_spans": datasets.Value("string") |
|
} |
|
) |
|
return datasets.DatasetInfo( |
|
|
|
description=_DESCRIPTION, |
|
|
|
features=features, |
|
|
|
|
|
|
|
supervised_keys=None, |
|
|
|
homepage=_HOMEPAGE, |
|
|
|
license=_LICENSE, |
|
|
|
citation=_CITATION, |
|
) |
|
|
|
def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager): |
|
"""Returns SplitGenerators.""" |
|
|
|
URLs = _URLs[self.config.name] |
|
|
|
corpora = {section: Path(dl_manager.download_and_extract(URLs[section])) |
|
for section in URLs} |
|
|
|
if self.config.name=="CNN-DM": |
|
return [ |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TRAIN, |
|
|
|
gen_kwargs={ |
|
"filepath": corpora["train"] |
|
}, |
|
), |
|
] |
|
|
|
else: |
|
return [ |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TRAIN, |
|
|
|
gen_kwargs={ |
|
"filepath": corpora["train"] |
|
}, |
|
), |
|
datasets.SplitGenerator( |
|
name=datasets.Split.VALIDATION, |
|
|
|
gen_kwargs={ |
|
"filepath": corpora["dev"] |
|
}, |
|
), |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TEST, |
|
|
|
gen_kwargs={ |
|
"filepath": corpora["test"] |
|
}, |
|
), |
|
] |
|
|
|
def _generate_examples(self, filepath: List[str]): |
|
|
|
""" Yields Controlled Text Reduction examples from a csv file. Each instance contains the document, the summary and the pre-selected spans.""" |
|
|
|
|
|
df = pd.read_csv(filepath, index_col=False) |
|
for counter, dic in enumerate(df.to_dict('records')): |
|
columns_to_load_into_object = ["doc_text", "summary_text", "highlight_spans"] |
|
for key in columns_to_load_into_object: |
|
dic[key] = eval(dic[key]) |
|
yield counter, dic |