# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Dataset loading script for the Controlled Text Reduction dataset."""

from dataclasses import dataclass
from pathlib import Path

import datasets
import pandas as pd

_CITATION = """""" | |
_DESCRIPTION = """\
The dataset contains document-summary pairs with document spans (referred to as "highlights"), indicating the "pre-selected" spans that led to the creation of the summary.
The dev and test sets were constructed via controlled crowdsourcing.
The train set was automatically generated using the summary-source proposition-level alignment model SuperPAL (Ernst et al., 2021).
"""
_HOMEPAGE = "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main"
_LICENSE = """MIT License | |
Copyright (c) 2022 lovodkin93 | |
Permission is hereby granted, free of charge, to any person obtaining a copy | |
of this software and associated documentation files (the "Software"), to deal | |
in the Software without restriction, including without limitation the rights | |
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
copies of the Software, and to permit persons to whom the Software is | |
furnished to do so, subject to the following conditions: | |
The above copyright notice and this permission notice shall be included in all | |
copies or substantial portions of the Software. | |
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
SOFTWARE.""" | |
_URLs = {
    "DUC-2001-2002": {
        "train": "https://media.githubusercontent.com/media/lovodkin93/Controlled_Text_Reduction/main/data/train_DUC-2001-2002.csv",
        "dev": "https://media.githubusercontent.com/media/lovodkin93/Controlled_Text_Reduction/main/data/dev_DUC-2001-2002.csv",
        "test": "https://media.githubusercontent.com/media/lovodkin93/Controlled_Text_Reduction/main/data/test_DUC-2001-2002.csv",
    },
    # The CNN-DM config only provides a train split; empty dev/test entries are
    # omitted so that _split_generators never tries to download an empty URL.
    "CNN-DM": {
        "train": "https://media.githubusercontent.com/media/lovodkin93/Controlled_Text_Reduction/main/data/train_CNNDM.csv",
    },
}
COLUMNS = ["doc_text", "summary_text", "highlight_spans"]
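
# Illustrative sketch of one CSV row, inferred from the column names above.
# The exact encoding of "highlight_spans" shown here is an assumption for
# illustration, not a fact taken from the released data files:
#
#   doc_text,summary_text,highlight_spans
#   "Full document text ...","The summary ...","[(0, 57), (104, 188)]"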
@dataclass
class ControlledTextReductionConfig(datasets.BuilderConfig):
    """BuilderConfig selecting the underlying data source of the dataset ("DUC-2001-2002" or "CNN-DM")."""

    data_source: str = "DUC-2001-2002"  # "DUC-2001-2002" or "CNN-DM"
class ControlledTextReduction(datasets.GeneratorBasedBuilder):
    """Controlled Text Reduction: a dataset for the Controlled Text Reduction task.
    Each data point consists of a document, a summary, and a list of document spans
    covering the pre-selected content from which the summary was written."""
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = ControlledTextReductionConfig

    BUILDER_CONFIGS = [
        ControlledTextReductionConfig(
            name="DUC-2001-2002",
            version=VERSION,
            description="This provides the Controlled Text Reduction dataset extracted from the DUC 2001-2002 Single Document Summarization benchmark",
            data_source="DUC-2001-2002",
        ),
        ControlledTextReductionConfig(
            name="CNN-DM",
            version=VERSION,
            description="This provides the Controlled Text Reduction dataset extracted from the CNN-DM dataset (the train split)",
            data_source="CNN-DM",
        ),
    ]
    DEFAULT_CONFIG_NAME = (
        "DUC-2001-2002"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
    )
    def _info(self):
        features = datasets.Features(
            {
                "doc_text": datasets.Value("string"),
                "summary_text": datasets.Value("string"),
                "highlight_spans": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            # Both configurations share the same feature schema, defined above.
            features=features,
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators."""
        URLs = _URLs[self.config.data_source]

        # Download and prepare all files, keeping the same structure as _URLs
        corpora = {
            section: Path(dl_manager.download_and_extract(URLs[section]))
            for section in URLs
        }
        if self.config.data_source == "CNN-DM":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={"filepath": corpora["train"]},
                ),
            ]
        else:
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={"filepath": corpora["train"]},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={"filepath": corpora["dev"]},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={"filepath": corpora["test"]},
                ),
            ]
    def _generate_examples(self, filepath: Path):
        """Yields Controlled Text Reduction examples from a CSV file. Each instance
        contains the document, the summary and the pre-selected ("highlight") spans."""
        df = pd.read_csv(filepath)
        for counter, record in enumerate(df.to_dict("records")):
            # All columns are kept as plain strings, matching the features in _info.
            yield counter, {key: record[key] for key in COLUMNS}
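

# A minimal smoke-test sketch, assuming this script is saved locally (the file
# name below is an assumption for illustration; the config names come from
# BUILDER_CONFIGS above):
if __name__ == "__main__":
    from datasets import load_dataset

    # Point load_dataset at this script; `datasets` handles download and caching.
    dataset = load_dataset("controlled_text_reduction.py", name="DUC-2001-2002")
    print(dataset)
    print(dataset["train"][0]["doc_text"][:200])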