add loading script
summ_screen.py +129 -0
summ_screen.py
ADDED
@@ -0,0 +1,129 @@
# coding=utf-8
#
"""SummScreen dataset."""


import json

import datasets


_CITATION = """
@inproceedings{chen-etal-2022-summscreen,
    title = "{S}umm{S}creen: A Dataset for Abstractive Screenplay Summarization",
    author = "Chen, Mingda and
      Chu, Zewei and
      Wiseman, Sam and
      Gimpel, Kevin",
    booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = may,
    year = "2022",
    address = "Dublin, Ireland",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.acl-long.589",
    pages = "8602--8615",
    abstract = "We introduce SummScreen, a summarization dataset comprised of pairs of TV series transcripts and human written recaps. The dataset provides a challenging testbed for abstractive summarization for several reasons. Plot details are often expressed indirectly in character dialogues and may be scattered across the entirety of the transcript. These details must be found and integrated to form the succinct plot descriptions in the recaps. Also, TV scripts contain content that does not directly pertain to the central plot but rather serves to develop characters or provide comic relief. This information is rarely contained in recaps. Since characters are fundamental to TV series, we also propose two entity-centric evaluation metrics. Empirically, we characterize the dataset by evaluating several methods, including neural models and those based on nearest neighbors. An oracle extractive approach outperforms all benchmarked models according to automatic metrics, showing that the neural models are unable to fully exploit the input transcripts. Human evaluation and qualitative analysis reveal that our non-oracle models are competitive with their oracle counterparts in terms of generating faithful plot events and can benefit from better content selectors. Both oracle and non-oracle models generate unfaithful facts, suggesting future research directions.",
}
"""

_DESCRIPTION = """
SummScreen Corpus contains over 26k pairs of TV series transcripts and human written recaps.
Each example has three main features:
- Transcript: text of the episode transcript (the dialogue).
- Recap: human written summary of the transcript.
- File Name: id of an example.
It also carries episode metadata: Show Title, Episode Number, Episode Title, Recap Author and Transcript Author.
"""

_HOMEPAGE = "https://aclanthology.org/2022.acl-long.589"

_SUBSETS = ("all", "fd", "tms")

_BASE_DATA_URL = "https://huggingface.co/datasets/yuanpj/summ_screen/resolve/main/data/"

_SUBSET_SPLIT_URL = _BASE_DATA_URL + "{subset}_{split}.json"

logger = datasets.utils.logging.get_logger(__name__)


class SummscreenConfig(datasets.BuilderConfig):
    """BuilderConfig for SummScreen."""

    def __init__(self, name, *args, **kwargs):
        """BuilderConfig for SummScreen."""
        super().__init__(*args, name=name, **kwargs)


class Summscreen(datasets.GeneratorBasedBuilder):
    """SummScreen Corpus dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [SummscreenConfig(name=subset) for subset in _SUBSETS]

    def _info(self):
        features = datasets.Features(
            {
                "File Name": datasets.Value("string"),
                "Recap": datasets.Value("string"),
                "Transcript": datasets.Value("string"),
                "Show Title": datasets.Value("string"),
                "Episode Number": datasets.Value("string"),
                "Episode Title": datasets.Value("string"),
                "Recap Author": datasets.Value("string"),
                "Transcript Author": datasets.Value("string"),
            }
        )
        # TODO: add license
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        splits = ["train", "dev", "test"]

        # Collect the source files for each split: the "fd" and "tms" configs use a
        # single source, while "all" combines both.
        subset_split_urls = {split: [] for split in splits}
        for split in splits:
            if self.config.name in ["all", "fd"]:
                subset_split_urls[split].append(
                    _SUBSET_SPLIT_URL.format(subset="fd", split=split)
                )
            if self.config.name in ["all", "tms"]:
                subset_split_urls[split].append(
                    _SUBSET_SPLIT_URL.format(subset="tms", split=split)
                )
        file_paths = dl_manager.download(subset_split_urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "file_paths": file_paths["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "file_paths": file_paths["dev"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "file_paths": file_paths["test"],
                },
            ),
        ]

    def _generate_examples(self, file_paths):
        """Yields examples."""
        # Each downloaded JSON file holds a list of episode dicts; "File Name" serves
        # as the unique example key.
        data = []
        for file_path in file_paths:
            with open(file_path, "r", encoding="utf-8") as f:
                data.extend(json.load(f))
        for example in data:
            yield example["File Name"], example
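
Once this script is in the repository, the dataset can be loaded through the standard `datasets` API. Below is a minimal usage sketch (not part of the commit), assuming the repository path is yuanpj/summ_screen as suggested by _BASE_DATA_URL above; the second argument selects one of the configs defined in _SUBSETS.

# Minimal usage sketch (assumption: the script is hosted in the yuanpj/summ_screen repo).
from datasets import load_dataset

# Config name is one of "all", "fd" (ForeverDreaming) or "tms" (TVMegaSite).
# Note: recent versions of `datasets` may require trust_remote_code=True to run
# a repository loading script.
dataset = load_dataset("yuanpj/summ_screen", "fd")

print(dataset)                       # DatasetDict with train/validation/test splits
print(dataset["train"][0]["Recap"])  # human written recap of the first episode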