Sebastian Gehrmann committed
Commit 22c2550
1 Parent(s): 1a9b728

add train/val/test

Files changed (1):
  1. xsum.py  +53 -15
xsum.py CHANGED
@@ -17,12 +17,12 @@ _DESCRIPTION = """\
 This is the XSUM subset of the GEM benchmark.
 """
 _URLs = {
-    "xsum": {
-        "data": "http://bollin.inf.ed.ac.uk/public/direct/XSUM-EMNLP18-Summary-Data-Original.tar.gz",
-        "splits": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_xsum_confidence_0.8.json",
-        "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/xsum.zip",
-    },
-}
+    "xsum": {
+        "data": "http://bollin.inf.ed.ac.uk/public/direct/XSUM-EMNLP18-Summary-Data-Original.tar.gz",
+        "splits": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_xsum_confidence_0.8.json",
+        "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/xsum.zip",
+    },
+}

 _XSUM_REMOVE_LINES = set(
     [
@@ -40,6 +40,7 @@ _XSUM_REMOVE_LINES = set(
     ]
 )

+
 class Xsum(datasets.GeneratorBasedBuilder):

     BUILDER_CONFIGS = [
@@ -53,7 +54,7 @@ class Xsum(datasets.GeneratorBasedBuilder):
     def _info(self):
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
-            features = datasets.Features(
+            features=datasets.Features(
                 {
                     "gem_id": datasets.Value("string"),
                     "gem_parent_id": datasets.Value("string"),
@@ -61,8 +62,8 @@ class Xsum(datasets.GeneratorBasedBuilder):
                     "document": datasets.Value("string"),
                     "target": datasets.Value("string"),
                     "references": [datasets.Value("string")],
-                }
-            ),
+                }
+            ),
             supervised_keys=None,
             homepage="",
             citation=_CITATION,
@@ -75,12 +76,43 @@ class Xsum(datasets.GeneratorBasedBuilder):
             ("challenge_train_sample", "train_xsum_RandomSample500.json"),
             ("challenge_validation_sample", "validation_xsum_RandomSample500.json"),
             ("challenge_test_backtranslation", "test_xsum_BackTranslation500.json"),
-            ("challenge_test_bfp_02", "test_xsum_ButterFingersPerturbation_p=0.02_500.json"),
-            ("challenge_test_bfp_05", "test_xsum_ButterFingersPerturbation_p=0.05_500.json"),
+            (
+                "challenge_test_bfp_02",
+                "test_xsum_ButterFingersPerturbation_p=0.02_500.json",
+            ),
+            (
+                "challenge_test_bfp_05",
+                "test_xsum_ButterFingersPerturbation_p=0.05_500.json",
+            ),
             ("challenge_test_nopunc", "test_xsum_WithoutPunctuation500.json"),
-            ("challenge_test_covid", f"en_test_covid19.jsonl"),
+            ("challenge_test_covid", f"en_test_covid19.jsonl"),
         ]
         return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "filepath": dl_dir["splits"],
+                    "split": "train",
+                    "filepaths": os.path.join(dl_dir["data"], "bbc-summary-data"),
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "filepath": dl_dir["splits"],
+                    "split": "validation",
+                    "filepaths": os.path.join(dl_dir["data"], "bbc-summary-data"),
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "filepath": dl_dir["splits"],
+                    "split": "test",
+                    "filepaths": os.path.join(dl_dir["data"], "bbc-summary-data"),
+                },
+            ),
+        ] + [
             datasets.SplitGenerator(
                 name=challenge_split,
                 gen_kwargs={
@@ -88,7 +120,7 @@ class Xsum(datasets.GeneratorBasedBuilder):
                     "split": challenge_split,
                 },
             )
-            for challenge_split, filename in challenge_sets
+            for challenge_split, filename in challenge_sets
         ]

     def _generate_examples(self, filepath, split, filepaths=None):
@@ -121,9 +153,15 @@ class Xsum(datasets.GeneratorBasedBuilder):
             with open(filepath, "r", encoding="utf-8") as f:
                 split_ids = json.load(f)
             for id_, i in enumerate(split_ids[split]):
-                with open(os.path.join(filepaths, i + ".summary"), "r", encoding="utf-8") as f:
+                with open(
+                    os.path.join(filepaths, i + ".summary"), "r", encoding="utf-8"
+                ) as f:
                     text = "".join(
-                        [line for line in f.readlines() if line not in _XSUM_REMOVE_LINES and line.strip()]
+                        [
+                            line
+                            for line in f.readlines()
+                            if line not in _XSUM_REMOVE_LINES and line.strip()
+                        ]
                     )
                     segs = text.split("[SN]")
                     yield id_, {
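
With this change, _split_generators returns the standard train/validation/test splits (built from the XSUM split file and the BBC summary archive) in addition to the existing challenge sets. Below is a minimal usage sketch, not part of the commit: it assumes the loader is published on the Hugging Face Hub as GEM/xsum and loaded through the datasets library; adjust the repository id if the script lives elsewhere.

# Usage sketch (assumption: the script is hosted as "GEM/xsum" on the Hub).
from datasets import load_dataset

dataset = load_dataset("GEM/xsum", "xsum")

# The three SplitGenerators added in this commit expose the standard splits.
print(dataset["train"].num_rows, dataset["validation"].num_rows, dataset["test"].num_rows)
print(dataset["train"][0]["document"][:200])
print(dataset["train"][0]["target"])

# The previously existing challenge sets remain available as extra splits,
# e.g. the butter-fingers perturbation subset listed in challenge_sets above.
bfp_02 = dataset["challenge_test_bfp_02"]
print(bfp_02[0]["gem_id"], bfp_02[0]["target"])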