Commit 47960e2 (0 parents)
Committed by: system (HF staff)

Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (4):
  1. .gitattributes +27 -0
  2. dataset_infos.json +1 -0
  3. dummy/1.2.0/dummy_data.zip +3 -0
  4. gigaword.py +124 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
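
Aside (not part of the commit): the patterns above tell Git LFS which files to store as pointers rather than regular blobs. A rough Python sketch of checking a path against them, using fnmatch as an approximation of gitattributes glob matching (the real rules for "**" and per-directory scoping differ slightly):

# Rough sketch only: fnmatch approximates, but does not exactly reproduce,
# gitattributes glob semantics.
import fnmatch

# A subset of the patterns added above.
LFS_PATTERNS = ["*.7z", "*.arrow", "*.bin", "*.zip", "*tfevents*"]

def tracked_by_lfs(path):
    return any(fnmatch.fnmatch(path, pattern) for pattern in LFS_PATTERNS)

print(tracked_by_lfs("dummy/1.2.0/dummy_data.zip"))  # True: stored as an LFS pointer
print(tracked_by_lfs("gigaword.py"))                 # False: stored as a regular text file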
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "\nHeadline-generation on a corpus of article pairs from Gigaword consisting of\naround 4 million articles. Use the 'org_data' provided by\nhttps://github.com/microsoft/unilm/ which is identical to\nhttps://github.com/harvardnlp/sent-summary but with better format.\n\nThere are two features:\n - document: article.\n - summary: headline.\n\n", "citation": "\n@article{graff2003english,\n title={English gigaword},\n author={Graff, David and Kong, Junbo and Chen, Ke and Maeda, Kazuaki},\n journal={Linguistic Data Consortium, Philadelphia},\n volume={4},\n number={1},\n pages={34},\n year={2003}\n}\n\n@article{Rush_2015,\n title={A Neural Attention Model for Abstractive Sentence Summarization},\n url={http://dx.doi.org/10.18653/v1/D15-1044},\n DOI={10.18653/v1/d15-1044},\n journal={Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing},\n publisher={Association for Computational Linguistics},\n author={Rush, Alexander M. and Chopra, Sumit and Weston, Jason},\n year={2015}\n}\n", "homepage": "https://github.com/harvardnlp/sent-summary", "license": "", "features": {"document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": {"input": "document", "output": "summary"}, "builder_name": "gigaword", "config_name": "default", "version": {"version_str": "1.2.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 2, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 451514, "num_examples": 1951, "dataset_name": "gigaword"}, "train": {"name": "train", "num_bytes": 916673137, "num_examples": 3803957, "dataset_name": "gigaword"}, "validation": {"name": "validation", "num_bytes": 45838081, "num_examples": 189651, "dataset_name": "gigaword"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1USoQ8lJgN8kAWnUnRrupMGrPMLlDVqlV": {"num_bytes": 578402958, "checksum": "bc0c4a2e1aa19cf2123688b87bc2d778c0d8fc24a4090e3c10a27c5faa1b898b"}}, "download_size": 578402958, "dataset_size": 962962732, "size_in_bytes": 1541365690}}
dummy/1.2.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60a43d59a4975055ba265da13bf83659f0ec63a2828ea14a6a5c71cc68e94687
+ size 1564
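
The file above is a Git LFS pointer rather than the archive itself: only the spec version, the sha256 object id, and the size live in the repository. A minimal sketch (not part of the commit) that parses such a pointer:

# Parse a Git LFS pointer file into its whitespace-separated key/value fields.
def parse_lfs_pointer(path):
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# e.g. parse_lfs_pointer("dummy/1.2.0/dummy_data.zip")
# -> {"version": "https://git-lfs.github.com/spec/v1", "oid": "sha256:60a4...", "size": "1564"}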
gigaword.py ADDED
@@ -0,0 +1,124 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ 
+ # Lint as: python3
+ """Gigaword summarization dataset."""
+ 
+ from __future__ import absolute_import, division, print_function
+ 
+ import os
+ 
+ import datasets
+ 
+ 
+ _CITATION = """
+ @article{graff2003english,
+   title={English gigaword},
+   author={Graff, David and Kong, Junbo and Chen, Ke and Maeda, Kazuaki},
+   journal={Linguistic Data Consortium, Philadelphia},
+   volume={4},
+   number={1},
+   pages={34},
+   year={2003}
+ }
+ 
+ @article{Rush_2015,
+   title={A Neural Attention Model for Abstractive Sentence Summarization},
+   url={http://dx.doi.org/10.18653/v1/D15-1044},
+   DOI={10.18653/v1/d15-1044},
+   journal={Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing},
+   publisher={Association for Computational Linguistics},
+   author={Rush, Alexander M. and Chopra, Sumit and Weston, Jason},
+   year={2015}
+ }
+ """
+ 
+ _DESCRIPTION = """
+ Headline-generation on a corpus of article pairs from Gigaword consisting of
+ around 4 million articles. Use the 'org_data' provided by
+ https://github.com/microsoft/unilm/ which is identical to
+ https://github.com/harvardnlp/sent-summary but with better format.
+ 
+ There are two features:
+  - document: article.
+  - summary: headline.
+ 
+ """
+ 
+ _URL = "https://drive.google.com/uc?export=download&id=1USoQ8lJgN8kAWnUnRrupMGrPMLlDVqlV"
+ 
+ _DOCUMENT = "document"
+ _SUMMARY = "summary"
+ 
+ 
+ class Gigaword(datasets.GeneratorBasedBuilder):
+     """Gigaword summarization dataset."""
+ 
+     # 1.0.0 contains a bug that uses validation data as training data.
+     # 1.1.0 Update to the correct train, validation and test data.
+     # 1.2.0 Replace <unk> with <UNK> in train/val to be consistent with test.
+     VERSION = datasets.Version("1.2.0")
+ 
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({_DOCUMENT: datasets.Value("string"), _SUMMARY: datasets.Value("string")}),
+             supervised_keys=(_DOCUMENT, _SUMMARY),
+             homepage="https://github.com/harvardnlp/sent-summary",
+             citation=_CITATION,
+         )
+ 
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         dl_path = dl_manager.download_and_extract(_URL)
+         pattern = os.path.join(dl_path, "org_data", "%s.%s.txt")
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "src_path": pattern % ("train", "src"),
+                     "tgt_path": pattern % ("train", "tgt"),
+                     "replace_unk": True,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "src_path": pattern % ("dev", "src"),
+                     "tgt_path": pattern % ("dev", "tgt"),
+                     "replace_unk": True,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "src_path": pattern % ("test", "src"),
+                     "tgt_path": pattern % ("test", "tgt"),
+                     "replace_unk": False,
+                 },
+             ),
+         ]
+ 
+     def _generate_examples(self, src_path=None, tgt_path=None, replace_unk=None):
+         """Yields examples."""
+         with open(src_path, encoding="utf-8") as f_d, open(tgt_path, encoding="utf-8") as f_s:
+             for i, (doc_text, sum_text) in enumerate(zip(f_d, f_s)):
+                 if replace_unk:
+                     yield i, {
+                         _DOCUMENT: doc_text.strip().replace("<unk>", "UNK"),
+                         _SUMMARY: sum_text.strip().replace("<unk>", "UNK"),
+                     }
+                 else:
+                     yield i, {_DOCUMENT: doc_text.strip(), _SUMMARY: sum_text.strip()}
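
For reference (not part of the commit), a minimal usage sketch of the builder above through the datasets library; it assumes the library is installed and the Google Drive source is reachable:

from datasets import load_dataset

# Downloads and prepares the train/validation/test splits defined in gigaword.py.
dataset = load_dataset("gigaword")
print(dataset)

example = dataset["train"][0]
print(example["document"])  # the article text
print(example["summary"])   # the headline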