Commit 38a0bff by parquet-converter
1 parent: b949637

Update parquet files

.gitattributes DELETED
@@ -1,27 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bin.* filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md DELETED
@@ -1,54 +0,0 @@
- ---
- language:
- - en
- multilinguality:
- - monolingual
- size_categories:
- - 10K<n<100K
- task_categories:
- - summarization
- - text-generation
- task_ids: []
- tags:
- - conditional-text-generation
- ---
-
- # GovReport dataset for summarization
-
- Dataset for summarization of long documents.\
- Adapted from this [repo](https://github.com/luyang-huang96/LongDocSum) and this [paper](https://arxiv.org/pdf/2104.02112.pdf)\
- This dataset is compatible with the [`run_summarization.py`](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization) script from Transformers if you add this line to the `summarization_name_mapping` variable:
- ```python
- "ccdv/govreport-summarization": ("report", "summary")
- ```
-
- ### Data Fields
-
- - `id`: paper id
- - `report`: a string containing the body of the report
- - `summary`: a string containing the summary of the report
-
- ### Data Splits
-
- This dataset has 3 splits: _train_, _validation_, and _test_. \
- Token counts with a RoBERTa tokenizer.
-
- | Dataset Split | Number of Instances | Avg. tokens |
- | ------------- | --------------------|:----------------------|
- | Train | 17,517 | < 9,000 / < 500 |
- | Validation | 973 | < 9,000 / < 500 |
- | Test | 973 | < 9,000 / < 500 |
-
-
- # Cite original article
- ```
- @misc{huang2021efficient,
- title={Efficient Attentions for Long Document Summarization},
- author={Luyang Huang and Shuyang Cao and Nikolaus Parulian and Heng Ji and Lu Wang},
- year={2021},
- eprint={2104.02112},
- archivePrefix={arXiv},
- primaryClass={cs.CL}
- }
- ```
-
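Editor's note: the deleted README above documents the two columns, `report` and `summary`, under the repo id `ccdv/govreport-summarization`. A minimal sketch of how those columns are consumed, assuming the dataset is still loadable under that repo id (this snippet is not part of the repository):

```python
# Minimal sketch, assuming the repo id "ccdv/govreport-summarization"
# documented in the deleted README still resolves on the Hub.
from datasets import load_dataset

ds = load_dataset("ccdv/govreport-summarization", split="validation")
example = ds[0]
print(example["report"][:200])   # body of the government report
print(example["summary"][:200])  # reference summary
```

The `("report", "summary")` pair is exactly what the README's `summarization_name_mapping` entry maps to the text and summary columns for `run_summarization.py`.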
valid.zip → document/govreport-summarization-test.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:55243db0da8c1b081fa0336456265923065c9d4c6c32420fd84f02cedb4ddcc4
- size 15445708
+ oid sha256:867ccf35bac183066a871fc7c3192110af586361c43f88cb07d763a91421065f
+ size 24000719
train.zip → document/govreport-summarization-train-00000-of-00002.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:201f9a408ec5b86d6b9d6a056079dc104a34cc6c0440b615483b74eee0f02aee
- size 270614224
+ oid sha256:4c898b1276a6e516548aeab263661c9d3d913ce3d5a3ac34cd3963fe42595836
+ size 234018346
document/govreport-summarization-train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:626a0a59b3ecd2cec5e69130edd9fb4d5eb13d19a969d7fb488921d7a7ce63bd
+ size 222461029
test.zip → document/govreport-summarization-validation.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d1a576daf0490954f4c72859334a2427b0d34d3bca8917849d94ec17e65dd197
- size 14171808
+ oid sha256:d84a1da2dbe7f8e06cecd3965e2b9adb56bede8d461e64d0e4a043a8de5ad748
+ size 26109212
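Editor's note: the renamed and added LFS pointers above correspond to four Parquet shards under `document/`. A sketch of reading them with the generic parquet loader in 🤗 Datasets; the shard names come from this commit, while the local relative paths are assumptions (e.g. after downloading the files):

```python
# Sketch only: load the converted Parquet shards by name. File names are taken
# from this commit; the paths assume the shards sit in a local "document/" dir.
from datasets import load_dataset

data_files = {
    "train": "document/govreport-summarization-train-*.parquet",         # 2 shards
    "validation": "document/govreport-summarization-validation.parquet",
    "test": "document/govreport-summarization-test.parquet",
}
ds = load_dataset("parquet", data_files=data_files)
print(ds)  # DatasetDict with train/validation/test splits
```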
govreport-summarization.py DELETED
@@ -1,96 +0,0 @@
- import json
- import os
- import datasets
- from datasets.tasks import TextClassification
-
- _DESCRIPTION = """
- GovReport dataset for summarization.
- From paper: Efficient Attentions for Long Document Summarization" by L. Huang et al.
- See: https://arxiv.org/pdf/2104.02112.pdf
- See: https://github.com/luyang-huang96/LongDocSum
- """
- _CITATION = """\
- @misc{huang2021efficient,
- title={Efficient Attentions for Long Document Summarization},
- author={Luyang Huang and Shuyang Cao and Nikolaus Parulian and Heng Ji and Lu Wang},
- year={2021},
- eprint={2104.02112},
- archivePrefix={arXiv},
- primaryClass={cs.CL}
- }
- }
- """
- _ABSTRACT = "summary"
- _ARTICLE = "report"
-
- class GovReportSummarizationConfig(datasets.BuilderConfig):
-     """BuilderConfig for GovReportSummarization."""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for GovReportSummarization.
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(GovReportSummarizationConfig, self).__init__(**kwargs)
-
-
- class GovReportSummarizationDataset(datasets.GeneratorBasedBuilder):
-     """GovReportSummarization Dataset."""
-
-     _TRAIN_FILE = "train.zip"
-     _VAL_FILE = "valid.zip"
-     _TEST_FILE = "test.zip"
-
-     BUILDER_CONFIGS = [
-         GovReportSummarizationConfig(
-             name="document",
-             version=datasets.Version("1.0.0"),
-             description="GovReport dataset for summarization, document",
-         ),
-     ]
-
-     DEFAULT_CONFIG_NAME = "document"
-
-     def _info(self):
-         # Should return a datasets.DatasetInfo object
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     _ARTICLE: datasets.Value("string"),
-                     _ABSTRACT: datasets.Value("string"),
-                     #"id": datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=None,
-             homepage="https://github.com/luyang-huang96/LongDocSum",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-
-         train_path = os.path.join(dl_manager.download_and_extract(self._TRAIN_FILE), "train.txt")
-         val_path = os.path.join(dl_manager.download_and_extract(self._VAL_FILE), "valid.txt")
-         test_path = os.path.join(dl_manager.download_and_extract(self._TEST_FILE), "test.txt")
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path}
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
-             ),
-         ]
-
-     def _generate_examples(self, filepath):
-         """Generate GovReportSummarization examples."""
-         with open(filepath, encoding="utf-8") as f:
-             for id_, row in enumerate(f):
-                 data = json.loads(row)
-                 report = data["report"]
-                 summary = data["summary"]
-
-                 yield id_, {"report": report, "summary": summary}
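Editor's note: the deleted loading script streamed JSON-lines files (`train.txt`, `valid.txt`, `test.txt`) out of the original zip archives; the Parquet shards above replace that path. For anyone still working from the raw zips, a standalone sketch of the parsing done by `_generate_examples` (the helper name below is hypothetical, not from the script):

```python
# Standalone sketch of the deleted _generate_examples logic: each line of the
# extracted .txt file is a JSON object with "report" and "summary" keys.
# The function name read_govreport_jsonl is an assumption for illustration.
import json

def read_govreport_jsonl(filepath):
    with open(filepath, encoding="utf-8") as f:
        for id_, row in enumerate(f):
            data = json.loads(row)
            yield id_, {"report": data["report"], "summary": data["summary"]}
```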