albertvillanova committed
Commit de54f13
1 Parent(s): f556296

Convert dataset to Parquet
README.md CHANGED
@@ -10,8 +10,6 @@ license:
  - gfdl
  multilinguality:
  - monolingual
- paperswithcode_id: wikitext-2
- pretty_name: WikiText
  size_categories:
  - 1M<n<10M
  source_datasets:
@@ -22,40 +20,42 @@ task_categories:
  task_ids:
  - language-modeling
  - masked-language-modeling
+ paperswithcode_id: wikitext-2
+ pretty_name: WikiText
  dataset_info:
- - config_name: wikitext-103-v1
+ - config_name: wikitext-103-raw-v1
    features:
    - name: text
      dtype: string
    splits:
    - name: test
-     num_bytes: 1295579
+     num_bytes: 1305092
      num_examples: 4358
    - name: train
-     num_bytes: 545142639
+     num_bytes: 546501673
      num_examples: 1801350
    - name: validation
-     num_bytes: 1154755
+     num_bytes: 1159292
      num_examples: 3760
-   download_size: 190229076
-   dataset_size: 547592973
- - config_name: wikitext-2-v1
+   download_size: 191984949
+   dataset_size: 548966057
+ - config_name: wikitext-103-v1
    features:
    - name: text
      dtype: string
    splits:
    - name: test
-     num_bytes: 1270951
+     num_bytes: 1295575
      num_examples: 4358
    - name: train
-     num_bytes: 10918134
-     num_examples: 36718
+     num_bytes: 545141915
+     num_examples: 1801350
    - name: validation
-     num_bytes: 1134127
+     num_bytes: 1154751
      num_examples: 3760
-   download_size: 4475746
-   dataset_size: 13323212
- - config_name: wikitext-103-raw-v1
+   download_size: 313093838
+   dataset_size: 547592241
+ - config_name: wikitext-2-raw-v1
    features:
    - name: text
      dtype: string
@@ -64,29 +64,38 @@ dataset_info:
      num_bytes: 1305092
      num_examples: 4358
    - name: train
-     num_bytes: 546501673
-     num_examples: 1801350
+     num_bytes: 11061733
+     num_examples: 36718
    - name: validation
      num_bytes: 1159292
      num_examples: 3760
-   download_size: 191984949
-   dataset_size: 548966057
- - config_name: wikitext-2-raw-v1
+   download_size: 4721645
+   dataset_size: 13526117
+ - config_name: wikitext-2-v1
    features:
    - name: text
      dtype: string
    splits:
    - name: test
-     num_bytes: 1305092
+     num_bytes: 1270951
      num_examples: 4358
    - name: train
-     num_bytes: 11061733
+     num_bytes: 10918134
      num_examples: 36718
    - name: validation
-     num_bytes: 1159292
+     num_bytes: 1134127
      num_examples: 3760
-   download_size: 4721645
-   dataset_size: 13526117
+   download_size: 4475746
+   dataset_size: 13323212
+ configs:
+ - config_name: wikitext-103-v1
+   data_files:
+   - split: test
+     path: wikitext-103-v1/test-*
+   - split: train
+     path: wikitext-103-v1/train-*
+   - split: validation
+     path: wikitext-103-v1/validation-*
  ---

  # Dataset Card for "wikitext"
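The `configs` block added above maps each split of `wikitext-103-v1` to its Parquet shards, so the `datasets` library can resolve the splits directly from the repository files. A minimal sketch of loading through that mapping, assuming the `datasets` library is installed and the repo id is `wikitext` (the repository this commit belongs to):

```python
# Minimal sketch: load a split that now resolves to the Parquet shards
# listed under `configs` / `data_files`. The repo id "wikitext" is an
# assumption taken from this repository; adjust it if the dataset moves.
from datasets import load_dataset

ds = load_dataset("wikitext", "wikitext-103-v1", split="validation")
print(ds)             # expected: Dataset({features: ['text'], num_rows: 3760})
print(ds[0]["text"])  # first row of the validation split
```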
dataset_infos.json CHANGED
@@ -1 +1,211 @@
- {"wikitext-103-v1": {"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n", "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/", "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wikitext", "config_name": "wikitext-103-v1", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1295579, "num_examples": 4358, "dataset_name": "wikitext"}, "train": {"name": "train", "num_bytes": 545142639, "num_examples": 1801350, "dataset_name": "wikitext"}, "validation": {"name": "validation", "num_bytes": 1154755, "num_examples": 3760, "dataset_name": "wikitext"}}, "download_checksums": {"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip": {"num_bytes": 190229076, "checksum": "242ba0f20b329cfdf1ccc61e9e9e5b59becf189db7f7a81cd2a0e2fc31539590"}}, "download_size": 190229076, "post_processing_size": null, "dataset_size": 547592973, "size_in_bytes": 737822049}, "wikitext-2-v1": {"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n", "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/", "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wikitext", "config_name": "wikitext-2-v1", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1270951, "num_examples": 4358, "dataset_name": "wikitext"}, "train": {"name": "train", "num_bytes": 10918134, "num_examples": 36718, "dataset_name": "wikitext"}, "validation": {"name": "validation", "num_bytes": 1134127, "num_examples": 3760, "dataset_name": "wikitext"}}, "download_checksums": {"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip": {"num_bytes": 4475746, "checksum": "92675f1d63015c1c8b51f1656a52d5bdbc33aafa60cc47a218a66e7ee817488c"}}, "download_size": 4475746, "post_processing_size": null, "dataset_size": 13323212, "size_in_bytes": 17798958}, "wikitext-103-raw-v1": {"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. 
The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n", "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/", "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wikitext", "config_name": "wikitext-103-raw-v1", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1305092, "num_examples": 4358, "dataset_name": "wikitext"}, "train": {"name": "train", "num_bytes": 546501673, "num_examples": 1801350, "dataset_name": "wikitext"}, "validation": {"name": "validation", "num_bytes": 1159292, "num_examples": 3760, "dataset_name": "wikitext"}}, "download_checksums": {"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip": {"num_bytes": 191984949, "checksum": "91c00ae287f0d699e18605c84afc9e45c192bc6b7797ff8837e5474655a33794"}}, "download_size": 191984949, "post_processing_size": null, "dataset_size": 548966057, "size_in_bytes": 740951006}, "wikitext-2-raw-v1": {"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n", "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/", "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wikitext", "config_name": "wikitext-2-raw-v1", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1305092, "num_examples": 4358, "dataset_name": "wikitext"}, "train": {"name": "train", "num_bytes": 11061733, "num_examples": 36718, "dataset_name": "wikitext"}, "validation": {"name": "validation", "num_bytes": 1159292, "num_examples": 3760, "dataset_name": "wikitext"}}, "download_checksums": {"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip": {"num_bytes": 4721645, "checksum": "ef7edb566e3e2b2d31b29c1fdb0c89a4cc683597484c3dc2517919c615435a11"}}, "download_size": 4721645, "post_processing_size": null, "dataset_size": 13526117, "size_in_bytes": 18247762}}
+ {
+   "wikitext-103-v1": {
+     "description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n",
+     "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
+     "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/",
+     "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)",
+     "features": {
+       "text": {
+         "dtype": "string",
+         "_type": "Value"
+       }
+     },
+     "builder_name": "wikitext",
+     "dataset_name": "wikitext",
+     "config_name": "wikitext-103-v1",
+     "version": {
+       "version_str": "1.0.0",
+       "major": 1,
+       "minor": 0,
+       "patch": 0
+     },
+     "splits": {
+       "test": {
+         "name": "test",
+         "num_bytes": 1295575,
+         "num_examples": 4358,
+         "dataset_name": null
+       },
+       "train": {
+         "name": "train",
+         "num_bytes": 545141915,
+         "num_examples": 1801350,
+         "dataset_name": null
+       },
+       "validation": {
+         "name": "validation",
+         "num_bytes": 1154751,
+         "num_examples": 3760,
+         "dataset_name": null
+       }
+     },
+     "download_size": 313093838,
+     "dataset_size": 547592241,
+     "size_in_bytes": 860686079
+   },
+   "wikitext-2-v1": {
+     "description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n",
+     "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
+     "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/",
+     "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)",
+     "features": {
+       "text": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       }
+     },
+     "post_processed": null,
+     "supervised_keys": null,
+     "task_templates": null,
+     "builder_name": "wikitext",
+     "config_name": "wikitext-2-v1",
+     "version": {
+       "version_str": "1.0.0",
+       "description": null,
+       "major": 1,
+       "minor": 0,
+       "patch": 0
+     },
+     "splits": {
+       "test": {
+         "name": "test",
+         "num_bytes": 1270951,
+         "num_examples": 4358,
+         "dataset_name": "wikitext"
+       },
+       "train": {
+         "name": "train",
+         "num_bytes": 10918134,
+         "num_examples": 36718,
+         "dataset_name": "wikitext"
+       },
+       "validation": {
+         "name": "validation",
+         "num_bytes": 1134127,
+         "num_examples": 3760,
+         "dataset_name": "wikitext"
+       }
+     },
+     "download_checksums": {
+       "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip": {
+         "num_bytes": 4475746,
+         "checksum": "92675f1d63015c1c8b51f1656a52d5bdbc33aafa60cc47a218a66e7ee817488c"
+       }
+     },
+     "download_size": 4475746,
+     "post_processing_size": null,
+     "dataset_size": 13323212,
+     "size_in_bytes": 17798958
+   },
+   "wikitext-103-raw-v1": {
+     "description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n",
+     "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
+     "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/",
+     "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)",
+     "features": {
+       "text": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       }
+     },
+     "post_processed": null,
+     "supervised_keys": null,
+     "task_templates": null,
+     "builder_name": "wikitext",
+     "config_name": "wikitext-103-raw-v1",
+     "version": {
+       "version_str": "1.0.0",
+       "description": null,
+       "major": 1,
+       "minor": 0,
+       "patch": 0
+     },
+     "splits": {
+       "test": {
+         "name": "test",
+         "num_bytes": 1305092,
+         "num_examples": 4358,
+         "dataset_name": "wikitext"
+       },
+       "train": {
+         "name": "train",
+         "num_bytes": 546501673,
+         "num_examples": 1801350,
+         "dataset_name": "wikitext"
+       },
+       "validation": {
+         "name": "validation",
+         "num_bytes": 1159292,
+         "num_examples": 3760,
+         "dataset_name": "wikitext"
+       }
+     },
+     "download_checksums": {
+       "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip": {
+         "num_bytes": 191984949,
+         "checksum": "91c00ae287f0d699e18605c84afc9e45c192bc6b7797ff8837e5474655a33794"
+       }
+     },
+     "download_size": 191984949,
+     "post_processing_size": null,
+     "dataset_size": 548966057,
+     "size_in_bytes": 740951006
+   },
+   "wikitext-2-raw-v1": {
+     "description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n",
+     "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
+     "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/",
+     "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)",
+     "features": {
+       "text": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       }
+     },
+     "post_processed": null,
+     "supervised_keys": null,
+     "task_templates": null,
+     "builder_name": "wikitext",
+     "config_name": "wikitext-2-raw-v1",
+     "version": {
+       "version_str": "1.0.0",
+       "description": null,
+       "major": 1,
+       "minor": 0,
+       "patch": 0
+     },
+     "splits": {
+       "test": {
+         "name": "test",
+         "num_bytes": 1305092,
+         "num_examples": 4358,
+         "dataset_name": "wikitext"
+       },
+       "train": {
+         "name": "train",
+         "num_bytes": 11061733,
+         "num_examples": 36718,
+         "dataset_name": "wikitext"
+       },
+       "validation": {
+         "name": "validation",
+         "num_bytes": 1159292,
+         "num_examples": 3760,
+         "dataset_name": "wikitext"
+       }
+     },
+     "download_checksums": {
+       "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip": {
+         "num_bytes": 4721645,
+         "checksum": "ef7edb566e3e2b2d31b29c1fdb0c89a4cc683597484c3dc2517919c615435a11"
+       }
+     },
+     "download_size": 4721645,
+     "post_processing_size": null,
+     "dataset_size": 13526117,
+     "size_in_bytes": 18247762
+   }
+ }
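The regenerated `dataset_infos.json` above keeps the per-config split sizes; for `wikitext-103-v1` the `download_size` now reflects the Parquet shards (313093838 bytes, the sum of the four files added below) and the original zip checksum entry is gone. A minimal sketch for reading these numbers back without downloading the data, assuming `datasets` is installed and the repo id is `wikitext`:

```python
# Minimal sketch: inspect the recorded split metadata for one config.
# Only the dataset metadata is fetched, not the data files themselves.
from datasets import load_dataset_builder

builder = load_dataset_builder("wikitext", "wikitext-103-v1")
info = builder.info
print(info.download_size, info.dataset_size)  # expected: 313093838 547592241
for name, split in info.splits.items():
    print(name, split.num_examples, split.num_bytes)
```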
wikitext-103-v1/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:abdfc9f83b1103b502924072460d4c92f277c9b49c313cef3e48cfcf7428e125
+ size 721735
wikitext-103-v1/train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2ecca8c3250e79518e45d125f3a9a757d8014f6b2d8435c602be87c1f79ec3b
+ size 155788327
wikitext-103-v1/train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:720f2503551f33c25bb822aad74d699fee4d5331a7373d0c262f1bfb01354fcf
+ size 155928670
wikitext-103-v1/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a586125adab06f115018c43507ac267ea70850ce6218cbb96e08bb3b4db0899b
+ size 655106
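Each file above is committed as a Git LFS pointer (a `version`/`oid`/`size` stub); the Parquet shard itself is stored on the Hub's LFS backend. A minimal sketch of fetching one shard and reading it directly, assuming `huggingface_hub` and `pandas` with a Parquet engine are installed and the repo id is `wikitext`:

```python
# Minimal sketch: download one Parquet shard through the Hub and read it.
# The repo id "wikitext" is assumed; the shard path is taken from this commit.
import pandas as pd
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="wikitext",
    repo_type="dataset",
    filename="wikitext-103-v1/test-00000-of-00001.parquet",
)
df = pd.read_parquet(path)
print(len(df))            # expected: 4358, matching the test split's num_examples
print(df["text"].head())  # first few rows of raw text
```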