jammygrams committed
Commit 5885b20 · 1 Parent(s): 6f01f26
config.json ADDED
@@ -0,0 +1,100 @@
+ {
+   "_name_or_path": "facebook/bart-large",
+   "activation_dropout": 0.1,
+   "activation_function": "gelu",
+   "adapters": {
+     "adapters": {
+       "narrativeqa": "b1017368d7a97b11"
+     },
+     "config_map": {
+       "b1017368d7a97b11": {
+         "adapter_residual_before_ln": false,
+         "cross_adapter": false,
+         "inv_adapter": null,
+         "inv_adapter_reduction_factor": null,
+         "leave_out": [],
+         "ln_after": false,
+         "ln_before": false,
+         "mh_adapter": true,
+         "non_linearity": "swish",
+         "original_ln_after": true,
+         "original_ln_before": false,
+         "output_adapter": true,
+         "reduction_factor": 16,
+         "residual_before_ln": true
+       }
+     },
+     "fusion_config_map": {},
+     "fusions": {}
+   },
+   "add_bias_logits": false,
+   "add_final_layer_norm": false,
+   "architectures": [
+     "BartForConditionalGeneration"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 0,
+   "classif_dropout": 0.1,
+   "classifier_dropout": 0.0,
+   "d_model": 1024,
+   "decoder_attention_heads": 16,
+   "decoder_ffn_dim": 4096,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 12,
+   "decoder_start_token_id": 2,
+   "dropout": 0.1,
+   "early_stopping": true,
+   "encoder_attention_heads": 16,
+   "encoder_ffn_dim": 4096,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 12,
+   "eos_token_id": 2,
+   "forced_bos_token_id": 0,
+   "forced_eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "max_length": 100,
+   "max_position_embeddings": 1024,
+   "model_type": "bart",
+   "no_repeat_ngram_size": 3,
+   "normalize_before": false,
+   "num_beams": 4,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "scale_embedding": false,
+   "task_specific_params": {
+     "summarization": {
+       "length_penalty": 1.0,
+       "max_length": 128,
+       "min_length": 12,
+       "num_beams": 4
+     },
+     "summarization_cnn": {
+       "length_penalty": 2.0,
+       "max_length": 142,
+       "min_length": 56,
+       "num_beams": 4
+     },
+     "summarization_xsum": {
+       "length_penalty": 1.0,
+       "max_length": 62,
+       "min_length": 11,
+       "num_beams": 6
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.11.3",
+   "use_cache": true,
+   "vocab_size": 50265
+ }
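The "adapters" block above is specific to the adapter-transformers fork of Hugging Face Transformers (note "transformers_version": "4.11.3"): it registers one bottleneck adapter named "narrativeqa", with adapters after both the attention and feed-forward sublayers ("mh_adapter" and "output_adapter" both true), swish non-linearity, and reduction factor 16. A minimal loading sketch, assuming adapter-transformers is installed and this commit's files sit in a local directory; the path and the question/context input format are illustrative assumptions, not part of this repo:

```python
# Minimal sketch, assuming the adapter-transformers fork is installed
# (vanilla transformers ignores the "adapters" block in config.json).
# "./checkpoint" is an illustrative local path to the files in this commit.
from transformers import BartForConditionalGeneration, BartTokenizer

model = BartForConditionalGeneration.from_pretrained("./checkpoint")
tokenizer = BartTokenizer.from_pretrained("./checkpoint")

# Activate the adapter registered in config.json before generating.
model.set_active_adapters("narrativeqa")

# Input format is an assumption for illustration only.
inputs = tokenizer("question: Who is the narrator? context: ...",
                   return_tensors="pt", truncation=True, max_length=1024)
output_ids = model.generate(**inputs)  # uses num_beams=4, max_length=100 from config.json
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```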
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c29e771c485cc43397918e6abe52ceab229e6ed30fe18f146aa3c6a6c1ea23e
+ size 50862209
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb816166887207b7ea21185d167d6da0ba177f41baa00d82e300ed39c7a9637c
+ size 1651044481
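The .pt/.pth/.bin entries in this commit are Git LFS pointer files (a version line, a sha256 oid, and a byte size), not the binaries themselves: pytorch_model.bin resolves to ~1.65 GB of full model weights, while optimizer.pt is only ~51 MB, roughly what AdamW state for the adapter parameters alone would occupy. A small sketch for checking a downloaded binary against the oid recorded in the pointer above; the local file name is assumed:

```python
# Sketch: verify a downloaded binary against the sha256 oid recorded in
# the LFS pointer ("pytorch_model.bin" is an assumed local path).
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

expected = "cb816166887207b7ea21185d167d6da0ba177f41baa00d82e300ed39c7a9637c"
assert sha256_of("pytorch_model.bin") == expected
```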
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f048bad90a2efb6cc0c5721b0518c125372847ef3b924127f0af48435cc1fd7
+ size 14567
scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2365d2f7d6d17d401755b5d0e71791ff68b94486ddb37364e00319aeebc6c91
+ size 559
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73f8dcdd3dcb257460bce686c7aeef55f57856948c8e140ec3c404003f819b53
+ size 623
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "facebook/bart-large", "tokenizer_class": "BartTokenizer"}
trainer_state.json ADDED
@@ -0,0 +1,278 @@
+ {
+   "best_metric": 5.976424694061279,
+   "best_model_checkpoint": "saved_models/narrativeqa_adaptertune/checkpoint-4092",
+   "epoch": 1.9996335654085746,
+   "global_step": 4092,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.05,
+       "learning_rate": 9.968067774519389e-05,
+       "loss": 10.6425,
+       "step": 100
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 9.935483870967742e-05,
+       "loss": 6.447,
+       "step": 200
+     },
+     {
+       "epoch": 0.15,
+       "learning_rate": 9.902899967416097e-05,
+       "loss": 6.2146,
+       "step": 300
+     },
+     {
+       "epoch": 0.2,
+       "learning_rate": 9.870316063864451e-05,
+       "loss": 6.1529,
+       "step": 400
+     },
+     {
+       "epoch": 0.24,
+       "learning_rate": 9.837732160312806e-05,
+       "loss": 6.1086,
+       "step": 500
+     },
+     {
+       "epoch": 0.29,
+       "learning_rate": 9.80514825676116e-05,
+       "loss": 6.0845,
+       "step": 600
+     },
+     {
+       "epoch": 0.34,
+       "learning_rate": 9.772564353209514e-05,
+       "loss": 6.065,
+       "step": 700
+     },
+     {
+       "epoch": 0.39,
+       "learning_rate": 9.73998044965787e-05,
+       "loss": 6.0532,
+       "step": 800
+     },
+     {
+       "epoch": 0.44,
+       "learning_rate": 9.707396546106225e-05,
+       "loss": 6.0406,
+       "step": 900
+     },
+     {
+       "epoch": 0.49,
+       "learning_rate": 9.674812642554578e-05,
+       "loss": 6.0327,
+       "step": 1000
+     },
+     {
+       "epoch": 0.54,
+       "learning_rate": 9.642228739002933e-05,
+       "loss": 6.023,
+       "step": 1100
+     },
+     {
+       "epoch": 0.59,
+       "learning_rate": 9.609644835451288e-05,
+       "loss": 6.0156,
+       "step": 1200
+     },
+     {
+       "epoch": 0.64,
+       "learning_rate": 9.577060931899642e-05,
+       "loss": 6.0196,
+       "step": 1300
+     },
+     {
+       "epoch": 0.68,
+       "learning_rate": 9.544477028347996e-05,
+       "loss": 6.0088,
+       "step": 1400
+     },
+     {
+       "epoch": 0.73,
+       "learning_rate": 9.51189312479635e-05,
+       "loss": 6.0029,
+       "step": 1500
+     },
+     {
+       "epoch": 0.78,
+       "learning_rate": 9.479309221244705e-05,
+       "loss": 6.0015,
+       "step": 1600
+     },
+     {
+       "epoch": 0.83,
+       "learning_rate": 9.446725317693061e-05,
+       "loss": 6.0022,
+       "step": 1700
+     },
+     {
+       "epoch": 0.88,
+       "learning_rate": 9.414141414141415e-05,
+       "loss": 5.9995,
+       "step": 1800
+     },
+     {
+       "epoch": 0.93,
+       "learning_rate": 9.381557510589769e-05,
+       "loss": 5.9931,
+       "step": 1900
+     },
+     {
+       "epoch": 0.98,
+       "learning_rate": 9.348973607038124e-05,
+       "loss": 5.9961,
+       "step": 2000
+     },
+     {
+       "epoch": 1.0,
+       "eval_loss": 5.981632232666016,
+       "eval_rouge1": 0.4942,
+       "eval_rouge2": 0.2497,
+       "eval_rougeL": 0.489,
+       "eval_runtime": 642.0603,
+       "eval_samples_per_second": 10.781,
+       "eval_steps_per_second": 2.696,
+       "step": 2046
+     },
+     {
+       "epoch": 1.03,
+       "learning_rate": 9.316389703486478e-05,
+       "loss": 6.0409,
+       "step": 2100
+     },
+     {
+       "epoch": 1.08,
+       "learning_rate": 9.283805799934833e-05,
+       "loss": 5.9926,
+       "step": 2200
+     },
+     {
+       "epoch": 1.12,
+       "learning_rate": 9.251221896383187e-05,
+       "loss": 5.9925,
+       "step": 2300
+     },
+     {
+       "epoch": 1.17,
+       "learning_rate": 9.218963831867058e-05,
+       "loss": 5.9883,
+       "step": 2400
+     },
+     {
+       "epoch": 1.22,
+       "learning_rate": 9.186379928315413e-05,
+       "loss": 5.9822,
+       "step": 2500
+     },
+     {
+       "epoch": 1.27,
+       "learning_rate": 9.153796024763767e-05,
+       "loss": 5.9871,
+       "step": 2600
+     },
+     {
+       "epoch": 1.32,
+       "learning_rate": 9.121212121212121e-05,
+       "loss": 5.9836,
+       "step": 2700
+     },
+     {
+       "epoch": 1.37,
+       "learning_rate": 9.088628217660477e-05,
+       "loss": 5.9832,
+       "step": 2800
+     },
+     {
+       "epoch": 1.42,
+       "learning_rate": 9.05604431410883e-05,
+       "loss": 5.9885,
+       "step": 2900
+     },
+     {
+       "epoch": 1.47,
+       "learning_rate": 9.023460410557186e-05,
+       "loss": 5.9866,
+       "step": 3000
+     },
+     {
+       "epoch": 1.51,
+       "learning_rate": 8.99087650700554e-05,
+       "loss": 5.9802,
+       "step": 3100
+     },
+     {
+       "epoch": 1.56,
+       "learning_rate": 8.958292603453894e-05,
+       "loss": 5.9789,
+       "step": 3200
+     },
+     {
+       "epoch": 1.61,
+       "learning_rate": 8.925708699902249e-05,
+       "loss": 5.9857,
+       "step": 3300
+     },
+     {
+       "epoch": 1.66,
+       "learning_rate": 8.893124796350603e-05,
+       "loss": 5.9781,
+       "step": 3400
+     },
+     {
+       "epoch": 1.71,
+       "learning_rate": 8.860540892798957e-05,
+       "loss": 5.9747,
+       "step": 3500
+     },
+     {
+       "epoch": 1.76,
+       "learning_rate": 8.827956989247312e-05,
+       "loss": 5.9757,
+       "step": 3600
+     },
+     {
+       "epoch": 1.81,
+       "learning_rate": 8.795373085695668e-05,
+       "loss": 5.9775,
+       "step": 3700
+     },
+     {
+       "epoch": 1.86,
+       "learning_rate": 8.762789182144022e-05,
+       "loss": 5.9787,
+       "step": 3800
+     },
+     {
+       "epoch": 1.91,
+       "learning_rate": 8.730205278592376e-05,
+       "loss": 5.9827,
+       "step": 3900
+     },
+     {
+       "epoch": 1.95,
+       "learning_rate": 8.69762137504073e-05,
+       "loss": 5.9755,
+       "step": 4000
+     },
+     {
+       "epoch": 2.0,
+       "eval_loss": 5.976424694061279,
+       "eval_rouge1": 0.5089,
+       "eval_rouge2": 0.2563,
+       "eval_rougeL": 0.5034,
+       "eval_runtime": 639.6582,
+       "eval_samples_per_second": 10.821,
+       "eval_steps_per_second": 2.706,
+       "step": 4092
+     }
+   ],
+   "max_steps": 30690,
+   "num_train_epochs": 15,
+   "total_flos": 1.727047442376622e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
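trainer_state.json records two completed epochs out of a planned 15 (max_steps 30690 at 2046 steps per epoch), with the best checkpoint chosen by eval_loss at step 4092. A small sketch for pulling the train and eval curves out of log_history:

```python
# Sketch: separate training-loss entries from evaluation entries in
# log_history (trainer_state.json assumed to be in the working directory).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

train_log = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_log = [(e["step"], e["eval_loss"], e["eval_rouge1"])
            for e in state["log_history"] if "eval_loss" in e]

print(train_log[-1])  # (4000, 5.9755)
print(eval_log)       # [(2046, 5.9816..., 0.4942), (4092, 5.9764..., 0.5089)]
```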
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ceffd1a1b30d80d345b19e5563597de905436a28d7790560a5c1b9f15ab3cda7
+ size 2991
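training_args.bin is a torch-pickled transformers.TrainingArguments object rather than a tensor file; inspecting it requires full unpickling, which is only safe for trusted files. A sketch, assuming a recent PyTorch (the weights_only argument of torch.load) and a transformers version compatible with the 4.11.3 that wrote it:

```python
# Sketch: training_args.bin was written with torch.save; loading it
# unpickles an arbitrary object, so weights_only must be False
# (only do this for files you trust).
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs)
```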
vocab.json ADDED
The diff for this file is too large to render. See raw diff