nlparabic committed
Commit e71621c
1 Parent(s): 91fa94a

End of training

README.md CHANGED
@@ -17,11 +17,11 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [aubmindlab/aragpt2-base](https://huggingface.co/aubmindlab/aragpt2-base) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.0798
- - Bleu: 0.1112
- - Rouge1: 0.4634
- - Rouge2: 0.2356
- - Rougel: 0.4600
+ - Loss: 0.0752
+ - Bleu: 0.0928
+ - Rouge1: 0.4343
+ - Rouge2: 0.2043
+ - Rougel: 0.4304
 
  ## Model description
 
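The updated README reports the metrics of the best checkpoint (epoch 6, restored at the end of training; see eval_results.json and trainer_state.json below) rather than the last epoch's in-training evaluation. A minimal usage sketch for the fine-tuned model, assuming the weights are pushed to the Hub; the repo id below is hypothetical (inferred from the local output directory name) and should be replaced with the real repo id or a local checkpoint path:

```python
# Hedged sketch: "nlparabic/res_nw_dj_aragpt2-base" is a hypothetical repo id;
# swap in the actual Hub repo id or a local checkpoint directory.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "nlparabic/res_nw_dj_aragpt2-base"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("مرحبا", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=40, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```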
all_results.json ADDED
@@ -0,0 +1,19 @@
+ {
+     "epoch": 11.0,
+     "eval_bleu": 0.09284298963391195,
+     "eval_loss": 0.07523266971111298,
+     "eval_rouge1": 0.434313103224844,
+     "eval_rouge2": 0.2043067195259623,
+     "eval_rougeL": 0.430449964990971,
+     "eval_runtime": 119.8336,
+     "eval_samples": 5380,
+     "eval_samples_per_second": 44.896,
+     "eval_steps_per_second": 5.616,
+     "perplexity": 1.0781349710566281,
+     "total_flos": 1.23182992982016e+17,
+     "train_loss": 0.09744292338859069,
+     "train_runtime": 17256.5383,
+     "train_samples": 21429,
+     "train_samples_per_second": 24.836,
+     "train_steps_per_second": 3.105
+ }
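The perplexity field is consistent with exponentiating the evaluation loss, which is how the Hugging Face causal-LM example scripts typically derive it (an assumption about this particular training script, but the arithmetic checks out):

```python
import math

eval_loss = 0.07523266971111298   # eval_loss from all_results.json
print(math.exp(eval_loss))        # 1.0781349710566281 -> matches the reported "perplexity"
```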
egy_training_log.txt CHANGED
@@ -298,3 +298,5 @@ INFO:root:Epoch 10.0: Train Loss = 0.0517, Eval Loss = 0.07660207897424698
  INFO:absl:Using default tokenizer.
  INFO:root:Epoch 11.0: Train Loss = 0.0486, Eval Loss = 0.07839509844779968
  INFO:absl:Using default tokenizer.
+ INFO:__main__:*** Evaluate ***
+ INFO:absl:Using default tokenizer.
eval_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+     "epoch": 11.0,
+     "eval_bleu": 0.09284298963391195,
+     "eval_loss": 0.07523266971111298,
+     "eval_rouge1": 0.434313103224844,
+     "eval_rouge2": 0.2043067195259623,
+     "eval_rougeL": 0.430449964990971,
+     "eval_runtime": 119.8336,
+     "eval_samples": 5380,
+     "eval_samples_per_second": 44.896,
+     "eval_steps_per_second": 5.616,
+     "perplexity": 1.0781349710566281
+ }
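The repeated "INFO:absl:Using default tokenizer." lines in egy_training_log.txt are emitted by the rouge_score backend, which suggests (but does not confirm) that eval_bleu and eval_rouge* were computed with the `evaluate` library over decoded generations. A sketch under that assumption, with placeholder texts:

```python
import evaluate

bleu = evaluate.load("bleu")
rouge = evaluate.load("rouge")    # backed by rouge_score, the source of the absl log lines

preds = ["a generated sentence"]  # placeholder: decoded model outputs
refs = ["a reference sentence"]   # placeholder: reference texts

print(bleu.compute(predictions=preds, references=[[r] for r in refs])["bleu"])
scores = rouge.compute(predictions=preds, references=refs)
print(scores["rouge1"], scores["rouge2"], scores["rougeL"])
```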
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 11.0,
+     "total_flos": 1.23182992982016e+17,
+     "train_loss": 0.09744292338859069,
+     "train_runtime": 17256.5383,
+     "train_samples": 21429,
+     "train_samples_per_second": 24.836,
+     "train_steps_per_second": 3.105
+ }
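The throughput figures appear to be computed against the full 20-epoch schedule rather than the 11 epochs actually completed before early stopping; the interpretation is an assumption, but the check below uses only values reported in this commit:

```python
train_runtime = 17256.5383        # seconds, from train_results.json
max_steps = 53580                 # 20 scheduled epochs x 2679 steps/epoch (trainer_state.json)
train_samples = 21429

print(max_steps / train_runtime)             # ~3.105  -> matches train_steps_per_second
print(train_samples * 20 / train_runtime)    # ~24.84  -> matches train_samples_per_second
print(29469 / train_runtime)                 # ~1.71   -> steps per second actually executed
```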
train_vs_val_loss.png ADDED
trainer_state.json ADDED
@@ -0,0 +1,260 @@
+ {
+   "best_metric": 0.07523266971111298,
+   "best_model_checkpoint": "/home/iais_marenpielka/Bouthaina/res_nw_dj_aragpt2-base/checkpoint-16074",
+   "epoch": 11.0,
+   "eval_steps": 500,
+   "global_step": 29469,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "grad_norm": 0.1490328013896942,
+       "learning_rate": 4.7947437829691034e-05,
+       "loss": 0.435,
+       "step": 2679
+     },
+     {
+       "epoch": 1.0,
+       "eval_bleu": 0.022539363481404024,
+       "eval_loss": 0.08990845084190369,
+       "eval_rouge1": 0.26967504263175696,
+       "eval_rouge2": 0.06811111838722575,
+       "eval_rougeL": 0.2648760311441921,
+       "eval_runtime": 118.8639,
+       "eval_samples_per_second": 45.262,
+       "eval_steps_per_second": 5.662,
+       "step": 2679
+     },
+     {
+       "epoch": 2.0,
+       "grad_norm": 0.1779935508966446,
+       "learning_rate": 4.542388847023361e-05,
+       "loss": 0.0908,
+       "step": 5358
+     },
+     {
+       "epoch": 2.0,
+       "eval_bleu": 0.0494923940085718,
+       "eval_loss": 0.08216600120067596,
+       "eval_rouge1": 0.34383088385557475,
+       "eval_rouge2": 0.12325553058649232,
+       "eval_rougeL": 0.33940964680072816,
+       "eval_runtime": 181.022,
+       "eval_samples_per_second": 29.72,
+       "eval_steps_per_second": 3.718,
+       "step": 5358
+     },
+     {
+       "epoch": 3.0,
+       "grad_norm": 0.13422226905822754,
+       "learning_rate": 4.290033911077619e-05,
+       "loss": 0.0808,
+       "step": 8037
+     },
+     {
+       "epoch": 3.0,
+       "eval_bleu": 0.06700443598211842,
+       "eval_loss": 0.0785822868347168,
+       "eval_rouge1": 0.38347464472946946,
+       "eval_rouge2": 0.15815766995910013,
+       "eval_rougeL": 0.37897410206784377,
+       "eval_runtime": 118.7728,
+       "eval_samples_per_second": 45.297,
+       "eval_steps_per_second": 5.666,
+       "step": 8037
+     },
+     {
+       "epoch": 4.0,
+       "grad_norm": 0.17974382638931274,
+       "learning_rate": 4.0376789751318766e-05,
+       "loss": 0.0738,
+       "step": 10716
+     },
+     {
+       "epoch": 4.0,
+       "eval_bleu": 0.0782017656533744,
+       "eval_loss": 0.07652640342712402,
+       "eval_rouge1": 0.4066218577182069,
+       "eval_rouge2": 0.1797711825940213,
+       "eval_rougeL": 0.4024533186612807,
+       "eval_runtime": 302.5488,
+       "eval_samples_per_second": 17.782,
+       "eval_steps_per_second": 2.224,
+       "step": 10716
+     },
+     {
+       "epoch": 5.0,
+       "grad_norm": 0.1600717157125473,
+       "learning_rate": 3.785324039186134e-05,
+       "loss": 0.0681,
+       "step": 13395
+     },
+     {
+       "epoch": 5.0,
+       "eval_bleu": 0.088003678878398,
+       "eval_loss": 0.07556667178869247,
+       "eval_rouge1": 0.42424169752606233,
+       "eval_rouge2": 0.19635936454770994,
+       "eval_rougeL": 0.42040996405014475,
+       "eval_runtime": 119.8742,
+       "eval_samples_per_second": 44.88,
+       "eval_steps_per_second": 5.614,
+       "step": 13395
+     },
+     {
+       "epoch": 6.0,
+       "grad_norm": 0.17717573046684265,
+       "learning_rate": 3.532969103240392e-05,
+       "loss": 0.0632,
+       "step": 16074
+     },
+     {
+       "epoch": 6.0,
+       "eval_bleu": 0.09284298963391195,
+       "eval_loss": 0.07523266971111298,
+       "eval_rouge1": 0.434313103224844,
+       "eval_rouge2": 0.2043067195259623,
+       "eval_rougeL": 0.430449964990971,
+       "eval_runtime": 119.9224,
+       "eval_samples_per_second": 44.862,
+       "eval_steps_per_second": 5.612,
+       "step": 16074
+     },
+     {
+       "epoch": 7.0,
+       "grad_norm": 0.20939625799655914,
+       "learning_rate": 3.28061416729465e-05,
+       "loss": 0.059,
+       "step": 18753
+     },
+     {
+       "epoch": 7.0,
+       "eval_bleu": 0.09955364884197439,
+       "eval_loss": 0.07554977387189865,
+       "eval_rouge1": 0.4438832318000667,
+       "eval_rouge2": 0.21524219506914244,
+       "eval_rougeL": 0.44011453518152965,
+       "eval_runtime": 119.8903,
+       "eval_samples_per_second": 44.874,
+       "eval_steps_per_second": 5.613,
+       "step": 18753
+     },
+     {
+       "epoch": 8.0,
+       "grad_norm": 0.18116699159145355,
+       "learning_rate": 3.028259231348907e-05,
+       "loss": 0.0552,
+       "step": 21432
+     },
+     {
+       "epoch": 8.0,
+       "eval_bleu": 0.10150066662504155,
+       "eval_loss": 0.07605580985546112,
+       "eval_rouge1": 0.45000685735983936,
+       "eval_rouge2": 0.2217308384368585,
+       "eval_rougeL": 0.44629486754481273,
+       "eval_runtime": 180.8168,
+       "eval_samples_per_second": 29.754,
+       "eval_steps_per_second": 3.722,
+       "step": 21432
+     },
+     {
+       "epoch": 9.0,
+       "grad_norm": 0.20604003965854645,
+       "learning_rate": 2.775904295403165e-05,
+       "loss": 0.0517,
+       "step": 24111
+     },
+     {
+       "epoch": 9.0,
+       "eval_bleu": 0.10504101584036675,
+       "eval_loss": 0.07660207897424698,
+       "eval_rouge1": 0.4527224851378999,
+       "eval_rouge2": 0.2249827236829457,
+       "eval_rougeL": 0.44886876248725205,
+       "eval_runtime": 241.7568,
+       "eval_samples_per_second": 22.254,
+       "eval_steps_per_second": 2.784,
+       "step": 24111
+     },
+     {
+       "epoch": 10.0,
+       "grad_norm": 0.17469635605812073,
+       "learning_rate": 2.523549359457423e-05,
+       "loss": 0.0486,
+       "step": 26790
+     },
+     {
+       "epoch": 10.0,
+       "eval_bleu": 0.10926649986209491,
+       "eval_loss": 0.07839509844779968,
+       "eval_rouge1": 0.4612349211854463,
+       "eval_rouge2": 0.23378642120255436,
+       "eval_rougeL": 0.457790117200362,
+       "eval_runtime": 180.2876,
+       "eval_samples_per_second": 29.841,
+       "eval_steps_per_second": 3.733,
+       "step": 26790
+     },
+     {
+       "epoch": 11.0,
+       "grad_norm": 0.19318881630897522,
+       "learning_rate": 2.2711944235116806e-05,
+       "loss": 0.0458,
+       "step": 29469
+     },
+     {
+       "epoch": 11.0,
+       "eval_bleu": 0.11121120716108289,
+       "eval_loss": 0.07983218133449554,
+       "eval_rouge1": 0.4633689566450456,
+       "eval_rouge2": 0.2356348733383184,
+       "eval_rougeL": 0.4600017445894788,
+       "eval_runtime": 242.0184,
+       "eval_samples_per_second": 22.23,
+       "eval_steps_per_second": 2.781,
+       "step": 29469
+     },
+     {
+       "epoch": 11.0,
+       "step": 29469,
+       "total_flos": 1.23182992982016e+17,
+       "train_loss": 0.09744292338859069,
+       "train_runtime": 17256.5383,
+       "train_samples_per_second": 24.836,
+       "train_steps_per_second": 3.105
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 53580,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 20,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "EarlyStoppingCallback": {
+       "args": {
+         "early_stopping_patience": 5,
+         "early_stopping_threshold": 0.0
+       },
+       "attributes": {
+         "early_stopping_patience_counter": 0
+       }
+     },
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.23182992982016e+17,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
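The state records the best eval_loss (0.0752) at checkpoint-16074 (epoch 6) and should_training_stop: true at epoch 11 of a scheduled 20, i.e. five consecutive epochs without improvement, matching the EarlyStoppingCallback patience of 5. A sketch of a Trainer setup consistent with this state; any argument not present in the JSON above is an assumption:

```python
from transformers import (AutoModelForCausalLM, AutoTokenizer, EarlyStoppingCallback,
                          Trainer, TrainingArguments)

tokenizer = AutoTokenizer.from_pretrained("aubmindlab/aragpt2-base")
model = AutoModelForCausalLM.from_pretrained("aubmindlab/aragpt2-base")

args = TrainingArguments(
    output_dir="res_nw_dj_aragpt2-base",
    num_train_epochs=20,                    # scheduled; the run stopped early at epoch 11
    per_device_train_batch_size=8,          # "train_batch_size": 8
    eval_strategy="epoch",                  # log_history shows one evaluation per epoch (assumed)
    save_strategy="epoch",
    load_best_model_at_end=True,            # best checkpoint = epoch 6 (eval_loss 0.0752)
    metric_for_best_model="eval_loss",
    greater_is_better=False,
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=None,                     # placeholders: the datasets are not part of this commit
    eval_dataset=None,
    callbacks=[EarlyStoppingCallback(early_stopping_patience=5)],
)
```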