nlparabic committed on
Commit 15d5db4
1 Parent(s): 8e7ec25

End of training

README.md CHANGED
@@ -18,11 +18,11 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [riotu-lab/ArabianGPT-01B](https://huggingface.co/riotu-lab/ArabianGPT-01B) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.4497
- - Bleu: 0.4202
- - Rouge1: 0.6498
- - Rouge2: 0.3905
- - Rougel: 0.6488
+ - Loss: 0.4179
+ - Bleu: 0.4093
+ - Rouge1: 0.6309
+ - Rouge2: 0.3587
+ - Rougel: 0.6298
 
  ## Model description
 
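The `Bleu` and `Rouge*` figures above (and the matching `eval_*` fields in the result files below) are corpus-level BLEU and ROUGE scores. The evaluation code itself is not part of this commit, so the snippet below is only a sketch of how such numbers are typically produced with the Hugging Face `evaluate` package; the prediction/reference strings are placeholders, not data from this model. The repeated `INFO:absl:Using default tokenizer.` lines in `egy_training_log.txt` are what the underlying `rouge_score` backend logs on every scoring call, which is consistent with this kind of setup.

```python
# Minimal sketch (assumed workflow, not the author's script): scoring generated text
# against references with the `evaluate` package, yielding the kind of BLEU/ROUGE
# values reported in the README and in eval_results.json.
import evaluate

predictions = ["نص مولد من النموذج"]   # hypothetical model outputs
references = ["النص المرجعي الصحيح"]   # hypothetical gold references

bleu = evaluate.load("bleu")
rouge = evaluate.load("rouge")  # wraps rouge_score, which logs "Using default tokenizer."

bleu_result = bleu.compute(predictions=predictions, references=[[r] for r in references])
rouge_result = rouge.compute(predictions=predictions, references=references)

print(bleu_result["bleu"])          # -> "Bleu"
print(rouge_result["rouge1"],       # -> "Rouge1"
      rouge_result["rouge2"],       # -> "Rouge2"
      rouge_result["rougeL"])       # -> "Rougel"
```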
all_results.json CHANGED
@@ -1,19 +1,19 @@
  {
- "epoch": 14.0,
- "eval_bleu": 0.23736106874390867,
- "eval_loss": 0.4183219075202942,
- "eval_rouge1": 0.6213217050496613,
- "eval_rouge2": 0.3629928034660268,
- "eval_rougeL": 0.6194945230812778,
- "eval_runtime": 6.133,
- "eval_samples": 1675,
- "eval_samples_per_second": 273.111,
- "eval_steps_per_second": 34.241,
- "perplexity": 1.5194097050971664,
- "total_flos": 6127298150400000.0,
- "train_loss": 0.03383859399638808,
- "train_runtime": 1080.9024,
- "train_samples": 6700,
- "train_samples_per_second": 123.97,
- "train_steps_per_second": 15.506
+ "epoch": 9.0,
+ "eval_bleu": 0.4093436818765597,
+ "eval_loss": 0.41785159707069397,
+ "eval_rouge1": 0.6308594985430711,
+ "eval_rouge2": 0.35871585587023913,
+ "eval_rougeL": 0.6297639876498756,
+ "eval_runtime": 5.4884,
+ "eval_samples": 1672,
+ "eval_samples_per_second": 304.643,
+ "eval_steps_per_second": 38.08,
+ "perplexity": 1.5186952788498689,
+ "total_flos": 3930158776320000.0,
+ "train_loss": 0.40989596332913064,
+ "train_runtime": 1923.2663,
+ "train_samples": 6685,
+ "train_samples_per_second": 69.517,
+ "train_steps_per_second": 8.694
  }
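The `perplexity` field in `all_results.json` (and in `eval_results.json` below) is simply the exponential of `eval_loss`, so the two new values are consistent:

```python
# Quick consistency check: perplexity = exp(eval_loss).
import math

eval_loss = 0.41785159707069397
print(math.exp(eval_loss))  # ~ 1.5186952788498689, the reported perplexity
```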
egy_training_log.txt CHANGED
@@ -162,3 +162,5 @@ INFO:root:Epoch 8.0: Train Loss = 0.1492, Eval Loss = 0.4348294138908386
  INFO:absl:Using default tokenizer.
  INFO:root:Epoch 9.0: Train Loss = 0.1293, Eval Loss = 0.44312888383865356
  INFO:absl:Using default tokenizer.
+ INFO:__main__:*** Evaluate ***
+ INFO:absl:Using default tokenizer.
eval_results.json CHANGED
@@ -1,13 +1,13 @@
  {
- "epoch": 14.0,
- "eval_bleu": 0.23736106874390867,
- "eval_loss": 0.4183219075202942,
- "eval_rouge1": 0.6213217050496613,
- "eval_rouge2": 0.3629928034660268,
- "eval_rougeL": 0.6194945230812778,
- "eval_runtime": 6.133,
- "eval_samples": 1675,
- "eval_samples_per_second": 273.111,
- "eval_steps_per_second": 34.241,
- "perplexity": 1.5194097050971664
+ "epoch": 9.0,
+ "eval_bleu": 0.4093436818765597,
+ "eval_loss": 0.41785159707069397,
+ "eval_rouge1": 0.6308594985430711,
+ "eval_rouge2": 0.35871585587023913,
+ "eval_rougeL": 0.6297639876498756,
+ "eval_runtime": 5.4884,
+ "eval_samples": 1672,
+ "eval_samples_per_second": 304.643,
+ "eval_steps_per_second": 38.08,
+ "perplexity": 1.5186952788498689
  }
train_results.json CHANGED
@@ -1,9 +1,9 @@
  {
- "epoch": 14.0,
- "total_flos": 6127298150400000.0,
- "train_loss": 0.03383859399638808,
- "train_runtime": 1080.9024,
- "train_samples": 6700,
- "train_samples_per_second": 123.97,
- "train_steps_per_second": 15.506
+ "epoch": 9.0,
+ "total_flos": 3930158776320000.0,
+ "train_loss": 0.40989596332913064,
+ "train_runtime": 1923.2663,
+ "train_samples": 6685,
+ "train_samples_per_second": 69.517,
+ "train_steps_per_second": 8.694
  }
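A note on the throughput fields in `train_results.json`: they appear to be computed against the full scheduled run (20 epochs and 16720 optimizer steps per `trainer_state.json` below) rather than the 9 epochs actually completed in the 1923-second run, which is presumably why they look high. A quick check of that reading:

```python
# Reproducing the throughput fields under the assumption that they divide the
# *scheduled* workload (20 epochs / 16720 steps) by the measured runtime.
train_runtime = 1923.2663   # seconds, from train_results.json
train_samples = 6685
num_train_epochs = 20       # from trainer_state.json
max_steps = 16720           # from trainer_state.json

print(train_samples * num_train_epochs / train_runtime)  # ~ 69.517 (train_samples_per_second)
print(max_steps / train_runtime)                         # ~ 8.694  (train_steps_per_second)
```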
train_vs_val_loss.png CHANGED
trainer_state.json CHANGED
@@ -1,291 +1,196 @@
  {
- "best_metric": 0.4183219075202942,
- "best_model_checkpoint": "/home/iais_marenpielka/Bouthaina/res_nw_gulf/checkpoint-3352",
- "epoch": 14.0,
+ "best_metric": 0.41785159707069397,
+ "best_model_checkpoint": "/home/iais_marenpielka/Bouthaina/res_nw_gulf/checkpoint-3344",
+ "epoch": 9.0,
  "eval_steps": 500,
- "global_step": 11732,
+ "global_step": 7524,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
  "epoch": 1.0,
- "grad_norm": 1.1270569562911987,
- "learning_rate": 4.8960639606396064e-05,
- "loss": 1.9575,
- "step": 838
+ "grad_norm": 1.0378659963607788,
+ "learning_rate": 4.896424167694204e-05,
+ "loss": 1.8893,
+ "step": 836
  },
  {
  "epoch": 1.0,
- "eval_bleu": 0.17899151773967856,
- "eval_loss": 0.4805418848991394,
- "eval_rouge1": 0.5300700267573193,
- "eval_rouge2": 0.25846419107204655,
- "eval_rougeL": 0.5287371290578213,
- "eval_runtime": 26.4765,
- "eval_samples_per_second": 63.264,
- "eval_steps_per_second": 7.932,
- "step": 838
+ "eval_bleu": 0.3817836670274851,
+ "eval_loss": 0.4814806580543518,
+ "eval_rouge1": 0.53445667160678,
+ "eval_rouge2": 0.2556577725762693,
+ "eval_rougeL": 0.5334625759517182,
+ "eval_runtime": 13.5021,
+ "eval_samples_per_second": 123.833,
+ "eval_steps_per_second": 15.479,
+ "step": 836
  },
  {
  "epoch": 2.0,
- "grad_norm": 0.8918192982673645,
- "learning_rate": 4.6383763837638376e-05,
- "loss": 0.422,
- "step": 1676
+ "grad_norm": 1.1964277029037476,
+ "learning_rate": 4.638717632552405e-05,
+ "loss": 0.4211,
+ "step": 1672
  },
  {
  "epoch": 2.0,
- "eval_bleu": 0.2111987576275603,
- "eval_loss": 0.4326799809932709,
- "eval_rouge1": 0.5789346222407353,
- "eval_rouge2": 0.3115433580134526,
- "eval_rougeL": 0.5777838651600141,
- "eval_runtime": 36.5489,
- "eval_samples_per_second": 45.829,
- "eval_steps_per_second": 5.746,
- "step": 1676
+ "eval_bleu": 0.39468460167151326,
+ "eval_loss": 0.4300891160964966,
+ "eval_rouge1": 0.5886104983083813,
+ "eval_rouge2": 0.30839577052783973,
+ "eval_rougeL": 0.5877200747673375,
+ "eval_runtime": 5.8159,
+ "eval_samples_per_second": 287.485,
+ "eval_steps_per_second": 35.936,
+ "step": 1672
  },
  {
  "epoch": 3.0,
- "grad_norm": 1.1533405780792236,
- "learning_rate": 4.380688806888069e-05,
- "loss": 0.331,
- "step": 2514
+ "grad_norm": 1.0570303201675415,
+ "learning_rate": 4.3810110974106046e-05,
+ "loss": 0.3307,
+ "step": 2508
  },
  {
  "epoch": 3.0,
- "eval_bleu": 0.22887638181669745,
- "eval_loss": 0.41845184564590454,
- "eval_rouge1": 0.6066983915980586,
- "eval_rouge2": 0.34626660980048474,
- "eval_rougeL": 0.6055721645591957,
- "eval_runtime": 25.3383,
- "eval_samples_per_second": 66.106,
- "eval_steps_per_second": 8.288,
- "step": 2514
+ "eval_bleu": 0.40983966101019575,
+ "eval_loss": 0.4181276261806488,
+ "eval_rouge1": 0.6130185046348249,
+ "eval_rouge2": 0.33816617915935476,
+ "eval_rougeL": 0.6119109322410228,
+ "eval_runtime": 18.13,
+ "eval_samples_per_second": 92.223,
+ "eval_steps_per_second": 11.528,
+ "step": 2508
  },
  {
  "epoch": 4.0,
- "grad_norm": 1.3615330457687378,
- "learning_rate": 4.123001230012301e-05,
- "loss": 0.2638,
- "step": 3352
+ "grad_norm": 1.0588442087173462,
+ "learning_rate": 4.1233045622688044e-05,
+ "loss": 0.2641,
+ "step": 3344
  },
  {
  "epoch": 4.0,
- "eval_bleu": 0.23736106874390867,
- "eval_loss": 0.4183219075202942,
- "eval_rouge1": 0.6213217050496613,
- "eval_rouge2": 0.3629928034660268,
- "eval_rougeL": 0.6194945230812778,
- "eval_runtime": 16.1799,
- "eval_samples_per_second": 103.524,
- "eval_steps_per_second": 12.979,
- "step": 3352
+ "eval_bleu": 0.4093436818765597,
+ "eval_loss": 0.41785159707069397,
+ "eval_rouge1": 0.6308594985430711,
+ "eval_rouge2": 0.35871585587023913,
+ "eval_rougeL": 0.6297639876498756,
+ "eval_runtime": 5.6414,
+ "eval_samples_per_second": 296.382,
+ "eval_steps_per_second": 37.048,
+ "step": 3344
  },
  {
  "epoch": 5.0,
- "grad_norm": 1.29606294631958,
- "learning_rate": 3.865313653136531e-05,
- "loss": 0.2131,
- "step": 4190
+ "grad_norm": 1.3207675218582153,
+ "learning_rate": 3.8655980271270036e-05,
+ "loss": 0.2135,
+ "step": 4180
  },
  {
  "epoch": 5.0,
- "eval_bleu": 0.24225546513762886,
- "eval_loss": 0.42484816908836365,
- "eval_rouge1": 0.6280394265150125,
- "eval_rouge2": 0.3718667234148674,
- "eval_rougeL": 0.6264526155291095,
- "eval_runtime": 25.4283,
- "eval_samples_per_second": 65.872,
- "eval_steps_per_second": 8.259,
- "step": 4190
+ "eval_bleu": 0.41246868412584214,
+ "eval_loss": 0.4242132008075714,
+ "eval_rouge1": 0.6373073919820111,
+ "eval_rouge2": 0.36914621811033116,
+ "eval_rougeL": 0.6359208794207342,
+ "eval_runtime": 5.3736,
+ "eval_samples_per_second": 311.149,
+ "eval_steps_per_second": 38.894,
+ "step": 4180
  },
  {
  "epoch": 6.0,
- "grad_norm": 1.9162726402282715,
- "learning_rate": 3.6076260762607624e-05,
- "loss": 0.1756,
- "step": 5028
+ "grad_norm": 1.1845903396606445,
+ "learning_rate": 3.6078914919852034e-05,
+ "loss": 0.1765,
+ "step": 5016
  },
  {
  "epoch": 6.0,
- "eval_bleu": 0.25110200021461276,
- "eval_loss": 0.4322951138019562,
- "eval_rouge1": 0.6329099900871186,
- "eval_rouge2": 0.3818443278310246,
- "eval_rougeL": 0.6311516008372622,
- "eval_runtime": 13.3457,
- "eval_samples_per_second": 125.508,
- "eval_steps_per_second": 15.735,
- "step": 5028
+ "eval_bleu": 0.41560183658228855,
+ "eval_loss": 0.43002957105636597,
+ "eval_rouge1": 0.6465581796614536,
+ "eval_rouge2": 0.382979021316462,
+ "eval_rougeL": 0.6453232583485639,
+ "eval_runtime": 5.4874,
+ "eval_samples_per_second": 304.7,
+ "eval_steps_per_second": 38.088,
+ "step": 5016
  },
  {
  "epoch": 7.0,
- "grad_norm": 1.2213388681411743,
- "learning_rate": 3.349938499384994e-05,
- "loss": 0.148,
- "step": 5866
+ "grad_norm": 0.9745954871177673,
+ "learning_rate": 3.350184956843403e-05,
+ "loss": 0.1492,
+ "step": 5852
  },
  {
  "epoch": 7.0,
- "eval_bleu": 0.24584223218153298,
- "eval_loss": 0.4369480013847351,
- "eval_rouge1": 0.6346640667719812,
- "eval_rouge2": 0.3890963668887653,
- "eval_rougeL": 0.6326978702709232,
- "eval_runtime": 6.3313,
- "eval_samples_per_second": 264.559,
- "eval_steps_per_second": 33.169,
- "step": 5866
+ "eval_bleu": 0.41879193694803385,
+ "eval_loss": 0.4348294138908386,
+ "eval_rouge1": 0.6460454718776507,
+ "eval_rouge2": 0.3852436451416871,
+ "eval_rougeL": 0.6448992585011752,
+ "eval_runtime": 12.7816,
+ "eval_samples_per_second": 130.813,
+ "eval_steps_per_second": 16.352,
+ "step": 5852
  },
  {
  "epoch": 8.0,
- "grad_norm": 1.774276852607727,
- "learning_rate": 3.092250922509225e-05,
- "loss": 0.1285,
- "step": 6704
+ "grad_norm": 0.8118007779121399,
+ "learning_rate": 3.092478421701603e-05,
+ "loss": 0.1293,
+ "step": 6688
  },
  {
  "epoch": 8.0,
- "eval_bleu": 0.2499016538097514,
- "eval_loss": 0.4447513520717621,
- "eval_rouge1": 0.6380175687280993,
- "eval_rouge2": 0.38845767776215345,
- "eval_rougeL": 0.6368182514454324,
- "eval_runtime": 5.4681,
- "eval_samples_per_second": 306.322,
- "eval_steps_per_second": 38.405,
- "step": 6704
+ "eval_bleu": 0.4190463110305805,
+ "eval_loss": 0.44312888383865356,
+ "eval_rouge1": 0.6492735112653347,
+ "eval_rouge2": 0.389473573727392,
+ "eval_rougeL": 0.6482287927411368,
+ "eval_runtime": 5.4157,
+ "eval_samples_per_second": 308.729,
+ "eval_steps_per_second": 38.591,
+ "step": 6688
  },
  {
  "epoch": 9.0,
- "grad_norm": 0.779574990272522,
- "learning_rate": 2.8345633456334564e-05,
- "loss": 0.1152,
- "step": 7542
+ "grad_norm": 0.9986652135848999,
+ "learning_rate": 2.8347718865598028e-05,
+ "loss": 0.1155,
+ "step": 7524
  },
  {
  "epoch": 9.0,
- "eval_bleu": 0.2538776211394201,
- "eval_loss": 0.45251065492630005,
- "eval_rouge1": 0.6407467681145795,
- "eval_rouge2": 0.3976657405838769,
- "eval_rougeL": 0.6393461853572592,
- "eval_runtime": 6.3546,
- "eval_samples_per_second": 263.59,
- "eval_steps_per_second": 33.047,
- "step": 7542
+ "eval_bleu": 0.4202341801445125,
+ "eval_loss": 0.449709415435791,
+ "eval_rouge1": 0.6498383797626817,
+ "eval_rouge2": 0.39052732019696323,
+ "eval_rougeL": 0.6488280555236853,
+ "eval_runtime": 5.4735,
+ "eval_samples_per_second": 305.473,
+ "eval_steps_per_second": 38.184,
+ "step": 7524
  },
  {
- "epoch": 10.0,
- "grad_norm": 1.159719705581665,
- "learning_rate": 2.5768757687576876e-05,
- "loss": 0.105,
- "step": 8380
- },
- {
- "epoch": 10.0,
- "eval_bleu": 0.2555335148067654,
- "eval_loss": 0.45896273851394653,
- "eval_rouge1": 0.6440840106866277,
- "eval_rouge2": 0.3998364161836553,
- "eval_rougeL": 0.6425824451349154,
- "eval_runtime": 5.4849,
- "eval_samples_per_second": 305.385,
- "eval_steps_per_second": 38.287,
- "step": 8380
- },
- {
- "epoch": 11.0,
- "grad_norm": 0.923600435256958,
- "learning_rate": 2.3191881918819188e-05,
- "loss": 0.0982,
- "step": 9218
- },
- {
- "epoch": 11.0,
- "eval_bleu": 0.25802091175426106,
- "eval_loss": 0.46457362174987793,
- "eval_rouge1": 0.6455262594531862,
- "eval_rouge2": 0.40187791063551404,
- "eval_rougeL": 0.6445205006389869,
- "eval_runtime": 6.4432,
- "eval_samples_per_second": 259.965,
- "eval_steps_per_second": 32.593,
- "step": 9218
- },
- {
- "epoch": 12.0,
- "grad_norm": 0.9120431542396545,
- "learning_rate": 2.0615006150061504e-05,
- "loss": 0.0936,
- "step": 10056
- },
- {
- "epoch": 12.0,
- "eval_bleu": 0.257202175451904,
- "eval_loss": 0.470233678817749,
- "eval_rouge1": 0.6456936789584837,
- "eval_rouge2": 0.4045772651416589,
- "eval_rougeL": 0.6445820832270409,
- "eval_runtime": 13.7399,
- "eval_samples_per_second": 121.907,
- "eval_steps_per_second": 15.284,
- "step": 10056
- },
- {
- "epoch": 13.0,
- "grad_norm": 0.6435267329216003,
- "learning_rate": 1.8038130381303812e-05,
- "loss": 0.0899,
- "step": 10894
- },
- {
- "epoch": 13.0,
- "eval_bleu": 0.25765634141938715,
- "eval_loss": 0.47367072105407715,
- "eval_rouge1": 0.6488279078084271,
- "eval_rouge2": 0.40534740561340493,
- "eval_rougeL": 0.6478436883847489,
- "eval_runtime": 17.2761,
- "eval_samples_per_second": 96.955,
- "eval_steps_per_second": 12.156,
- "step": 10894
- },
- {
- "epoch": 14.0,
- "grad_norm": 0.7371336221694946,
- "learning_rate": 1.5461254612546124e-05,
- "loss": 0.0871,
- "step": 11732
- },
- {
- "epoch": 14.0,
- "eval_bleu": 0.2606034683461693,
- "eval_loss": 0.4779162108898163,
- "eval_rouge1": 0.6491650390141908,
- "eval_rouge2": 0.40618585329089035,
- "eval_rougeL": 0.6482221321240169,
- "eval_runtime": 5.371,
- "eval_samples_per_second": 311.859,
- "eval_steps_per_second": 39.099,
- "step": 11732
- },
- {
- "epoch": 14.0,
- "step": 11732,
- "total_flos": 6127298150400000.0,
- "train_loss": 0.03383859399638808,
- "train_runtime": 1080.9024,
- "train_samples_per_second": 123.97,
- "train_steps_per_second": 15.506
+ "epoch": 9.0,
+ "step": 7524,
+ "total_flos": 3930158776320000.0,
+ "train_loss": 0.40989596332913064,
+ "train_runtime": 1923.2663,
+ "train_samples_per_second": 69.517,
+ "train_steps_per_second": 8.694
  }
  ],
  "logging_steps": 500,
- "max_steps": 16760,
+ "max_steps": 16720,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
@@ -310,7 +215,7 @@
  "attributes": {}
  }
  },
- "total_flos": 6127298150400000.0,
+ "total_flos": 3930158776320000.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null