Commit b15a9d9 by kokovova
1 Parent(s): 948d8cf

Training in progress, step 20, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:43068dc3deb6eb813d6a28edf7e0215a7f6477f84e786a4a628dab741824630c
+ oid sha256:208499bcf727f8b083a7b514a6106316a836d4a1cb4578473e22f620b0608e46
  size 80013120
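
Each binary file in this checkpoint is tracked with Git LFS, so the commit only rewrites the three-line pointer (spec version, sha256 oid, byte size); the actual blob is swapped out on the LFS server. A minimal sketch, assuming the checkpoint files have been downloaded locally, of verifying a file against the oid and size recorded in its pointer:

```python
import hashlib
from pathlib import Path

def lfs_sha256(path, chunk_size=1 << 20):
    """Compute the sha256 digest that a Git LFS pointer records as its oid."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

path = Path("last-checkpoint/adapter_model.safetensors")
# oid and size taken from the new pointer in this commit
expected_oid = "208499bcf727f8b083a7b514a6106316a836d4a1cb4578473e22f620b0608e46"
expected_size = 80013120

assert path.stat().st_size == expected_size, "size mismatch with LFS pointer"
assert lfs_sha256(path) == expected_oid, "sha256 mismatch with LFS pointer"
print("adapter_model.safetensors matches its LFS pointer")
```

The same check applies to optimizer.pt, rng_state.pth, and scheduler.pt below, each against the oid and size in its own pointer.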
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:107df24863016d74c03b4fb78999b377a162010202a5636d458667e96285852e
+ oid sha256:a93d97e38917c3dcaf7eb8e2df76373e688dd52f0af073be6d167c19d56d51f9
  size 160284754
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:156e1688439d5c56c4b456fff105e4b11da885d5d5465581856864395f905df9
+ oid sha256:c6afa12c7f6dfef3a4c050c66fe0d6eeafd92164312401416d48b422a8e0b1a2
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bb578e75c11a81e85dda67a691f96ba4793a02960f1409fd3e1511aac873491a
+ oid sha256:1e2ed9259304616a8ecebc61c5d000777b2978635f7a705b8d7081c480ce0bde
  size 1064
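
Together with trainer_state.json below, the four LFS-tracked files above (adapter weights, optimizer state, RNG state, LR-scheduler state) make up a transformers Trainer checkpoint; passing resume_from_checkpoint="last-checkpoint" to Trainer.train() would pick the run back up at global_step 20. A minimal sketch, assuming the files are present locally and torch is installed, of inspecting the non-weight pieces directly:

```python
import torch

# Paths assume the last-checkpoint directory from this commit is present locally.
# weights_only=False because these are trusted files pulled from this repo.
optimizer_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("last-checkpoint/rng_state.pth", map_location="cpu", weights_only=False)

print(optimizer_state.keys())   # an optimizer state_dict, e.g. dict_keys(['state', 'param_groups'])
print(scheduler_state)          # LR-scheduler state_dict (step counter, last LR, ...)
print(rng_state.keys())         # per-library RNG states saved so resumption is bit-exact
```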
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.005963918294319368,
+ "epoch": 0.011927836588638736,
  "eval_steps": 2,
- "global_step": 10,
+ "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -125,6 +125,116 @@
  "eval_samples_per_second": 9.221,
  "eval_steps_per_second": 4.61,
  "step": 10
+ },
+ {
+ "epoch": 0.006560310123751305,
+ "grad_norm": 1.0968209505081177,
+ "learning_rate": 0.00019510565162951537,
+ "loss": 0.5539,
+ "step": 11
+ },
+ {
+ "epoch": 0.007156701953183241,
+ "grad_norm": 0.8019706010818481,
+ "learning_rate": 0.00018090169943749476,
+ "loss": 0.2537,
+ "step": 12
+ },
+ {
+ "epoch": 0.007156701953183241,
+ "eval_loss": 0.24521681666374207,
+ "eval_runtime": 76.5378,
+ "eval_samples_per_second": 9.224,
+ "eval_steps_per_second": 4.612,
+ "step": 12
+ },
+ {
+ "epoch": 0.007753093782615178,
+ "grad_norm": 0.8972131609916687,
+ "learning_rate": 0.00015877852522924732,
+ "loss": 0.188,
+ "step": 13
+ },
+ {
+ "epoch": 0.008349485612047115,
+ "grad_norm": 1.388610601425171,
+ "learning_rate": 0.00013090169943749476,
+ "loss": 0.2539,
+ "step": 14
+ },
+ {
+ "epoch": 0.008349485612047115,
+ "eval_loss": 0.20003721117973328,
+ "eval_runtime": 76.5695,
+ "eval_samples_per_second": 9.22,
+ "eval_steps_per_second": 4.61,
+ "step": 14
+ },
+ {
+ "epoch": 0.008945877441479051,
+ "grad_norm": 1.2546236515045166,
+ "learning_rate": 0.0001,
+ "loss": 0.2859,
+ "step": 15
+ },
+ {
+ "epoch": 0.009542269270910989,
+ "grad_norm": 1.2586054801940918,
+ "learning_rate": 6.909830056250527e-05,
+ "loss": 0.2893,
+ "step": 16
+ },
+ {
+ "epoch": 0.009542269270910989,
+ "eval_loss": 0.17982345819473267,
+ "eval_runtime": 76.5392,
+ "eval_samples_per_second": 9.224,
+ "eval_steps_per_second": 4.612,
+ "step": 16
+ },
+ {
+ "epoch": 0.010138661100342925,
+ "grad_norm": 1.4489102363586426,
+ "learning_rate": 4.12214747707527e-05,
+ "loss": 0.1875,
+ "step": 17
+ },
+ {
+ "epoch": 0.010735052929774861,
+ "grad_norm": 1.0019066333770752,
+ "learning_rate": 1.9098300562505266e-05,
+ "loss": 0.0675,
+ "step": 18
+ },
+ {
+ "epoch": 0.010735052929774861,
+ "eval_loss": 0.15851947665214539,
+ "eval_runtime": 76.5776,
+ "eval_samples_per_second": 9.219,
+ "eval_steps_per_second": 4.61,
+ "step": 18
+ },
+ {
+ "epoch": 0.0113314447592068,
+ "grad_norm": 0.7860324382781982,
+ "learning_rate": 4.8943483704846475e-06,
+ "loss": 0.1514,
+ "step": 19
+ },
+ {
+ "epoch": 0.011927836588638736,
+ "grad_norm": 0.9038301110267639,
+ "learning_rate": 0.0,
+ "loss": 0.178,
+ "step": 20
+ },
+ {
+ "epoch": 0.011927836588638736,
+ "eval_loss": 0.1537449210882187,
+ "eval_runtime": 76.5787,
+ "eval_samples_per_second": 9.219,
+ "eval_steps_per_second": 4.61,
+ "step": 20
  }
  ],
  "logging_steps": 1,
@@ -139,12 +249,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 6591291693465600.0,
+ "total_flos": 1.31825833869312e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null