Nexspear committed
Commit 2da08da · verified · 1 Parent(s): b3a2c8e

Training in progress, step 45, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2e0d2d46cbdc4b9220d6a849a4a7e2897d04a26f4ceb3fc6d36cfb3722da27ed
+oid sha256:687f4bca35f0d9df993f6d5b62109940fe0e84817db534ae8363cc0735fba861
 size 338298
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0eb587af29e9bd567091c9edc1922888b73d8ca2d4cc143c1d03589f6356311c
+oid sha256:16377976c107a6c66a499279562140fcb8371bea43a081943d633f33b2e65a86
 size 418030
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6f7d8e9abeff9f0f2aa7d16e8494b24e0a7eaac3cc565c4ef78e32d32a521d4a
+oid sha256:ba868925edaf5516534779d862378ff0c5b1a6e19d0ed88b5d7747286b450e1c
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bd18e5325bf99a4b662a5314e5f491b65903ffc83b0317f63835d8305a50591e
+oid sha256:4ec0bb6efeeb25000ef7426abb9d88265daa212fa2a170e1fb3f58996efa27f6
 size 1064
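
Each pointer diff above only swaps the LFS oid; the file sizes are unchanged. A minimal sketch for checking that a pulled checkpoint file matches its pointer's sha256 (the local path and clone layout are assumptions; the expected oid is taken from the adapter diff above):

```python
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file through SHA-256 so large checkpoints are not loaded into memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Assumed local checkout layout; expected oid copied from the pointer diff above.
adapter = Path("last-checkpoint/adapter_model.safetensors")
expected = "687f4bca35f0d9df993f6d5b62109940fe0e84817db534ae8363cc0735fba861"
assert sha256_of(adapter) == expected, "adapter_model.safetensors does not match its LFS pointer"
```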
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 1.4678899082568808,
+  "epoch": 1.6513761467889907,
   "eval_steps": 5,
-  "global_step": 40,
+  "global_step": 45,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -170,6 +170,28 @@
       "eval_samples_per_second": 772.357,
       "eval_steps_per_second": 100.742,
       "step": 40
+    },
+    {
+      "epoch": 1.5412844036697249,
+      "grad_norm": 0.3164646625518799,
+      "learning_rate": 9.549150281252633e-06,
+      "loss": 6.8576,
+      "step": 42
+    },
+    {
+      "epoch": 1.6513761467889907,
+      "grad_norm": 0.3245089054107666,
+      "learning_rate": 3.8060233744356633e-06,
+      "loss": 6.8992,
+      "step": 45
+    },
+    {
+      "epoch": 1.6513761467889907,
+      "eval_loss": 6.84040641784668,
+      "eval_runtime": 0.0629,
+      "eval_samples_per_second": 731.69,
+      "eval_steps_per_second": 95.438,
+      "step": 45
     }
   ],
   "logging_steps": 3,
@@ -189,7 +211,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 904061061120.0,
+  "total_flos": 1009785145344.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null