ardaspear committed
Commit f9c28a7 · verified · 1 Parent(s): 1e98531

Training in progress, step 54, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e67ae6f5029c748d71484f4ed224e6ebb67a1fdafb8705f4664ad9114adebb5b
+oid sha256:b2f2971439d88f6db0bc8827138b0e15bf7514b74aa28fdbbe00e3b10f7a2248
 size 338298
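This file (and the three below) is a Git LFS pointer: between checkpoints only the sha256 oid changes while the byte size stays the same. As a minimal sketch of checking a downloaded blob against its pointer (the local paths are hypothetical; the pointer format itself is the git-lfs spec referenced above):

import hashlib
import re
from pathlib import Path

def parse_lfs_pointer(text: str) -> tuple[str, int]:
    """Pull the sha256 oid and byte size out of a Git LFS pointer file."""
    oid = re.search(r"oid sha256:([0-9a-f]{64})", text).group(1)
    size = int(re.search(r"size (\d+)", text).group(1))
    return oid, size

def blob_matches_pointer(blob: Path, oid: str, size: int) -> bool:
    """True if the downloaded blob has exactly the pointer's size and sha256."""
    data = blob.read_bytes()
    return len(data) == size and hashlib.sha256(data).hexdigest() == oid

# Hypothetical paths: wherever the pointer text and the real blob were saved.
oid, size = parse_lfs_pointer(Path("adapter_model.safetensors.ptr").read_text())
print(blob_matches_pointer(Path("adapter_model.safetensors"), oid, size))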
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b6abaacf93b4a89b45980e86ad4801e9b5860e9ec1483988f571fd9fe37bac55
+oid sha256:88254e9b0975f5d59302c648d167fdb1882638b5516bfcd2a47b8961fa05da7b
 size 418030
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ac8aa19cd9ecc96793f7d95ebdf271ae9d5fba9ad8aee081a27d3558a59c4a68
+oid sha256:4dbb97c11c6f1318074260e982b59a91d8a5584bd279926c1beca30dfe95c3f0
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0896677f34f08366c4c71a3f69133212afc490296ec7ee225b8aefde1f38f24c
+oid sha256:2011b1e019073e4bafc29de9703ff0a6e7c1252c3a53d804807bd1c99d390d1c
 size 1064
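Besides the adapter weights, the checkpoint carries optimizer.pt, rng_state.pth, and scheduler.pt, which is what lets transformers' Trainer resume from step 54 instead of restarting. A rough sketch of resuming, assuming the original model and dataset objects are recreated by the training script; only the cadence values (batch size 4, logging every 3 steps, eval/checkpoint every 9 steps) are visible in trainer_state.json, everything else here is an assumption:

from transformers import Trainer, TrainingArguments

def resume_from_last_checkpoint(model, train_dataset, eval_dataset):
    # `model` and the datasets are placeholders for whatever the original
    # run used; only the cadence values below appear in the diff.
    args = TrainingArguments(
        output_dir="outputs",
        per_device_train_batch_size=4,  # "train_batch_size": 4
        logging_steps=3,                # "logging_steps": 3
        save_steps=9,                   # checkpoints land at steps 45, 54, ...
        eval_steps=9,                   # "eval_steps": 9
        eval_strategy="steps",          # argument name in recent transformers releases
    )
    trainer = Trainer(
        model=model,
        args=args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
    )
    # Restores the weights plus optimizer.pt, scheduler.pt, and rng_state.pth,
    # so the next optimizer step is 55 and the loss curve continues seamlessly.
    return trainer.train(resume_from_checkpoint="last-checkpoint")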
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.22959183673469388,
+  "epoch": 0.2755102040816326,
   "eval_steps": 9,
-  "global_step": 45,
+  "global_step": 54,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -160,6 +160,35 @@
       "eval_samples_per_second": 604.065,
       "eval_steps_per_second": 153.762,
       "step": 45
+    },
+    {
+      "epoch": 0.24489795918367346,
+      "grad_norm": 0.25101956725120544,
+      "learning_rate": 6.209609477998338e-05,
+      "loss": 6.8369,
+      "step": 48
+    },
+    {
+      "epoch": 0.2602040816326531,
+      "grad_norm": 0.21246355772018433,
+      "learning_rate": 5.695865504800327e-05,
+      "loss": 6.8376,
+      "step": 51
+    },
+    {
+      "epoch": 0.2755102040816326,
+      "grad_norm": 0.20616650581359863,
+      "learning_rate": 5.174497483512506e-05,
+      "loss": 6.8328,
+      "step": 54
+    },
+    {
+      "epoch": 0.2755102040816326,
+      "eval_loss": 6.832831859588623,
+      "eval_runtime": 0.2703,
+      "eval_samples_per_second": 610.543,
+      "eval_steps_per_second": 155.411,
+      "step": 54
     }
   ],
   "logging_steps": 3,
@@ -179,7 +208,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 432507617280.0,
+  "total_flos": 519009140736.0,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null