ardaspear committed (verified)
Commit b996110 · 1 Parent(s): e5f9268

Training in progress, step 40, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a932a0faac4b641bf72ce63748ddde2dd62fcaf2789e90f0d14249273ee7e352
+oid sha256:c75aea2cb6864ef76771888ea30ca51b7823e102dd0050fd5ea5fa1cae67064d
 size 2373352
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e28fe608ae6053ba54c94c3375caaf9cf2499321d53b93bfafae7f3423f342e6
+oid sha256:fe436795477c2ac2e8b8987ca266618f4b59acddcb78d9db181303ebd5ab8ce2
 size 4830714
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:016983c686e3f6559a80f761a5aee2d676a2ab199aac6efd1774aeda9637b12d
+oid sha256:639cadb2d5c8341c00ec24a9d5b07827b6952f480b2b56a327b15dcedfc3c4d1
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:88f387d8c434535a84694e469cebc18f2e722ba31b0dc0372632798b59011377
+oid sha256:bd18e5325bf99a4b662a5314e5f491b65903ffc83b0317f63835d8305a50591e
 size 1064
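
All four binaries above are tracked with Git LFS, so each diff touches only the three-line pointer file: the sha256 oid moves to the new checkpoint blob while the recorded byte size stays the same. A minimal sketch, assuming the files have been pulled locally (the path and helper name below are illustrative, not part of this repo), for checking a downloaded file against its pointer:

```python
import hashlib
from pathlib import Path

def matches_lfs_pointer(local_file: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file's SHA-256 digest and byte size match its LFS pointer."""
    data = Path(local_file).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

# Values taken from the scheduler.pt pointer after this commit.
print(matches_lfs_pointer(
    "last-checkpoint/scheduler.pt",
    "bd18e5325bf99a4b662a5314e5f491b65903ffc83b0317f63835d8305a50591e",
    1064,
))
```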
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.020633750921149593,
+  "epoch": 0.023581429624170966,
   "eval_steps": 5,
-  "global_step": 35,
+  "global_step": 40,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -148,6 +148,28 @@
       "eval_samples_per_second": 135.008,
       "eval_steps_per_second": 16.917,
       "step": 35
+    },
+    {
+      "epoch": 0.021223286661753868,
+      "grad_norm": 0.3654229938983917,
+      "learning_rate": 2.7300475013022663e-05,
+      "loss": 11.8738,
+      "step": 36
+    },
+    {
+      "epoch": 0.02299189388356669,
+      "grad_norm": 0.3954028785228729,
+      "learning_rate": 1.7527597583490822e-05,
+      "loss": 11.8702,
+      "step": 39
+    },
+    {
+      "epoch": 0.023581429624170966,
+      "eval_loss": 11.871051788330078,
+      "eval_runtime": 21.0709,
+      "eval_samples_per_second": 135.59,
+      "eval_steps_per_second": 16.99,
+      "step": 40
     }
   ],
   "logging_steps": 3,
@@ -167,7 +189,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 38562693120000.0,
+  "total_flos": 44071649280000.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null