ardaspear committed (verified)
Commit 2b4a2c8 · Parent(s): 454fdc6

Training in progress, step 40, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9b66c7b85bba815a8496aa5bfc93e033b16bb59359391fe05c32b291939b0aa8
+oid sha256:5bb9a096db4afd4939889cf29b28bce0cd60c204e43b83bde086473e9fa95635
 size 191968
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a0141d94ab6887d19d624f67d83f76f79885deaeac459ecb525ff6e5b7df4afe
+oid sha256:2f93824adee0ed2d7e519db2a451ec9064b5bb5f6d49be8d009d012c5ec3eda2
 size 253144
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:005d9a86e0834fe0802ab2e1c3fa9eca806e3cae26d1cbb14ab848a758219c13
+oid sha256:a4faae14a1aa6472da1e35d90c3eeeb0dd65ba4a4be7087c6d32cacd0c607e1b
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:88f387d8c434535a84694e469cebc18f2e722ba31b0dc0372632798b59011377
+oid sha256:bd18e5325bf99a4b662a5314e5f491b65903ffc83b0317f63835d8305a50591e
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.07900677200902935,
+  "epoch": 0.09029345372460497,
   "eval_steps": 5,
-  "global_step": 35,
+  "global_step": 40,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -148,6 +148,28 @@
       "eval_samples_per_second": 553.69,
       "eval_steps_per_second": 69.675,
       "step": 35
+    },
+    {
+      "epoch": 0.08126410835214447,
+      "grad_norm": 0.54636549949646,
+      "learning_rate": 2.7300475013022663e-05,
+      "loss": 10.3207,
+      "step": 36
+    },
+    {
+      "epoch": 0.08803611738148984,
+      "grad_norm": 0.703861653804779,
+      "learning_rate": 1.7527597583490822e-05,
+      "loss": 10.3198,
+      "step": 39
+    },
+    {
+      "epoch": 0.09029345372460497,
+      "eval_loss": 10.316521644592285,
+      "eval_runtime": 1.3669,
+      "eval_samples_per_second": 546.511,
+      "eval_steps_per_second": 68.771,
+      "step": 40
     }
   ],
   "logging_steps": 3,
@@ -167,7 +189,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3904273121280.0,
+  "total_flos": 4462026424320.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null