Nexspear committed on
Commit 721cc3a · verified · 1 Parent(s): f61c8ac

Training in progress, step 40, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:473cc1b7c18be2967d46f87ef1c27dd2455038c0dbbd14e00e3ea51f96a5b490
+oid sha256:e24236def62dcea983f4d2e6ba6fed88fa39b62f4691affd4ed1d8c2d8d8795a
 size 191968
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cd220ddd424e463fcdc9d22a69889fc47893f491dcec5f0285a4e718ba580980
+oid sha256:a1129b9bdbc1f927b8794bd561f20c2710281cfd606fac34cd745c281e940db8
 size 253144
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1483fe5ee1de3351fec80a493a35970862157e2df5a1f01c39c538d167a91ab7
+oid sha256:f6f8a3071cc811ae52946b6ee778ddcd861ec51849b9667e2fa077e5381edaa2
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:88f387d8c434535a84694e469cebc18f2e722ba31b0dc0372632798b59011377
+oid sha256:bd18e5325bf99a4b662a5314e5f491b65903ffc83b0317f63835d8305a50591e
 size 1064
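
The four diffs above only swap Git LFS pointers: each pointer stores the SHA-256 digest ("oid") and byte size of the real checkpoint blob, so the tensors themselves never appear in the diff. A minimal sketch (not part of this commit) for re-deriving those two fields from a downloaded file, assuming the LFS objects have already been pulled locally (for example with git lfs pull):

import hashlib
import os

def lfs_fingerprint(path):
    """Return (sha256 hex digest, size in bytes), the two fields an LFS pointer records."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest(), os.path.getsize(path)

oid, size = lfs_fingerprint("last-checkpoint/adapter_model.safetensors")
print(oid, size)  # should match the new "oid sha256:e24236..." and "size 191968" above
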
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.02456571328303211,
+  "epoch": 0.02807510089489384,
   "eval_steps": 5,
-  "global_step": 35,
+  "global_step": 40,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -148,6 +148,28 @@
       "eval_samples_per_second": 460.341,
       "eval_steps_per_second": 57.543,
       "step": 35
+    },
+    {
+      "epoch": 0.025267590805404458,
+      "grad_norm": 0.12015967071056366,
+      "learning_rate": 2.7300475013022663e-05,
+      "loss": 10.366,
+      "step": 36
+    },
+    {
+      "epoch": 0.027373223372521495,
+      "grad_norm": 0.1509571224451065,
+      "learning_rate": 1.7527597583490822e-05,
+      "loss": 10.3625,
+      "step": 39
+    },
+    {
+      "epoch": 0.02807510089489384,
+      "eval_loss": 10.361713409423828,
+      "eval_runtime": 5.2202,
+      "eval_samples_per_second": 459.755,
+      "eval_steps_per_second": 57.469,
+      "step": 40
     }
   ],
   "logging_steps": 3,
@@ -167,7 +189,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3987936116736.0,
+  "total_flos": 4629352415232.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
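
The updated state advances the run from step 35 to step 40: new log entries (training losses at steps 36 and 39 and an evaluation at step 40) are appended, and epoch, global_step, and total_flos move forward accordingly. As a quick sanity check (not part of the commit), epoch should grow linearly with the optimizer step, so every entry under log_history should share the same epoch-per-step ratio; a minimal sketch, assuming the file is read from this repo's last-checkpoint directory:

import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

# 0.02807510089489384 / 40 ≈ 0.0007018775 epochs advanced per optimizer step
per_step = state["epoch"] / state["global_step"]
print(f"epoch per step ≈ {per_step:.10f}")

for entry in state["log_history"]:
    if entry.get("step"):  # every logged entry carries its step and epoch
        assert abs(entry["epoch"] / entry["step"] - per_step) < 1e-6, entry
print("epoch values are consistent with global_step across all log entries")
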