Femboyuwu2000 committed on
Commit 44bfcda
1 Parent(s): 34ef90a

Training in progress, step 680, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:be0bca22803c9150df03e6466773cd219b4542d072342c711ec03c97247eb64c
+oid sha256:03e4d7a2e6b882be3d4a0cc7b960dffb4a278acc68bfa17fed5afd5a7ba89cc9
 size 13982248
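
Each of the changed binaries is stored through Git LFS, so the diff only touches the three-line pointer file (spec version, sha256 oid, byte size); the blob itself lives in LFS storage. As a minimal sketch, assuming the pointer text has been saved to its own file and the blob has been downloaded locally, the new oid and size could be checked like this (verify_lfs_pointer is a hypothetical helper, not part of this repo):

# Hypothetical check of a local blob against a Git LFS pointer file
# of the form shown above: "version ...", "oid sha256:<hex>", "size <bytes>".
import hashlib
import os

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    # Parse the pointer file into key/value pairs.
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    expected_oid = fields["oid"].split(":", 1)[1]  # drop the "sha256:" prefix
    expected_size = int(fields["size"])

    # Hash the blob in chunks so large checkpoints don't load into memory at once.
    h = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)

    return (h.hexdigest() == expected_oid
            and os.path.getsize(blob_path) == expected_size)

# e.g. verify_lfs_pointer("adapter_model.pointer",
#                         "last-checkpoint/adapter_model.safetensors")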
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:835a7525c06da7ed873e20c691c9e2e2c793823b899c56c236618edeabdee1e4
+oid sha256:f6f1b2f0c9e7cd6b2c29b7325f99e1c569e645a85e246f18e83ddfd1cc60b9ed
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:084dccba20efd6b3e5dec3fea90abdf37dfbc8e08f2e8403d8ff8352b73dbaf5
+oid sha256:975cd3c0d8ed686eac6636689f78b9ba276d9ea46ea9d8f16d84080c1f677717
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9d360c1de8e8e92eec017cc71d37bb9f185d942b9274c99e38578ff0397698ae
+oid sha256:5304725a872898493d436386ca2c07b5b449e34cbdcdf743fbe4d162bd012bf4
 size 1064
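
Besides the adapter weights, the checkpoint carries optimizer state, LR-scheduler state, and RNG state, which is what lets training resume deterministically at step 680. A rough inspection sketch, assuming the checkpoint directory has been pulled locally as "last-checkpoint/" and follows the usual transformers Trainer layout (not confirmed anywhere in this commit):

# Load the non-weight checkpoint files changed above for inspection.
# weights_only=False is needed because these pickles contain more than tensors;
# only do this for checkpoints you trust, such as your own.
import torch

ckpt = "last-checkpoint"
optimizer_state = torch.load(f"{ckpt}/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load(f"{ckpt}/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load(f"{ckpt}/rng_state.pth", map_location="cpu", weights_only=False)

print(type(optimizer_state), type(scheduler_state), type(rng_state))

# Resuming is normally done through the Trainer rather than by hand, e.g.:
# trainer.train(resume_from_checkpoint=ckpt)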
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0528,
+  "epoch": 0.0544,
   "eval_steps": 500,
-  "global_step": 660,
+  "global_step": 680,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -238,6 +238,13 @@
       "learning_rate": 2.99992919618918e-05,
       "loss": 3.7735,
       "step": 660
+    },
+    {
+      "epoch": 0.05,
+      "grad_norm": 39.445220947265625,
+      "learning_rate": 2.999874127328748e-05,
+      "loss": 3.759,
+      "step": 680
     }
   ],
   "logging_steps": 20,
@@ -245,7 +252,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1566580743241728.0,
+  "total_flos": 1618578114674688.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null