Femboyuwu2000 committed
Commit f0237ea
1 Parent(s): deb02ff

Training in progress, step 3740, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f13d441278584800305f9e26da4273dad6d7875090921b28e26b45c6774d333c
+oid sha256:70fc65989648f7dc64734e8244e0e6879cfab6125b857d5586fade7a4f45ce0c
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:649594190d4a72c9cfb02a6d63322d7c898194d9e59c41bc49d7da475d2a0c87
+oid sha256:c5f3e74dabdaf3ee4c5231da9311a563abc6cfd59391a593a38304bcd6053332
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c91443f3ba02f1daf1ad9b67315df20b52be0e5bd1e6256f5526e1158a26ea37
+oid sha256:98c6a86fe7a3ce61d0893ea3b91307b0a6fe853d64a359a291f505b9ac01cb7d
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ccbe4cbbe78988717a93eeb711aa1df5c2d1ab7d6064c8a0774ed1c855a9c67d
+oid sha256:4c2710cfbdebccda3472bd5dbf8574523b547a65e026efc1ddfdf0be454ac800
 size 1064
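
The four checkpoint files above are stored through Git LFS, so the commit only rewrites their pointer files: a version line, an "oid sha256:" line, and a size line. The oid is the SHA-256 digest of the actual file content, which makes it straightforward to check that a downloaded checkpoint file matches its pointer. A minimal sketch in Python (the local path is an assumption about where the repo has been cloned with LFS files pulled):

import hashlib

def lfs_sha256(path, chunk_size=1 << 20):
    """SHA-256 of a file's contents; should equal the oid in its Git LFS pointer."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected to print the new oid from the diff above
# (70fc6598... for the adapter weights).
print(lfs_sha256("last-checkpoint/adapter_model.safetensors"))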
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.2976,
+  "epoch": 0.2992,
   "eval_steps": 500,
-  "global_step": 3720,
+  "global_step": 3740,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1309,6 +1309,13 @@
       "learning_rate": 2.812583271253125e-05,
       "loss": 3.6265,
       "step": 3720
+    },
+    {
+      "epoch": 0.3,
+      "grad_norm": 25.85703468322754,
+      "learning_rate": 2.8102249451758162e-05,
+      "loss": 3.5619,
+      "step": 3740
     }
   ],
   "logging_steps": 20,
@@ -1316,7 +1323,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 8783028556234752.0,
+  "total_flos": 8827316538212352.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null