Femboyuwu2000 committed on
Commit 5ae27a8
1 Parent(s): 352790d

Training in progress, step 3360, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6cb99fa64546939c539783d29a25c42425044ee987ecc3e5696b8534c4e9f345
+ oid sha256:c1698d0b9850198369e888fcfaf13d37be26de99003ada18e391207f1b5ff228
  size 13982248
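
Each of these small files is a Git LFS pointer rather than the weights themselves: the "oid sha256:" line records the digest of the actual blob and "size" its byte count, which is why only the hash changes between checkpoints while the size stays constant. A minimal sketch (the local path is an assumption, not part of this commit) of checking a downloaded file against the new pointer:

import hashlib

def sha256_of_file(path, chunk_size=1 << 20):
    # Stream the file in 1 MiB chunks and return the hex SHA-256 digest,
    # which should equal the oid recorded in the LFS pointer.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Assumed local copy of the LFS-tracked file from this commit.
print(sha256_of_file("last-checkpoint/adapter_model.safetensors"))
# Expected: c1698d0b9850198369e888fcfaf13d37be26de99003ada18e391207f1b5ff228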
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cd641e98929b8b276a8de167087ef9ff9d89754bf78fd580019c0c5c609853d1
+ oid sha256:d8fe17bfb83911cb927d541d4a34d80b626c32a1c080df866cc26b5c30ec4ff1
  size 7062522
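
optimizer.pt holds the serialized optimizer state for the run. A quick way to peek at its structure, assuming the checkpoint directory has been pulled locally (path is an assumption):

import torch

# Load on CPU just to inspect; an optimizer state_dict normally exposes
# "state" (per-parameter buffers) and "param_groups" (hyperparameters).
state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu")
print(list(state.keys()))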
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8da95d53fc7692aea2c9104930a96fc9ba7cf5e6583572212fa22dac8a3dfc0b
+ oid sha256:7db2bdbe63109eacd97a36be96f1c1369969403fe38eb6536eaee017f63bad8f
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:95230278efb9ecdcab5e159a83a7f79b4af787377ae2ff1f8e2e651daab268eb
+ oid sha256:5d7c2d8201e8b73967831cd725d7a9e1ee970c8f0d997a055abf4549ddfa7435
  size 1064
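
Together, adapter_model.safetensors (the adapter weights), optimizer.pt, scheduler.pt, rng_state.pth, and trainer_state.json below are what the Hugging Face Trainer reads back to continue from step 3360. A minimal sketch of resuming, assuming a model and train_dataset built the same way as in the original run (neither is part of this commit, and the output_dir is a placeholder):

from transformers import Trainer, TrainingArguments

args = TrainingArguments(
    output_dir="out",                 # placeholder
    per_device_train_batch_size=8,    # matches "train_batch_size": 8 below
    num_train_epochs=2,               # matches "num_train_epochs": 2 below
    save_steps=20,
    logging_steps=20,
)

# model and train_dataset are assumed to be defined as in the original run.
trainer = Trainer(model=model, args=args, train_dataset=train_dataset)

# Restores optimizer, scheduler, and RNG state and picks up global_step=3360
# from the checkpoint directory before continuing.
trainer.train(resume_from_checkpoint="last-checkpoint")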
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.2672,
+ "epoch": 0.2688,
  "eval_steps": 500,
- "global_step": 3340,
+ "global_step": 3360,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1176,6 +1176,13 @@
  "learning_rate": 2.8547481435507382e-05,
  "loss": 3.5292,
  "step": 3340
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 23.902685165405273,
+ "learning_rate": 2.852655480925828e-05,
+ "loss": 3.5195,
+ "step": 3360
  }
  ],
  "logging_steps": 20,
@@ -1183,7 +1190,7 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 20,
- "total_flos": 7927089491214336.0,
+ "total_flos": 7983843719970816.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null