Femboyuwu2000 committed on
Commit 7456852
1 Parent(s): 0d8c69b

Training in progress, step 9340, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4e12c37b04f848c57b1fbaa1586497570ce04b4c474a41d82ca6927d46cfafc8
+oid sha256:751f9f960dca5d1bc5b5f0841d1b2cd22831c5a9dd0e4a5ca9f8a71c9bacbd5a
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:be6249a73b51acc1e605e0c726f6b0c6b5b71b95e8e1b3bd37e1fd6605c078f1
+oid sha256:568119d120e0792ab2007ac43295445f846bf7d46a353f3c9997e7874c17c32e
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4030094ed5282ad80d870d012a583b682ade163de00f842f2ba8ebc8caafb7bc
+oid sha256:b9a0d75844d6619f1d43c17c2161774384d43f6fff4ebf0f3423379b5d726d2c
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ab2f7e65908b7a6affd31bf3469c124def8863b28efc3fb8855df9f9b8567a7c
+oid sha256:c57b84b3c01996ecacc9d6df616fa036107288915c2593ed0d7afa9b69854186
 size 1064
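
Each of the four binary checkpoint files above is stored as a Git LFS pointer, so the diff only swaps the three-line pointer text (version, oid sha256, size); the payloads themselves live in LFS storage. The sketch below is illustrative and not part of this commit: a minimal Python reader for such a pointer file, assuming a checkout where the pointer text has not yet been replaced by the real binary (i.e. before `git lfs pull`); the path is just an example from this repo.

    # Minimal sketch (assumption: the file still contains the LFS pointer
    # text, not the real binary). Splits each "key value" line into a dict.
    from pathlib import Path

    def read_lfs_pointer(path: str) -> dict:
        fields = {}
        for line in Path(path).read_text().splitlines():
            key, _, value = line.partition(" ")
            fields[key] = value
        return fields

    # Illustrative usage with one of the files in this diff:
    ptr = read_lfs_pointer("last-checkpoint/adapter_model.safetensors")
    print(ptr["oid"], ptr["size"])  # e.g. sha256:751f9f96... 13982248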
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.7456,
+  "epoch": 0.7472,
   "eval_steps": 500,
-  "global_step": 9320,
+  "global_step": 9340,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -3269,6 +3269,13 @@
       "learning_rate": 1.7370505494839012e-05,
       "loss": 3.4542,
       "step": 9320
+    },
+    {
+      "epoch": 0.75,
+      "grad_norm": 30.222015380859375,
+      "learning_rate": 1.73225223029007e-05,
+      "loss": 3.5017,
+      "step": 9340
     }
   ],
   "logging_steps": 20,
@@ -3276,7 +3283,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 2.203989782819635e+16,
+  "total_flos": 2.208257832045773e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null