Femboyuwu2000 committed on
Commit c0f000f
1 Parent(s): 125e4b9

Training in progress, step 6660, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e70f472f3208fce22e711727d3f51a955b9502ee4635cf3b354a7a5849495f64
+oid sha256:765933f0e535032650bafcefa1a947ab8a32b8bace5e91a869c8d023f473bbcc
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0ce48b5f0a4182d0f3ca44b8f6292a94f1e4dd3c84342481403240c0341f492c
+oid sha256:63d11185a3e70b21eb44c951a672b731aa6be859ffabc9e46db234a2d5ba4ad8
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f483dbbbbb5dc1b41e41475eede183d58a38381ca60f4870fc45bb7a64baedfd
+oid sha256:aa10b0a2acb8a95d8431784ea3421efb241e65fea072752db25baf816c8a24aa
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:127254ff30131eb4f46f0ab26f5bfb221842284bc63a371736d3849e9dc192b7
+oid sha256:7ae054faf87e180e83b9ebfb98f4e8927964f8f999fd61086772e7360b72432e
 size 1064
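
Each of the four files above is stored as a Git LFS pointer rather than raw bytes: the "oid sha256:" field records the SHA-256 of the actual blob and "size" its length in bytes. Below is a minimal sketch of how one might confirm that a locally downloaded blob matches the pointer committed here; it assumes the real file (not the pointer text) is present at the path shown, and the expected values are the post-commit ones from the adapter_model.safetensors hunk.

import hashlib
import os

def verify_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    # Recompute the SHA-256 digest in chunks and compare it, together with the
    # on-disk byte size, against the fields recorded in the LFS pointer.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return (digest.hexdigest() == expected_oid
            and os.path.getsize(path) == expected_size)

# Post-commit values from the adapter_model.safetensors diff above.
print(verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "765933f0e535032650bafcefa1a947ab8a32b8bace5e91a869c8d023f473bbcc",
    13982248,
))
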
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.5312,
+  "epoch": 0.5328,
   "eval_steps": 500,
-  "global_step": 6640,
+  "global_step": 6660,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2331,6 +2331,13 @@
       "learning_rate": 2.3378941214219545e-05,
       "loss": 3.5401,
       "step": 6640
+    },
+    {
+      "epoch": 0.53,
+      "grad_norm": 29.300914764404297,
+      "learning_rate": 2.3338602049646372e-05,
+      "loss": 3.4601,
+      "step": 6660
     }
   ],
   "logging_steps": 20,
@@ -2338,7 +2345,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.5672073361719296e+16,
+  "total_flos": 1.5720560500506624e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null