Femboyuwu2000 committed
Commit cfda4d2
1 Parent(s): cc635ab

Training in progress, step 6680, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:765933f0e535032650bafcefa1a947ab8a32b8bace5e91a869c8d023f473bbcc
+oid sha256:5c13f2cc73f2b50eb3054c4f9f34e24e00ccbb2ee12f40760019fdb6ba4a7264
 size 13982248
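
The entries in this commit are Git LFS pointer diffs: only the pointer text (version, oid sha256, size) is versioned in the repository, while the checkpoint payload itself is stored as an LFS object addressed by that SHA-256. A minimal sketch of verifying a downloaded file against its pointer, assuming the file has been fetched to the path shown (the local path and chunk size are illustrative, not part of this commit):

import hashlib

# Hypothetical local path to the downloaded checkpoint file.
path = "last-checkpoint/adapter_model.safetensors"
# Values copied from the new LFS pointer above.
expected_oid = "5c13f2cc73f2b50eb3054c4f9f34e24e00ccbb2ee12f40760019fdb6ba4a7264"
expected_size = 13982248

sha = hashlib.sha256()
size = 0
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        sha.update(chunk)
        size += len(chunk)

assert size == expected_size, f"size mismatch: {size} != {expected_size}"
assert sha.hexdigest() == expected_oid, "sha256 mismatch with the LFS pointer oid"
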
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:63d11185a3e70b21eb44c951a672b731aa6be859ffabc9e46db234a2d5ba4ad8
+oid sha256:7dc0a407b3cd6fb8c2009a5a2e28b1a647ee883170696f020ccd1bf2d47f70d5
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aa10b0a2acb8a95d8431784ea3421efb241e65fea072752db25baf816c8a24aa
+oid sha256:6dc43f4f08c4cb18483c623c72390b89d9e5c0e070695c8546fdb6db415c2659
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7ae054faf87e180e83b9ebfb98f4e8927964f8f999fd61086772e7360b72432e
+oid sha256:64e6df07c2fbddd7c7262a897eedc5919a19077162b1c7b5831b4ba1cffd7cac
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.5328,
+  "epoch": 0.5344,
   "eval_steps": 500,
-  "global_step": 6660,
+  "global_step": 6680,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2338,6 +2338,13 @@
       "learning_rate": 2.3338602049646372e-05,
       "loss": 3.4601,
       "step": 6660
+    },
+    {
+      "epoch": 0.53,
+      "grad_norm": 27.341121673583984,
+      "learning_rate": 2.329817541708346e-05,
+      "loss": 3.4163,
+      "step": 6680
     }
   ],
   "logging_steps": 20,
@@ -2345,7 +2352,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.5720560500506624e+16,
+  "total_flos": 1.5763273798680576e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null