Femboyuwu2000 committed
Commit 56e02fb
1 Parent(s): 02c99d5

Training in progress, step 6480, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4edb15d0e1b52d3bdcfb17044d4d5c2d1bc11a9b89494c965b286c71d0b836fb
+oid sha256:9a30a008dae072195fc28eccb26ab5bd9eb4cd09d5723d01bd5eef06c2f548cc
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:72ef6fc0399c4259225d888ed2a17fdb3c50a5cc0882ff3768013ef0d4c4d8dd
+oid sha256:f4c548ed51d455157bc66ff27bbdf7cc0ece0b25d6b6583885fd9166381cfa1f
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3530b4cc0dc079d6d7d3f5eab27538955528f7a1c8ce95515130807a04042a5c
+oid sha256:ab96dece0057048172e676099e7a2287bdd92408cbb9619002ab46488332801f
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b967ab9913addda1efeba2ec99f16fcff83f1b776c306d39cf95d5b4425c3f06
+oid sha256:f067d06bd847c479f71bde77c89318403c2c2dbcdb7f1dd86724b30b9fac060c
 size 1064
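The four binaries above are stored as Git LFS pointers, so the diff only shows the pointer fields (version, oid sha256, size). As a minimal sketch, assuming the files have already been pulled locally into last-checkpoint/, a downloaded file can be checked against its pointer like this (the helper name is hypothetical; the oid and size are copied from the new adapter_model.safetensors pointer above):

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(file_path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a locally pulled LFS file against the oid/size from its pointer."""
    path = Path(file_path)
    if path.stat().st_size != expected_size:
        return False
    sha = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            sha.update(chunk)
    return sha.hexdigest() == expected_oid

# Values copied from the new adapter_model.safetensors pointer above.
print(verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "9a30a008dae072195fc28eccb26ab5bd9eb4cd09d5723d01bd5eef06c2f548cc",
    13982248,
))
```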
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.5168,
+  "epoch": 0.5184,
   "eval_steps": 500,
-  "global_step": 6460,
+  "global_step": 6480,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2268,6 +2268,13 @@
   "learning_rate": 2.3737988124226834e-05,
   "loss": 3.422,
   "step": 6460
+  },
+  {
+   "epoch": 0.52,
+   "grad_norm": 28.162084579467773,
+   "learning_rate": 2.3698455101245052e-05,
+   "loss": 3.5403,
+   "step": 6480
   }
   ],
   "logging_steps": 20,
@@ -2275,7 +2282,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.5247728882548736e+16,
+  "total_flos": 1.5298774882516992e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null