Femboyuwu2000 committed on
Commit • 9f2e91a • 1 Parent(s): 94a67e0
Training in progress, step 6460, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:4edb15d0e1b52d3bdcfb17044d4d5c2d1bc11a9b89494c965b286c71d0b836fb
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:72ef6fc0399c4259225d888ed2a17fdb3c50a5cc0882ff3768013ef0d4c4d8dd
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:3530b4cc0dc079d6d7d3f5eab27538955528f7a1c8ce95515130807a04042a5c
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:b967ab9913addda1efeba2ec99f16fcff83f1b776c306d39cf95d5b4425c3f06
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.
+  "epoch": 0.5168,
   "eval_steps": 500,
-  "global_step":
+  "global_step": 6460,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2247,6 +2247,27 @@
       "learning_rate": 2.385603559508884e-05,
       "loss": 3.5718,
       "step": 6400
+    },
+    {
+      "epoch": 0.51,
+      "grad_norm": 30.199399948120117,
+      "learning_rate": 2.3816778784387097e-05,
+      "loss": 3.4446,
+      "step": 6420
+    },
+    {
+      "epoch": 0.52,
+      "grad_norm": 53.968971252441406,
+      "learning_rate": 2.3777429489847935e-05,
+      "loss": 3.5161,
+      "step": 6440
+    },
+    {
+      "epoch": 0.52,
+      "grad_norm": 34.33303451538086,
+      "learning_rate": 2.3737988124226834e-05,
+      "loss": 3.422,
+      "step": 6460
     }
   ],
   "logging_steps": 20,
@@ -2254,7 +2275,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.
+  "total_flos": 1.5247728882548736e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
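
Note: the checkpoint binaries above are stored as git-lfs v1 pointer files, each recording only the blob's SHA-256 oid and its size in bytes. A minimal Python sketch for checking a locally downloaded blob against its pointer; the pointer/blob paths in the example are illustrative, not files tracked in this commit:

import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: Path, blob_path: Path) -> bool:
    """Compare a downloaded blob against the oid/size recorded in its git-lfs v1 pointer."""
    fields = {}
    for line in pointer_path.read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    expected_oid = fields["oid"].split(":", 1)[1]   # pointer stores "oid sha256:<hex digest>"
    expected_size = int(fields["size"])

    data = blob_path.read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

# Hypothetical usage, assuming the pointer text was saved next to the pulled blob:
# verify_lfs_pointer(Path("adapter_model.safetensors.pointer"),
#                    Path("last-checkpoint/adapter_model.safetensors"))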