Femboyuwu2000 committed
Commit: b153248
Parent(s): 12795b3

Training in progress, step 5700, checkpoint
last-checkpoint/adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:59d1f0fae334930ed8079d00f44c27d89643fc0441c8182fd6c384af44315cb0
 size 13982248
last-checkpoint/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:dbf70e69484ef4930af6adb3972e5d151c2e40923012c6e59178aa5b08fc0f62
 size 7062522
last-checkpoint/rng_state.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:8e9d9c35639598e4263e7ecc7045b5427db463c314e637805772291b54322a1a
 size 14244
last-checkpoint/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:0d31553c5ae8347a198cb4d56280c26db3af2cfa5039328d122439d6a52e0f08
 size 1064
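All four binary files above are tracked with Git LFS, so the repository itself only stores a three-line pointer per file (the spec `version`, the blob's `oid sha256:<hash>`, and its `size` in bytes); this commit swaps in the new hash for each checkpoint artifact while the sizes stay the same. As a rough illustration (not part of this repository), a downloaded blob can be checked against its pointer like this; the local path is an assumption:

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_text: str, blob_path: str) -> bool:
    """Check a local file against a Git LFS pointer (version / oid / size lines)."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    data = Path(blob_path).read_bytes()
    return (hashlib.sha256(data).hexdigest() == expected_oid
            and len(data) == expected_size)

# Example using the pointer added for scheduler.pt in this commit
# (the local path is hypothetical).
pointer = """\
version https://git-lfs.github.com/spec/v1
oid sha256:0d31553c5ae8347a198cb4d56280c26db3af2cfa5039328d122439d6a52e0f08
size 1064
"""
print(verify_lfs_pointer(pointer, "last-checkpoint/scheduler.pt"))
```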
last-checkpoint/trainer_state.json
CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.
+  "epoch": 0.456,
   "eval_steps": 500,
-  "global_step":
+  "global_step": 5700,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1974,6 +1974,34 @@
       "learning_rate": 2.5310626050032873e-05,
       "loss": 3.5598,
       "step": 5620
+    },
+    {
+      "epoch": 0.45,
+      "grad_norm": 21.44734764099121,
+      "learning_rate": 2.527528728730582e-05,
+      "loss": 3.5189,
+      "step": 5640
+    },
+    {
+      "epoch": 0.45,
+      "grad_norm": 26.825183868408203,
+      "learning_rate": 2.5239840741677307e-05,
+      "loss": 3.6052,
+      "step": 5660
+    },
+    {
+      "epoch": 0.45,
+      "grad_norm": 29.19519805908203,
+      "learning_rate": 2.5204286784964823e-05,
+      "loss": 3.4724,
+      "step": 5680
+    },
+    {
+      "epoch": 0.46,
+      "grad_norm": 37.9902229309082,
+      "learning_rate": 2.516862579011255e-05,
+      "loss": 3.3665,
+      "step": 5700
     }
   ],
   "logging_steps": 20,
@@ -1981,7 +2009,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.
+  "total_flos": 1.3500387561013248e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
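The updated `trainer_state.json` records the run at `global_step` 5700 (epoch 0.456), with a new log entry every 20 steps and the running `total_flos`. A minimal sketch of inspecting the checkpoint state, assuming the standard `transformers` Trainer layout in which the entries above live under a `log_history` key:

```python
import json

# Load the Trainer state saved alongside the step-5700 checkpoint.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["epoch"], state["global_step"])  # 0.456 5700

# The log entries added in this commit are the most recent ones
# (steps 5640, 5660, 5680, 5700), logged every 20 steps.
for entry in state["log_history"][-4:]:
    print(entry["step"], entry["loss"], entry["grad_norm"])
```

Training can then be resumed from this directory with `Trainer.train(resume_from_checkpoint="last-checkpoint")`, which restores the optimizer, scheduler, and RNG state files changed above.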