Femboyuwu2000 committed
Commit f06b1ac • Parent(s): 96038a6
Training in progress, step 2620, checkpoint
last-checkpoint/adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:102a934b9c2b16bd152b8fb45fb756ed8a39ee8f7db8b00ff347bd79c86fbef6
 size 13982248
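Each binary in this checkpoint is tracked with Git LFS, so the commit only changes the pointer file (the version line, the oid sha256 hash, and the byte size) rather than the weights themselves. As a rough illustration of what those fields mean, the sketch below checks a locally downloaded blob against its pointer; both file paths are placeholders, not files in this repo.

import hashlib
from pathlib import Path


def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    # Parse the "key value" lines of a Git LFS pointer (version, oid, size).
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if line
    )
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    # Hash the downloaded blob and compare hash and size against the pointer.
    blob = Path(blob_path).read_bytes()
    return hashlib.sha256(blob).hexdigest() == expected_oid and len(blob) == expected_size


# Placeholder paths, for illustration only.
print(verify_lfs_pointer("adapter_model.pointer", "adapter_model.safetensors"))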
last-checkpoint/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:352473e3f8f3dae7afcda846881ada181c756f061e5a3d9beb1d4a3e068eeb5d
 size 7062522
last-checkpoint/rng_state.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:fcc2667f22c446f99eb24a0261709aeedb26c632fa56415781522ee28f079a36
 size 14244
last-checkpoint/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:6b7e6b2fa8982dc42ac672b8fc6e736397d0e1ef6af55fabc97bdaa24968e490
 size 1064
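optimizer.pt, scheduler.pt, and rng_state.pth are the usual per-checkpoint state files the Hugging Face Trainer writes next to the adapter weights; together with trainer_state.json below, they are what resume_from_checkpoint consumes so training can continue from step 2620 rather than restart. They are ordinary torch pickles and can be inspected directly; a minimal sketch, assuming the checkpoint directory has been downloaded locally as last-checkpoint/ and a reasonably recent torch:

import torch

# weights_only=False may be needed on newer torch versions, since these are
# general training-state pickles rather than plain tensor files (assumption).
optimizer_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("last-checkpoint/rng_state.pth", map_location="cpu", weights_only=False)

# An optimizer state dict normally has "state" and "param_groups" at the top level.
print(list(optimizer_state.keys()))

# Scheduler state records its step count and last computed learning rates.
print(scheduler_state)

# RNG snapshot used so a resumed run continues the same random stream.
print(type(rng_state))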
last-checkpoint/trainer_state.json
CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.
+  "epoch": 0.2096,
   "eval_steps": 500,
-  "global_step":
+  "global_step": 2620,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -833,6 +833,97 @@
       "learning_rate": 2.9394880643660242e-05,
       "loss": 3.5974,
       "step": 2360
+    },
+    {
+      "epoch": 0.19,
+      "grad_norm": 43.614994049072266,
+      "learning_rate": 2.938114567894659e-05,
+      "loss": 3.4834,
+      "step": 2380
+    },
+    {
+      "epoch": 0.19,
+      "grad_norm": 27.587766647338867,
+      "learning_rate": 2.9367259862819805e-05,
+      "loss": 3.6154,
+      "step": 2400
+    },
+    {
+      "epoch": 0.19,
+      "grad_norm": 30.223772048950195,
+      "learning_rate": 2.9353223340935533e-05,
+      "loss": 3.4871,
+      "step": 2420
+    },
+    {
+      "epoch": 0.2,
+      "grad_norm": 34.057884216308594,
+      "learning_rate": 2.933903626053024e-05,
+      "loss": 3.605,
+      "step": 2440
+    },
+    {
+      "epoch": 0.2,
+      "grad_norm": 39.219242095947266,
+      "learning_rate": 2.932469877041969e-05,
+      "loss": 3.6091,
+      "step": 2460
+    },
+    {
+      "epoch": 0.2,
+      "grad_norm": 33.33955001831055,
+      "learning_rate": 2.931021102099737e-05,
+      "loss": 3.4862,
+      "step": 2480
+    },
+    {
+      "epoch": 0.2,
+      "grad_norm": 37.07484436035156,
+      "learning_rate": 2.9295573164232913e-05,
+      "loss": 3.5267,
+      "step": 2500
+    },
+    {
+      "epoch": 0.2,
+      "grad_norm": 27.145864486694336,
+      "learning_rate": 2.9280785353670514e-05,
+      "loss": 3.4369,
+      "step": 2520
+    },
+    {
+      "epoch": 0.2,
+      "grad_norm": 30.31035041809082,
+      "learning_rate": 2.9265847744427305e-05,
+      "loss": 3.6056,
+      "step": 2540
+    },
+    {
+      "epoch": 0.2,
+      "grad_norm": 40.823490142822266,
+      "learning_rate": 2.925076049319174e-05,
+      "loss": 3.5916,
+      "step": 2560
+    },
+    {
+      "epoch": 0.21,
+      "grad_norm": 44.224796295166016,
+      "learning_rate": 2.9235523758221944e-05,
+      "loss": 3.5881,
+      "step": 2580
+    },
+    {
+      "epoch": 0.21,
+      "grad_norm": 33.34773254394531,
+      "learning_rate": 2.922013769934406e-05,
+      "loss": 3.5315,
+      "step": 2600
+    },
+    {
+      "epoch": 0.21,
+      "grad_norm": 25.755775451660156,
+      "learning_rate": 2.920460247795056e-05,
+      "loss": 3.621,
+      "step": 2620
     }
   ],
   "logging_steps": 20,
@@ -840,7 +931,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos":
+  "total_flos": 6207501971718144.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
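The log_history entries added above can be read straight back out of trainer_state.json for a quick sanity check on how the run is trending; a minimal sketch, assuming the checkpoint has been downloaded locally:

import json

# Assumes the checkpoint directory has been downloaded locally.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(f"global_step: {state['global_step']}, epoch: {state['epoch']}")

# Each log_history record mirrors the entries added in this commit:
# epoch, grad_norm, learning_rate, loss, step (logged every 20 steps).
recent = [h for h in state["log_history"] if "loss" in h and h.get("step", 0) > 2360]

avg_loss = sum(h["loss"] for h in recent) / len(recent)
print(f"mean training loss over steps 2380-2620: {avg_loss:.4f}")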