leixa committed (verified)
Commit 5f793e5 · Parent: 9915af3

Training in progress, step 420, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:80ababc76d414a3afda1e50007a605bd4376359db0b794d4bc845165b4cecd9b
+oid sha256:206183a2c99fca5965a2c3ad9629779d179ddd58a11f46227151b1b3c3c15bb7
 size 201892112
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3023e0d5ddef1baf9cb4c5e49655a72087a416ca4f489e97d5bc70ad963d8b1e
+oid sha256:870b066dde9df96bf56be9482f9ed2fef198f645980e68d46f8741963c7929a8
 size 102864868
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0c3a22211f7dda4b0e0f527fa4802e452b6112cd704d0429874313a62a33ed2b
+oid sha256:ebf598a3ab84e12b7b9b0f1cf713464c6133fe6762dd3be870c678737b1f8aaa
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6ac207b57c6cefba3838e335ba7ebf320ffdaee8162f1c0afc72ea9ad9f0725f
+oid sha256:b9507fdefdfac1d5dbc7a23a3aeb675b6dd3cc22a3762b7e85ff02a1c9c43105
 size 1064
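All four files above are Git LFS pointers, so each diff only swaps the three-line pointer text (spec version, sha256 oid, byte size); the binary payloads themselves live in LFS storage. Below is a minimal sketch of how one could check a downloaded blob against its pointer; the paths and the helper name are illustrative, not part of this repo.

import hashlib

def lfs_pointer_matches(pointer_path, blob_path):
    # Parse the "key value" lines of a Git LFS pointer file
    # (version / oid sha256:<hex> / size <bytes>), as shown in the diffs above.
    fields = {}
    with open(pointer_path) as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    expected_oid = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])

    # Hash the resolved binary in chunks and compare oid and size.
    digest = hashlib.sha256()
    actual_size = 0
    with open(blob_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)
            actual_size += len(chunk)
    return digest.hexdigest() == expected_oid and actual_size == expected_size

# Hypothetical local paths: the pointer text and the resolved binary.
print(lfs_pointer_matches("adapter_model.safetensors.pointer",
                          "last-checkpoint/adapter_model.safetensors"))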
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.07923697725605282,
+  "epoch": 0.0880410858400587,
   "eval_steps": 42,
-  "global_step": 378,
+  "global_step": 420,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -969,6 +969,112 @@
       "eval_samples_per_second": 48.686,
       "eval_steps_per_second": 6.09,
       "step": 378
+    },
+    {
+      "epoch": 0.07986584215491038,
+      "grad_norm": 0.3825712203979492,
+      "learning_rate": 1.3860256808630428e-05,
+      "loss": 1.1019,
+      "step": 381
+    },
+    {
+      "epoch": 0.08049470705376795,
+      "grad_norm": 0.39544931054115295,
+      "learning_rate": 1.3202379370768252e-05,
+      "loss": 1.0646,
+      "step": 384
+    },
+    {
+      "epoch": 0.08112357195262551,
+      "grad_norm": 0.3967124819755554,
+      "learning_rate": 1.2558115014363592e-05,
+      "loss": 1.0394,
+      "step": 387
+    },
+    {
+      "epoch": 0.08175243685148308,
+      "grad_norm": 0.39682498574256897,
+      "learning_rate": 1.1927702081543279e-05,
+      "loss": 1.0968,
+      "step": 390
+    },
+    {
+      "epoch": 0.08238130175034064,
+      "grad_norm": 0.3636738359928131,
+      "learning_rate": 1.1311373790174657e-05,
+      "loss": 1.031,
+      "step": 393
+    },
+    {
+      "epoch": 0.08301016664919819,
+      "grad_norm": 0.32566869258880615,
+      "learning_rate": 1.0709358147587884e-05,
+      "loss": 1.094,
+      "step": 396
+    },
+    {
+      "epoch": 0.08363903154805576,
+      "grad_norm": 0.38560715317726135,
+      "learning_rate": 1.0121877866225781e-05,
+      "loss": 1.1144,
+      "step": 399
+    },
+    {
+      "epoch": 0.08426789644691332,
+      "grad_norm": 0.32941296696662903,
+      "learning_rate": 9.549150281252633e-06,
+      "loss": 1.0141,
+      "step": 402
+    },
+    {
+      "epoch": 0.08489676134577089,
+      "grad_norm": 0.36348846554756165,
+      "learning_rate": 8.991387270152201e-06,
+      "loss": 1.043,
+      "step": 405
+    },
+    {
+      "epoch": 0.08552562624462845,
+      "grad_norm": 0.37229597568511963,
+      "learning_rate": 8.448795174344804e-06,
+      "loss": 1.077,
+      "step": 408
+    },
+    {
+      "epoch": 0.08615449114348601,
+      "grad_norm": 0.3720276653766632,
+      "learning_rate": 7.921574722852343e-06,
+      "loss": 0.9869,
+      "step": 411
+    },
+    {
+      "epoch": 0.08678335604234357,
+      "grad_norm": 0.3702375590801239,
+      "learning_rate": 7.409920958039795e-06,
+      "loss": 0.9772,
+      "step": 414
+    },
+    {
+      "epoch": 0.08741222094120113,
+      "grad_norm": 0.37576282024383545,
+      "learning_rate": 6.9140231634602485e-06,
+      "loss": 1.0302,
+      "step": 417
+    },
+    {
+      "epoch": 0.0880410858400587,
+      "grad_norm": 0.39544618129730225,
+      "learning_rate": 6.43406479383053e-06,
+      "loss": 1.0528,
+      "step": 420
+    },
+    {
+      "epoch": 0.0880410858400587,
+      "eval_loss": 1.0637660026550293,
+      "eval_runtime": 165.1277,
+      "eval_samples_per_second": 48.659,
+      "eval_steps_per_second": 6.086,
+      "step": 420
     }
   ],
   "logging_steps": 3,
@@ -988,7 +1094,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 8.06329888234537e+16,
+  "total_flos": 8.959220980383744e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null