leixa committed
Commit 50e488a · verified · 1 Parent(s): 5b9d2e0

Training in progress, step 420, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d063cd2f43da55fad1cd08c0ec68730795948d509943ac12d0989fc1b042902a
+oid sha256:2c65f2723695d55c4676894a4cc573fa60c43c6f9d8e6b10a5f8338a48cb5495
 size 93608
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:307011717607beb98059ae3e6c8abc5c930ba32f7ed58acaee35a67539119e6e
+oid sha256:9d0f7e9bc2d46e8ee5840e716e44a1192336abc75f777e9c4ef214dc13e626af
 size 197158
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fdb8f50440c4f92407ced559dfc13a069c63b86644238d589ef96192341f8039
+oid sha256:2cdfe269170b74b584fdf82fb7f4fe475adef4c7006c041ed6cb8c1885b4ecaa
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6ac207b57c6cefba3838e335ba7ebf320ffdaee8162f1c0afc72ea9ad9f0725f
+oid sha256:b9507fdefdfac1d5dbc7a23a3aeb675b6dd3cc22a3762b7e85ff02a1c9c43105
 size 1064
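
The four files above are stored as Git LFS pointers, so only the sha256 oid changes in each diff while the byte size stays the same; the actual blobs live in LFS storage. As an aside, here is a minimal sketch (not part of this commit; it assumes the repository has been cloned locally and the blobs fetched with git lfs pull) for checking that each downloaded file matches the oid recorded in its new pointer:

# Illustrative only: verify local checkpoint blobs against the LFS oids in this commit.
import hashlib
from pathlib import Path

CHECKPOINT = Path("last-checkpoint")  # hypothetical local clone path

def sha256_of(path: Path) -> str:
    # Stream the file in 1 MiB chunks to avoid loading large blobs into memory.
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected oids copied from the updated pointer files above.
expected = {
    "adapter_model.safetensors": "2c65f2723695d55c4676894a4cc573fa60c43c6f9d8e6b10a5f8338a48cb5495",
    "optimizer.pt": "9d0f7e9bc2d46e8ee5840e716e44a1192336abc75f777e9c4ef214dc13e626af",
    "rng_state.pth": "2cdfe269170b74b584fdf82fb7f4fe475adef4c7006c041ed6cb8c1885b4ecaa",
    "scheduler.pt": "b9507fdefdfac1d5dbc7a23a3aeb675b6dd3cc22a3762b7e85ff02a1c9c43105",
}

for name, oid in expected.items():
    status = "ok" if sha256_of(CHECKPOINT / name) == oid else "MISMATCH"
    print(f"{name}: {status}")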
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 1.0693069306930694,
+  "epoch": 1.188118811881188,
   "eval_steps": 42,
-  "global_step": 378,
+  "global_step": 420,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -969,6 +969,112 @@
       "eval_samples_per_second": 89.851,
       "eval_steps_per_second": 11.307,
       "step": 378
+    },
+    {
+      "epoch": 1.0777934936350777,
+      "grad_norm": 0.12858428061008453,
+      "learning_rate": 1.3860256808630428e-05,
+      "loss": 11.8168,
+      "step": 381
+    },
+    {
+      "epoch": 1.0862800565770863,
+      "grad_norm": 0.1034870445728302,
+      "learning_rate": 1.3202379370768252e-05,
+      "loss": 11.8184,
+      "step": 384
+    },
+    {
+      "epoch": 1.0947666195190948,
+      "grad_norm": 0.19289083778858185,
+      "learning_rate": 1.2558115014363592e-05,
+      "loss": 11.8211,
+      "step": 387
+    },
+    {
+      "epoch": 1.1032531824611032,
+      "grad_norm": 0.15419146418571472,
+      "learning_rate": 1.1927702081543279e-05,
+      "loss": 11.8142,
+      "step": 390
+    },
+    {
+      "epoch": 1.1117397454031117,
+      "grad_norm": 0.15567056834697723,
+      "learning_rate": 1.1311373790174657e-05,
+      "loss": 11.8155,
+      "step": 393
+    },
+    {
+      "epoch": 1.1202263083451203,
+      "grad_norm": 0.13290712237358093,
+      "learning_rate": 1.0709358147587884e-05,
+      "loss": 11.8208,
+      "step": 396
+    },
+    {
+      "epoch": 1.1287128712871288,
+      "grad_norm": 0.09749293327331543,
+      "learning_rate": 1.0121877866225781e-05,
+      "loss": 11.8177,
+      "step": 399
+    },
+    {
+      "epoch": 1.1371994342291372,
+      "grad_norm": 0.120842345058918,
+      "learning_rate": 9.549150281252633e-06,
+      "loss": 11.8156,
+      "step": 402
+    },
+    {
+      "epoch": 1.1456859971711457,
+      "grad_norm": 0.09248703718185425,
+      "learning_rate": 8.991387270152201e-06,
+      "loss": 11.8186,
+      "step": 405
+    },
+    {
+      "epoch": 1.154172560113154,
+      "grad_norm": 0.12557213008403778,
+      "learning_rate": 8.448795174344804e-06,
+      "loss": 11.8199,
+      "step": 408
+    },
+    {
+      "epoch": 1.1626591230551626,
+      "grad_norm": 0.17817071080207825,
+      "learning_rate": 7.921574722852343e-06,
+      "loss": 11.8154,
+      "step": 411
+    },
+    {
+      "epoch": 1.1711456859971712,
+      "grad_norm": 0.10258757323026657,
+      "learning_rate": 7.409920958039795e-06,
+      "loss": 11.8124,
+      "step": 414
+    },
+    {
+      "epoch": 1.1796322489391797,
+      "grad_norm": 0.16313178837299347,
+      "learning_rate": 6.9140231634602485e-06,
+      "loss": 11.815,
+      "step": 417
+    },
+    {
+      "epoch": 1.188118811881188,
+      "grad_norm": 0.10050709545612335,
+      "learning_rate": 6.43406479383053e-06,
+      "loss": 11.8149,
+      "step": 420
+    },
+    {
+      "epoch": 1.188118811881188,
+      "eval_loss": 11.817734718322754,
+      "eval_runtime": 6.4421,
+      "eval_samples_per_second": 92.516,
+      "eval_steps_per_second": 11.642,
+      "step": 420
     }
   ],
   "logging_steps": 3,
@@ -988,7 +1094,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2328761303040.0,
+  "total_flos": 2580649574400.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null