Nexspear committed
Commit 05d113e · verified · 1 Parent(s): 650bffa

Training in progress, step 420, checkpoint
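This commit updates the files under last-checkpoint/ listed below. A minimal sketch for pulling just this checkpoint locally, assuming the huggingface_hub client; the repo id is hypothetical since it is not shown in this diff, and the revision can be a branch name or a commit SHA:

from huggingface_hub import snapshot_download

# Download only the checkpoint directory from this repository.
# repo_id is a placeholder; use the full 40-character SHA if the
# short hash shown above does not resolve.
local_dir = snapshot_download(
    repo_id="Nexspear/<repo-name>",
    revision="05d113e",
    allow_patterns=["last-checkpoint/*"],
)
print("checkpoint downloaded to:", local_dir)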

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:77f85c8bc4ad212684b7993062a23525ac4f791b5229784b4158ac4bce165586
+oid sha256:e28db0f053f0469288d9c661a2de0e7721acd04f65d12b2744dda53823792f55
 size 100966336
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2b37c1c4fa9a9e825ea05b1ca35dc1c4929d943b7bddd2f6086f57e6c5064ff4
+oid sha256:9e52fae042af788b20ffbe20c3dffe04e516add806bf2e66c036ba1becc61501
 size 51613668
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:875830942629bcea9e8c73e146b1df9f89fd36cd8b3e95d6764a6f4e78bd912d
+oid sha256:79e5ef6a566f54a2b32cb3e8d5a68a7370551bdcc68ea3fc820d6c009103c0c9
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:49f8e14b456d7fc01f5ad21526616d8f86a1c6c40024725332f505f34df4d95a
+oid sha256:d7641dde43bc7a22d17d22ddcaa29ef3541065d43d71357b77f45ce61017cfec
 size 1064
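Each of the checkpoint files above is stored as a Git LFS pointer: the repository itself tracks only the oid sha256 and size fields, and this commit swaps the old object ids for new ones. A minimal sketch, assuming the blobs have been fetched locally (e.g. via git lfs pull), that recomputes the SHA-256 of a file and checks it against the new pointer values:

import hashlib
from pathlib import Path

def verify_lfs_object(path, expected_oid, expected_size):
    # A Git LFS pointer's oid is the SHA-256 digest of the stored blob,
    # so a fetched file can be checked against its pointer directly.
    data = Path(path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# New pointer values for the adapter weights in this commit.
ok = verify_lfs_object(
    "last-checkpoint/adapter_model.safetensors",
    expected_oid="e28db0f053f0469288d9c661a2de0e7721acd04f65d12b2744dda53823792f55",
    expected_size=100966336,
)
print("adapter_model.safetensors matches its pointer:", ok)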
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.056627092618254,
+  "epoch": 0.06291899179806,
   "eval_steps": 42,
-  "global_step": 378,
+  "global_step": 420,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -969,6 +969,112 @@
       "eval_samples_per_second": 35.943,
       "eval_steps_per_second": 4.495,
       "step": 378
+    },
+    {
+      "epoch": 0.05707651398824014,
+      "grad_norm": 0.17599642276763916,
+      "learning_rate": 6.930128404315214e-06,
+      "loss": 1.1339,
+      "step": 381
+    },
+    {
+      "epoch": 0.05752593535822628,
+      "grad_norm": 0.16360458731651306,
+      "learning_rate": 6.601189685384126e-06,
+      "loss": 1.1844,
+      "step": 384
+    },
+    {
+      "epoch": 0.05797535672821243,
+      "grad_norm": 0.1602054089307785,
+      "learning_rate": 6.279057507181796e-06,
+      "loss": 1.0835,
+      "step": 387
+    },
+    {
+      "epoch": 0.05842477809819857,
+      "grad_norm": 0.33288466930389404,
+      "learning_rate": 5.9638510407716394e-06,
+      "loss": 1.0366,
+      "step": 390
+    },
+    {
+      "epoch": 0.058874199468184715,
+      "grad_norm": 0.17551077902317047,
+      "learning_rate": 5.655686895087329e-06,
+      "loss": 1.0361,
+      "step": 393
+    },
+    {
+      "epoch": 0.05932362083817085,
+      "grad_norm": 0.19817480444908142,
+      "learning_rate": 5.354679073793942e-06,
+      "loss": 1.1113,
+      "step": 396
+    },
+    {
+      "epoch": 0.059773042208157,
+      "grad_norm": 0.18397966027259827,
+      "learning_rate": 5.060938933112891e-06,
+      "loss": 1.0809,
+      "step": 399
+    },
+    {
+      "epoch": 0.06022246357814314,
+      "grad_norm": 0.18848171830177307,
+      "learning_rate": 4.7745751406263165e-06,
+      "loss": 1.0232,
+      "step": 402
+    },
+    {
+      "epoch": 0.060671884948129286,
+      "grad_norm": 0.18873141705989838,
+      "learning_rate": 4.495693635076101e-06,
+      "loss": 1.0633,
+      "step": 405
+    },
+    {
+      "epoch": 0.06112130631811542,
+      "grad_norm": 0.17433975636959076,
+      "learning_rate": 4.224397587172402e-06,
+      "loss": 1.0064,
+      "step": 408
+    },
+    {
+      "epoch": 0.06157072768810157,
+      "grad_norm": 0.2025955468416214,
+      "learning_rate": 3.9607873614261715e-06,
+      "loss": 1.1142,
+      "step": 411
+    },
+    {
+      "epoch": 0.06202014905808771,
+      "grad_norm": 0.1579374223947525,
+      "learning_rate": 3.7049604790198976e-06,
+      "loss": 1.0722,
+      "step": 414
+    },
+    {
+      "epoch": 0.062469570428073856,
+      "grad_norm": 0.15210741758346558,
+      "learning_rate": 3.4570115817301243e-06,
+      "loss": 1.011,
+      "step": 417
+    },
+    {
+      "epoch": 0.06291899179806,
+      "grad_norm": 0.21344028413295746,
+      "learning_rate": 3.217032396915265e-06,
+      "loss": 1.1333,
+      "step": 420
+    },
+    {
+      "epoch": 0.06291899179806,
+      "eval_loss": 1.0891081094741821,
+      "eval_runtime": 312.8277,
+      "eval_samples_per_second": 35.94,
+      "eval_steps_per_second": 4.494,
+      "step": 420
     }
   ],
   "logging_steps": 3,
@@ -988,7 +1094,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.2131417357234995e+17,
+  "total_flos": 1.3537807518872371e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null