leixa committed
Commit 7be98e5 · verified · 1 Parent(s): d60f4d5

Training in progress, step 462, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2c65f2723695d55c4676894a4cc573fa60c43c6f9d8e6b10a5f8338a48cb5495
+oid sha256:df0e540779c16a9c9b8e0788020851b763c6ccde82f230184c6995b221b6b636
 size 93608
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9d0f7e9bc2d46e8ee5840e716e44a1192336abc75f777e9c4ef214dc13e626af
+oid sha256:9d03b8186e15e8d8547427ceb0c67f3e312c09939c3148d3ddf48c2bbfe1bde0
 size 197158
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2cdfe269170b74b584fdf82fb7f4fe475adef4c7006c041ed6cb8c1885b4ecaa
+oid sha256:c51931e2d3751996d51194bb17f4339acd99c19e556e5384d965eaa00841d426
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b9507fdefdfac1d5dbc7a23a3aeb675b6dd3cc22a3762b7e85ff02a1c9c43105
+oid sha256:7b58b44a2d5024ddc12e64ead45d5d25c7fc985d9aaeb44c7bc3de9b8cf56f23
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 1.188118811881188,
+  "epoch": 1.306930693069307,
   "eval_steps": 42,
-  "global_step": 420,
+  "global_step": 462,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1075,6 +1075,112 @@
       "eval_samples_per_second": 92.516,
       "eval_steps_per_second": 11.642,
       "step": 420
+    },
+    {
+      "epoch": 1.1966053748231966,
+      "grad_norm": 0.09405164420604706,
+      "learning_rate": 5.9702234071631e-06,
+      "loss": 11.8103,
+      "step": 423
+    },
+    {
+      "epoch": 1.2050919377652052,
+      "grad_norm": 0.07892228662967682,
+      "learning_rate": 5.5226705990794155e-06,
+      "loss": 11.8166,
+      "step": 426
+    },
+    {
+      "epoch": 1.2135785007072135,
+      "grad_norm": 0.09944994747638702,
+      "learning_rate": 5.091571939329048e-06,
+      "loss": 11.8157,
+      "step": 429
+    },
+    {
+      "epoch": 1.222065063649222,
+      "grad_norm": 0.09870926290750504,
+      "learning_rate": 4.677086910538092e-06,
+      "loss": 11.8141,
+      "step": 432
+    },
+    {
+      "epoch": 1.2305516265912306,
+      "grad_norm": 0.16009728610515594,
+      "learning_rate": 4.279368849209381e-06,
+      "loss": 11.8126,
+      "step": 435
+    },
+    {
+      "epoch": 1.239038189533239,
+      "grad_norm": 0.10148247331380844,
+      "learning_rate": 3.898564888996476e-06,
+      "loss": 11.8173,
+      "step": 438
+    },
+    {
+      "epoch": 1.2475247524752475,
+      "grad_norm": 0.057802699506282806,
+      "learning_rate": 3.534815906272404e-06,
+      "loss": 11.8136,
+      "step": 441
+    },
+    {
+      "epoch": 1.256011315417256,
+      "grad_norm": 0.059853699058294296,
+      "learning_rate": 3.18825646801314e-06,
+      "loss": 11.8178,
+      "step": 444
+    },
+    {
+      "epoch": 1.2644978783592644,
+      "grad_norm": 0.11142271012067795,
+      "learning_rate": 2.8590147820153513e-06,
+      "loss": 11.8121,
+      "step": 447
+    },
+    {
+      "epoch": 1.272984441301273,
+      "grad_norm": 0.12580835819244385,
+      "learning_rate": 2.547212649466568e-06,
+      "loss": 11.8092,
+      "step": 450
+    },
+    {
+      "epoch": 1.2814710042432815,
+      "grad_norm": 0.09984395653009415,
+      "learning_rate": 2.2529654198854835e-06,
+      "loss": 11.8177,
+      "step": 453
+    },
+    {
+      "epoch": 1.28995756718529,
+      "grad_norm": 0.06916932761669159,
+      "learning_rate": 1.9763819484490355e-06,
+      "loss": 11.8155,
+      "step": 456
+    },
+    {
+      "epoch": 1.2984441301272984,
+      "grad_norm": 0.15921108424663544,
+      "learning_rate": 1.7175645557220566e-06,
+      "loss": 11.813,
+      "step": 459
+    },
+    {
+      "epoch": 1.306930693069307,
+      "grad_norm": 0.08516174554824829,
+      "learning_rate": 1.4766089898042678e-06,
+      "loss": 11.81,
+      "step": 462
+    },
+    {
+      "epoch": 1.306930693069307,
+      "eval_loss": 11.817733764648438,
+      "eval_runtime": 6.4426,
+      "eval_samples_per_second": 92.509,
+      "eval_steps_per_second": 11.641,
+      "step": 462
     }
   ],
   "logging_steps": 3,
@@ -1094,7 +1200,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2580649574400.0,
+  "total_flos": 2832537845760.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null