farmery committed (verified)
Commit 2b3fe7e · 1 Parent(s): 9853673

Training in progress, step 500, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:37cf3369fc4b4dddec6b9e317be776ded9712a8b15466e0d98ba0bf3e66afe57
+oid sha256:da7e2146353d9fd2a532755bf7672797e1ed57b0b3e93e08037d7e993489d6d6
 size 251748704
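
Each block in this diff is a Git LFS pointer rather than the binary payload itself: it records the pointer spec version, the sha256 oid of the stored object, and its size in bytes. The commit only swaps the oid for each file; the sizes stay the same, as expected when the same tensors are re-saved with new values. As a rough optional check, a downloaded copy of the adapter could be verified against the new pointer along these lines (the local path and the hashing helper are assumptions for illustration, not part of this repository):

```python
# Minimal sketch: verify a downloaded LFS object against its pointer.
# The path below is an assumption about where the checkout lives locally.
import hashlib
import os

EXPECTED_OID = "da7e2146353d9fd2a532755bf7672797e1ed57b0b3e93e08037d7e993489d6d6"
EXPECTED_SIZE = 251748704
PATH = "last-checkpoint/adapter_model.safetensors"  # hypothetical local copy

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in chunks so a ~250 MB checkpoint never sits in memory twice."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

assert os.path.getsize(PATH) == EXPECTED_SIZE, "size does not match the LFS pointer"
assert sha256_of(PATH) == EXPECTED_OID, "sha256 does not match the LFS pointer"
print("adapter_model.safetensors matches its LFS pointer")
```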
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:83bd32947a229874ca5ba692cc6fb122702696c30165573bcc776cd8f51d33ec
+oid sha256:18a6179f15e70cb7394c34d0273c738d6d74622f6be61454e60462c3ccb73981
 size 128585300
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7ad549467aee21b1d7841355db396e8f80727b2cce0c91d316fca55cfd3f145e
+oid sha256:57cb7d09666fef2e8e761f6393442ac69abb04449214d7bb5b7350c3a8e5782e
 size 14960
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6aabdae8fa17d1f82073d1f2d4117f3b2c974d593012d8b40bf05683705e1d30
+oid sha256:fad0704765a7d399647cc820ef5379f92859ca86bec18c0f64043e248a12aa6f
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:89849a77ec6561cbbf56f2e2c9c58d90b30d57d1de75d3864248f6cbf0735630
+oid sha256:2ead883fd0847181215695597ad4b86ee178fbfd5e61058420e28111eb302c6f
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fdb8f124b2e4b7af08f331fbd2f11e354c981be3f4ab87e1dc330486d9d9d31d
+oid sha256:d9d977c0648dd0b7c55de3e6a8c311c70942568b8992246f8fa43bef1aca958d
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c9a75ab01cc15879cd61ff8e586fb370a9b8a51bf7b319e44e27d87274e2e703
+oid sha256:2e87d47519e32af310d5ce258e0454d331221391a1b9ee9143859120658f7a9a
 size 1064
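
Besides the adapter weights, the checkpoint carries the optimizer and LR-scheduler state plus one rng_state_<rank>.pth per training process (four here, which suggests a 4-worker run); these are what let Trainer.train(resume_from_checkpoint=...) continue a run exactly instead of restarting the schedule and data order. A minimal sketch for peeking inside these files, assuming a local checkout of last-checkpoint/ and a reasonably recent PyTorch (exact key layouts can vary by transformers/torch version):

```python
# Minimal sketch: inspect the auxiliary checkpoint files from last-checkpoint/.
import torch

# weights_only=False is needed under newer PyTorch defaults because these files
# hold plain Python/NumPy RNG state and optimizer metadata, not just tensors.
optim = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
print(type(optim), list(optim.keys()) if isinstance(optim, dict) else "")

sched = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
print(sched)  # LR-scheduler state_dict (e.g. last_epoch and per-group LRs)

# One RNG snapshot per data-parallel rank; restoring them keeps dropout and
# shuffling deterministic when the run resumes.
rng = torch.load("last-checkpoint/rng_state_0.pth", map_location="cpu", weights_only=False)
print(list(rng.keys()))
```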
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 1.6267605633802817,
+  "epoch": 1.76056338028169,
   "eval_steps": 42,
-  "global_step": 462,
+  "global_step": 500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1181,6 +1181,90 @@
       "eval_samples_per_second": 57.055,
       "eval_steps_per_second": 1.789,
       "step": 462
+    },
+    {
+      "epoch": 1.637323943661972,
+      "grad_norm": 1.6809414625167847,
+      "learning_rate": 6.268021954544096e-07,
+      "loss": 0.4512,
+      "step": 465
+    },
+    {
+      "epoch": 1.647887323943662,
+      "grad_norm": 1.5099436044692993,
+      "learning_rate": 5.243166291926782e-07,
+      "loss": 0.4482,
+      "step": 468
+    },
+    {
+      "epoch": 1.658450704225352,
+      "grad_norm": 1.7300583124160767,
+      "learning_rate": 4.308857100999042e-07,
+      "loss": 0.465,
+      "step": 471
+    },
+    {
+      "epoch": 1.6690140845070423,
+      "grad_norm": 1.4459805488586426,
+      "learning_rate": 3.465440024411265e-07,
+      "loss": 0.4559,
+      "step": 474
+    },
+    {
+      "epoch": 1.6795774647887325,
+      "grad_norm": 1.5714209079742432,
+      "learning_rate": 2.7132270797659563e-07,
+      "loss": 0.4662,
+      "step": 477
+    },
+    {
+      "epoch": 1.6901408450704225,
+      "grad_norm": 1.716829776763916,
+      "learning_rate": 2.052496544188487e-07,
+      "loss": 0.4866,
+      "step": 480
+    },
+    {
+      "epoch": 1.7007042253521125,
+      "grad_norm": 1.6884804964065552,
+      "learning_rate": 1.483492851379914e-07,
+      "loss": 0.4441,
+      "step": 483
+    },
+    {
+      "epoch": 1.711267605633803,
+      "grad_norm": 1.439448356628418,
+      "learning_rate": 1.006426501190233e-07,
+      "loss": 0.4588,
+      "step": 486
+    },
+    {
+      "epoch": 1.721830985915493,
+      "grad_norm": 1.6845688819885254,
+      "learning_rate": 6.214739817448633e-08,
+      "loss": 0.4704,
+      "step": 489
+    },
+    {
+      "epoch": 1.732394366197183,
+      "grad_norm": 2.208343029022217,
+      "learning_rate": 3.287777041539042e-08,
+      "loss": 0.469,
+      "step": 492
+    },
+    {
+      "epoch": 1.7429577464788732,
+      "grad_norm": 1.6495448350906372,
+      "learning_rate": 1.284459498280266e-08,
+      "loss": 0.4331,
+      "step": 495
+    },
+    {
+      "epoch": 1.7535211267605635,
+      "grad_norm": 1.6916940212249756,
+      "learning_rate": 2.055283042018408e-09,
+      "loss": 0.466,
+      "step": 498
     }
   ],
   "logging_steps": 3,
@@ -1195,12 +1279,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 3.874337732541022e+18,
+  "total_flos": 4.193006204764553e+18,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
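
The trainer_state.json change records the run advancing from global_step 462 (epoch ≈ 1.63) to 500 (epoch ≈ 1.76): twelve new loss entries are appended to log_history (one every 3 steps, matching "logging_steps": 3), the learning rate decays essentially to zero, and should_training_stop flips to true, so step 500 is the final step of this run. A minimal sketch for reading that state back, assuming a local copy of the file and the stateful_callbacks layout shown above:

```python
# Minimal sketch: read the updated trainer state back from a local checkout.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"], state["epoch"])  # 500, ~1.76

control = state["stateful_callbacks"]["TrainerControl"]["args"]
print(control["should_training_stop"])       # True: the run ended at this step

# The loss entries this checkpoint appended (logged every 3 steps from 465 to 498).
for entry in state["log_history"]:
    if entry.get("step", 0) >= 465 and "loss" in entry:
        print(entry["step"], entry["loss"], entry["learning_rate"])
```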