masatochi committed on
Commit 1ac06cf
1 Parent(s): d58d03a

Training in progress, step 190, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:776cec9cf12dba4edbc666da11f20f14eff4789b304a641b85c8b02060d2f9fc
+oid sha256:8318c9dddfde2626e16874ceab9b5d73d41a57875ba111796e77fac52c497fe2
 size 48679352
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:84601093108a66d9bc78ce4988113205ced8e3faec71d2128b66c8bf76bb4bff
+oid sha256:9dd6070eee2e88beac2318243b02e70f68a06b1d2ce5278e1d7a89b7b0a14e01
 size 25152500
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d7ba1183a4e6a2706678b09a9085a1829e9abfdfbc778423e9dab06b73f84622
+oid sha256:6c22a61a5b47d1b3a3a2eb5d25761bba9dd7d0f656b8993398fcde00940b097b
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:31cc4c125027b06153274d1c1fcc2291ff49e04af7d8c2cae65dd480bfd90a0c
+oid sha256:b289b438571df9d34409287e67864402aaad98d9ecdf87ccd44b9abb7f5b6982
 size 1064
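
The four checkpoint files above are stored via Git LFS, so each diff only changes the pointer: a `version` line, the `oid sha256:` of the blob, and its `size` in bytes. Below is a minimal sketch (not part of this repository) of verifying a locally downloaded checkpoint file against the pointer values shown in this commit; the file path and variable names are illustrative.

```python
# Minimal sketch: check a downloaded file against its Git LFS pointer
# (oid sha256 and size taken from the diff above). Path is hypothetical.
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file through SHA-256 so large checkpoints fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

ckpt = Path("last-checkpoint/adapter_model.safetensors")  # hypothetical local copy
expected_oid = "8318c9dddfde2626e16874ceab9b5d73d41a57875ba111796e77fac52c497fe2"
expected_size = 48679352

assert ckpt.stat().st_size == expected_size, "size does not match LFS pointer"
assert sha256_of(ckpt) == expected_oid, "sha256 does not match LFS pointer"
print("checkpoint file matches its LFS pointer")
```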
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.09047007763310716,
+  "epoch": 0.09291521486643438,
   "eval_steps": 34,
-  "global_step": 185,
+  "global_step": 190,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1350,6 +1350,41 @@
       "learning_rate": 3.817435682718096e-06,
       "loss": 1.0669,
       "step": 185
+    },
+    {
+      "epoch": 0.0909591050797726,
+      "grad_norm": 0.2308908998966217,
+      "learning_rate": 3.3281595730812575e-06,
+      "loss": 1.0341,
+      "step": 186
+    },
+    {
+      "epoch": 0.09144813252643805,
+      "grad_norm": 0.28636959195137024,
+      "learning_rate": 2.8718968083886075e-06,
+      "loss": 1.2459,
+      "step": 187
+    },
+    {
+      "epoch": 0.09193715997310349,
+      "grad_norm": 0.2441776990890503,
+      "learning_rate": 2.4488032019563402e-06,
+      "loss": 1.0977,
+      "step": 188
+    },
+    {
+      "epoch": 0.09242618741976893,
+      "grad_norm": 0.4734705686569214,
+      "learning_rate": 2.0590232398634114e-06,
+      "loss": 1.0091,
+      "step": 189
+    },
+    {
+      "epoch": 0.09291521486643438,
+      "grad_norm": 0.32178983092308044,
+      "learning_rate": 1.7026900316098215e-06,
+      "loss": 1.1748,
+      "step": 190
+    }
     }
   ],
   "logging_steps": 1,
@@ -1369,7 +1404,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.089010322833408e+17,
+  "total_flos": 3.172497088315392e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null