masatochi committed on
Commit
8ca0b21
1 Parent(s): a7542f8

Training in progress, step 200, checkpoint

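For context, the last-checkpoint/ layout below matches what the Hugging Face Trainer pushes when checkpoint uploading to the Hub is enabled. A minimal sketch of such a configuration; the batch size, logging and eval intervals are read off the trainer_state.json diff further down, while output_dir and the hub settings are assumptions, not recorded in this commit:

```python
from transformers import TrainingArguments

# Sketch only: values marked "from trainer_state.json" come from the diff below;
# output_dir, push_to_hub and hub_strategy are assumptions about how the
# last-checkpoint/ folder gets pushed, not facts from this repository.
args = TrainingArguments(
    output_dir="outputs",            # assumed
    per_device_train_batch_size=3,   # from trainer_state.json: "train_batch_size": 3
    logging_steps=1,                 # from trainer_state.json: "logging_steps": 1
    eval_steps=34,                   # from trainer_state.json: "eval_steps": 34
    max_steps=200,                   # LR reaches 0.0 and training stops at step 200
    push_to_hub=True,                # assumed
    hub_strategy="checkpoint",       # assumed; pushes the latest checkpoint as last-checkpoint/
)
```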
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2c3b13f46fab857b7593afd293acbd2ecc359cd3fc048173aa1f2de4f4b2cda1
+oid sha256:74b7fdbe18f735663f945595812df22419ad68912867e25bbdd6ee5a590e16d7
 size 48679352
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c22804634ff2f696f79d4b967dad276d98877eb5c9ebb743f1e7aa2fbb8245a3
+oid sha256:d2b9195ba00ac4d43a2362a30d16aeb1985e0075fba4c979f363cd18ff10b87f
 size 25152500
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d725b764a9f7a07542bff2bd49e82f50571269a38edb42c4d371ddaac7f6a8e3
+oid sha256:be9ec3198a68ec65f16686b0f1d5a6ba174d2aca875621b68207c99c69128ae9
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a35430a05f2b9748f37dd11667a782564c85a35d840d60cbaddfa2c905ab7c0a
+oid sha256:ca9a25c72339c898b564e0c464a3f6fc75bbeec408008928b7ed05533156b98c
 size 1064
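Each of the four files above is stored with Git LFS, so the diff shows only the pointer file changing: the spec version, the object's SHA-256 digest (`oid`), and its size in bytes. A minimal sketch for checking a pulled file against the pointer's `oid`, assuming the real objects have already been fetched (e.g. with `git lfs pull`):

```python
import hashlib

def lfs_oid(path: str) -> str:
    """Return the hex SHA-256 of a file, i.e. the `oid sha256:` value in its LFS pointer."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# Should print 74b7fdbe... for the adapter weights in this commit (after `git lfs pull`).
print(lfs_oid("last-checkpoint/adapter_model.safetensors"))
```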
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0953603520997616,
+  "epoch": 0.09780548933308882,
   "eval_steps": 34,
-  "global_step": 195,
+  "global_step": 200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1420,6 +1420,41 @@
       "learning_rate": 4.2658237049655323e-07,
       "loss": 1.1014,
       "step": 195
+    },
+    {
+      "epoch": 0.09584937954642704,
+      "grad_norm": 0.4199082851409912,
+      "learning_rate": 2.7308266142119785e-07,
+      "loss": 1.0427,
+      "step": 196
+    },
+    {
+      "epoch": 0.09633840699309248,
+      "grad_norm": 0.26134058833122253,
+      "learning_rate": 1.5363960325660565e-07,
+      "loss": 1.216,
+      "step": 197
+    },
+    {
+      "epoch": 0.09682743443975793,
+      "grad_norm": 0.4043033719062805,
+      "learning_rate": 6.829398569770939e-08,
+      "loss": 1.0911,
+      "step": 198
+    },
+    {
+      "epoch": 0.09731646188642337,
+      "grad_norm": 0.36261600255966187,
+      "learning_rate": 1.7074954194729044e-08,
+      "loss": 1.1118,
+      "step": 199
+    },
+    {
+      "epoch": 0.09780548933308882,
+      "grad_norm": 0.44171595573425293,
+      "learning_rate": 0.0,
+      "loss": 1.0957,
+      "step": 200
+    }
     }
   ],
   "logging_steps": 1,
@@ -1434,12 +1469,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 3.255983853797376e+17,
+  "total_flos": 3.33947061927936e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null