masatochi committed
Commit 65fdd1b
1 parent: 3f5544a

Training in progress, step 195, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8318c9dddfde2626e16874ceab9b5d73d41a57875ba111796e77fac52c497fe2
+oid sha256:f2677dd4a92b174119d84e1626685f2540ff3efdaac5ef15f2eefc7965415be8
 size 48679352
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9dd6070eee2e88beac2318243b02e70f68a06b1d2ce5278e1d7a89b7b0a14e01
+oid sha256:aa9ee12d2d1983b469d62c943ef7e32af98d0056c03fb247b708a5b815a83e52
 size 25152500
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6c22a61a5b47d1b3a3a2eb5d25761bba9dd7d0f656b8993398fcde00940b097b
+oid sha256:d725b764a9f7a07542bff2bd49e82f50571269a38edb42c4d371ddaac7f6a8e3
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b289b438571df9d34409287e67864402aaad98d9ecdf87ccd44b9abb7f5b6982
+oid sha256:a35430a05f2b9748f37dd11667a782564c85a35d840d60cbaddfa2c905ab7c0a
 size 1064
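
All four files above are Git LFS pointers rather than the binaries themselves: each pointer records the spec version, the `oid sha256:` digest, and the `size` in bytes of the blob it stands for. As a minimal sketch (not part of this commit, Python 3.9+), a downloaded blob could be checked against such a pointer like this; the local path is only an assumption about where the checkpoint was fetched to.

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_text: str, blob_path: str) -> bool:
    """Check a downloaded blob against the oid/size fields of a Git LFS pointer."""
    # Pointer lines look like "oid sha256:<hex>" and "size <bytes>".
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    blob = Path(blob_path)
    if blob.stat().st_size != expected_size:
        return False

    digest = hashlib.sha256()
    with blob.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Example against the new adapter weights from this commit
# (hypothetical local path to the downloaded file).
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:f2677dd4a92b174119d84e1626685f2540ff3efdaac5ef15f2eefc7965415be8
size 48679352
"""
print(verify_lfs_pointer(pointer, "last-checkpoint/adapter_model.safetensors"))
```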
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.09291521486643438,
+  "epoch": 0.0953603520997616,
   "eval_steps": 34,
-  "global_step": 190,
+  "global_step": 195,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1385,6 +1385,41 @@
       "learning_rate": 1.7026900316098215e-06,
       "loss": 1.1748,
       "step": 190
+    },
+    {
+      "epoch": 0.09340424231309982,
+      "grad_norm": 0.2973954677581787,
+      "learning_rate": 1.3799252646597426e-06,
+      "loss": 1.2553,
+      "step": 191
+    },
+    {
+      "epoch": 0.09389326975976527,
+      "grad_norm": 0.30735060572624207,
+      "learning_rate": 1.0908391628854041e-06,
+      "loss": 1.0603,
+      "step": 192
+    },
+    {
+      "epoch": 0.09438229720643071,
+      "grad_norm": 0.39257532358169556,
+      "learning_rate": 8.355304489257254e-07,
+      "loss": 1.0304,
+      "step": 193
+    },
+    {
+      "epoch": 0.09487132465309615,
+      "grad_norm": 0.31010863184928894,
+      "learning_rate": 6.140863104726391e-07,
+      "loss": 1.0806,
+      "step": 194
+    },
+    {
+      "epoch": 0.0953603520997616,
+      "grad_norm": 0.2375696897506714,
+      "learning_rate": 4.2658237049655323e-07,
+      "loss": 1.1009,
+      "step": 195
     }
   ],
   "logging_steps": 1,
@@ -1404,7 +1439,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.172497088315392e+17,
+  "total_flos": 3.255983853797376e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null