fats-fme committed · Commit 2ad8d6e · verified · 1 Parent(s): d39f215

Training in progress, step 230, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:abcde8ac3d04c98a861549359d1224345d51b37788f3b8a385d055e42467481e
+oid sha256:0058ffcc6efb0fb8024e032406ca7c2f41c337a02c7938182abbadd8bc3bbe69
 size 97307544
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e12dbbc77c83e3643e10bdd747b43283d299a4a0443c375284bff3cb8b034d78
+oid sha256:6ae8a3d475d446a27ef9f13a9b5ff0498ac5d8f241b076b83b5a02fb69d53de8
 size 194840426
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6885c98f9944031b1d47a617f1a2d46af56909da93ca8c4ac4a873f90d3142fe
+oid sha256:03a2f97aaf17614b5db2c4dbff2ea61103afa8e120a960928e004cec7d90f368
 size 14512
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3901384dd9ab7f4272cbe89ec0e7d7be7b55f7e04d725cfcd750d27555d4c8c0
+oid sha256:0a68f41bd8b3d42d059b16bec1fc1ddadc36342efa5d605abed6afa295e695d1
 size 14512
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1cfc363eda5dfe78796b361134c848de53d3bd2047f481ddb99265e158e573b4
+oid sha256:caf89d0fd7dd5ddc878456c8fb1b50160ea7c7e2397f17c1abfe2701415a1a4d
 size 1064
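
The five files above are Git LFS pointer files, not the binaries themselves: each records only the LFS spec version, the sha256 OID of the tracked blob, and its size in bytes, and this commit swaps the old OIDs for the ones produced at step 230. A minimal Python sketch for reading such a pointer and checking a locally downloaded blob against it (the pointer path is taken from this commit; the blob path and helper names are illustrative assumptions):

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    """Parse a Git LFS pointer file into its version/oid/size fields."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    """Check a downloaded blob against the sha256 OID and size in its pointer."""
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].split(":", 1)[1]  # "sha256:<hex>" -> "<hex>"
    data = Path(blob_path).read_bytes()
    return (
        len(data) == int(fields["size"])
        and hashlib.sha256(data).hexdigest() == expected_oid
    )

# Hypothetical usage: compare the committed pointer with a resolved download.
# print(verify_blob("last-checkpoint/adapter_model.safetensors",
#                   "downloads/adapter_model.safetensors"))
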
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.754880694143167,
+  "epoch": 0.9978308026030369,
   "eval_steps": 58,
-  "global_step": 174,
+  "global_step": 230,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1257,6 +1257,398 @@
       "eval_samples_per_second": 8.843,
       "eval_steps_per_second": 2.228,
       "step": 174
+    },
+    {
+      "epoch": 0.7592190889370932,
+      "grad_norm": 3.558817148208618,
+      "learning_rate": 4.264235636489542e-05,
+      "loss": 3.0184,
+      "step": 175
+    },
+    {
+      "epoch": 0.7635574837310195,
+      "grad_norm": 0.48007091879844666,
+      "learning_rate": 4.12214747707527e-05,
+      "loss": 3.0708,
+      "step": 176
+    },
+    {
+      "epoch": 0.7678958785249458,
+      "grad_norm": 0.606035590171814,
+      "learning_rate": 3.981849768479517e-05,
+      "loss": 3.1584,
+      "step": 177
+    },
+    {
+      "epoch": 0.7722342733188721,
+      "grad_norm": 0.6647348999977112,
+      "learning_rate": 3.843385246743417e-05,
+      "loss": 3.3003,
+      "step": 178
+    },
+    {
+      "epoch": 0.7765726681127982,
+      "grad_norm": 0.6956514120101929,
+      "learning_rate": 3.7067960895016275e-05,
+      "loss": 3.2168,
+      "step": 179
+    },
+    {
+      "epoch": 0.7809110629067245,
+      "grad_norm": 0.7575390338897705,
+      "learning_rate": 3.5721239031346066e-05,
+      "loss": 3.2576,
+      "step": 180
+    },
+    {
+      "epoch": 0.7852494577006508,
+      "grad_norm": 0.8106915950775146,
+      "learning_rate": 3.439409710094929e-05,
+      "loss": 3.124,
+      "step": 181
+    },
+    {
+      "epoch": 0.789587852494577,
+      "grad_norm": 0.873997688293457,
+      "learning_rate": 3.308693936411421e-05,
+      "loss": 3.0977,
+      "step": 182
+    },
+    {
+      "epoch": 0.7939262472885033,
+      "grad_norm": 0.9612168073654175,
+      "learning_rate": 3.1800163993750166e-05,
+      "loss": 3.435,
+      "step": 183
+    },
+    {
+      "epoch": 0.7982646420824295,
+      "grad_norm": 0.9549990892410278,
+      "learning_rate": 3.053416295410026e-05,
+      "loss": 3.2216,
+      "step": 184
+    },
+    {
+      "epoch": 0.8026030368763557,
+      "grad_norm": 0.9309582710266113,
+      "learning_rate": 2.9289321881345254e-05,
+      "loss": 3.0546,
+      "step": 185
+    },
+    {
+      "epoch": 0.806941431670282,
+      "grad_norm": 1.0800687074661255,
+      "learning_rate": 2.8066019966134904e-05,
+      "loss": 3.2283,
+      "step": 186
+    },
+    {
+      "epoch": 0.8112798264642083,
+      "grad_norm": 1.0009733438491821,
+      "learning_rate": 2.6864629838082956e-05,
+      "loss": 3.1638,
+      "step": 187
+    },
+    {
+      "epoch": 0.8156182212581344,
+      "grad_norm": 1.1134998798370361,
+      "learning_rate": 2.5685517452260567e-05,
+      "loss": 3.2642,
+      "step": 188
+    },
+    {
+      "epoch": 0.8199566160520607,
+      "grad_norm": 1.1395593881607056,
+      "learning_rate": 2.45290419777228e-05,
+      "loss": 3.0712,
+      "step": 189
+    },
+    {
+      "epoch": 0.824295010845987,
+      "grad_norm": 1.1547160148620605,
+      "learning_rate": 2.339555568810221e-05,
+      "loss": 3.2101,
+      "step": 190
+    },
+    {
+      "epoch": 0.8286334056399133,
+      "grad_norm": 1.2223323583602905,
+      "learning_rate": 2.2285403854302912e-05,
+      "loss": 3.1109,
+      "step": 191
+    },
+    {
+      "epoch": 0.8329718004338394,
+      "grad_norm": 1.4417051076889038,
+      "learning_rate": 2.119892463932781e-05,
+      "loss": 3.2497,
+      "step": 192
+    },
+    {
+      "epoch": 0.8373101952277657,
+      "grad_norm": 1.3542780876159668,
+      "learning_rate": 2.013644899527074e-05,
+      "loss": 3.23,
+      "step": 193
+    },
+    {
+      "epoch": 0.841648590021692,
+      "grad_norm": 1.5529882907867432,
+      "learning_rate": 1.9098300562505266e-05,
+      "loss": 3.2404,
+      "step": 194
+    },
+    {
+      "epoch": 0.8459869848156182,
+      "grad_norm": 1.63187575340271,
+      "learning_rate": 1.808479557110081e-05,
+      "loss": 3.0776,
+      "step": 195
+    },
+    {
+      "epoch": 0.8503253796095445,
+      "grad_norm": 1.6470518112182617,
+      "learning_rate": 1.7096242744495837e-05,
+      "loss": 3.1312,
+      "step": 196
+    },
+    {
+      "epoch": 0.8546637744034707,
+      "grad_norm": 1.8358676433563232,
+      "learning_rate": 1.6132943205457606e-05,
+      "loss": 3.0136,
+      "step": 197
+    },
+    {
+      "epoch": 0.8590021691973969,
+      "grad_norm": 2.2392208576202393,
+      "learning_rate": 1.5195190384357404e-05,
+      "loss": 3.0873,
+      "step": 198
+    },
+    {
+      "epoch": 0.8633405639913232,
+      "grad_norm": 2.3587329387664795,
+      "learning_rate": 1.4283269929788779e-05,
+      "loss": 3.1336,
+      "step": 199
+    },
+    {
+      "epoch": 0.8676789587852495,
+      "grad_norm": 3.4689748287200928,
+      "learning_rate": 1.339745962155613e-05,
+      "loss": 3.2269,
+      "step": 200
+    },
+    {
+      "epoch": 0.8720173535791758,
+      "grad_norm": 0.4676075577735901,
+      "learning_rate": 1.2538029286060426e-05,
+      "loss": 3.2194,
+      "step": 201
+    },
+    {
+      "epoch": 0.8763557483731019,
+      "grad_norm": 0.5948041081428528,
+      "learning_rate": 1.1705240714107302e-05,
+      "loss": 3.2006,
+      "step": 202
+    },
+    {
+      "epoch": 0.8806941431670282,
+      "grad_norm": 0.6200747489929199,
+      "learning_rate": 1.0899347581163221e-05,
+      "loss": 3.1966,
+      "step": 203
+    },
+    {
+      "epoch": 0.8850325379609545,
+      "grad_norm": 0.6264815926551819,
+      "learning_rate": 1.0120595370083318e-05,
+      "loss": 3.1552,
+      "step": 204
+    },
+    {
+      "epoch": 0.8893709327548807,
+      "grad_norm": 0.6958035230636597,
+      "learning_rate": 9.369221296335006e-06,
+      "loss": 3.21,
+      "step": 205
+    },
+    {
+      "epoch": 0.8937093275488069,
+      "grad_norm": 0.7550477981567383,
+      "learning_rate": 8.645454235739903e-06,
+      "loss": 3.2491,
+      "step": 206
+    },
+    {
+      "epoch": 0.8980477223427332,
+      "grad_norm": 0.78013014793396,
+      "learning_rate": 7.949514654755962e-06,
+      "loss": 3.158,
+      "step": 207
+    },
+    {
+      "epoch": 0.9023861171366594,
+      "grad_norm": 0.786949098110199,
+      "learning_rate": 7.281614543321269e-06,
+      "loss": 3.2446,
+      "step": 208
+    },
+    {
+      "epoch": 0.9067245119305857,
+      "grad_norm": 0.8102577924728394,
+      "learning_rate": 6.6419573502798374e-06,
+      "loss": 3.2238,
+      "step": 209
+    },
+    {
+      "epoch": 0.911062906724512,
+      "grad_norm": 0.8839837908744812,
+      "learning_rate": 6.030737921409169e-06,
+      "loss": 3.1035,
+      "step": 210
+    },
+    {
+      "epoch": 0.9154013015184381,
+      "grad_norm": 0.9286414980888367,
+      "learning_rate": 5.448142440068316e-06,
+      "loss": 3.2198,
+      "step": 211
+    },
+    {
+      "epoch": 0.9197396963123644,
+      "grad_norm": 1.031367540359497,
+      "learning_rate": 4.8943483704846475e-06,
+      "loss": 3.207,
+      "step": 212
+    },
+    {
+      "epoch": 0.9240780911062907,
+      "grad_norm": 1.1086468696594238,
+      "learning_rate": 4.369524403696457e-06,
+      "loss": 3.2715,
+      "step": 213
+    },
+    {
+      "epoch": 0.928416485900217,
+      "grad_norm": 1.0586810111999512,
+      "learning_rate": 3.873830406168111e-06,
+      "loss": 3.2091,
+      "step": 214
+    },
+    {
+      "epoch": 0.9327548806941431,
+      "grad_norm": 1.0433012247085571,
+      "learning_rate": 3.40741737109318e-06,
+      "loss": 3.11,
+      "step": 215
+    },
+    {
+      "epoch": 0.9370932754880694,
+      "grad_norm": 1.214693546295166,
+      "learning_rate": 2.970427372400353e-06,
+      "loss": 3.1984,
+      "step": 216
+    },
+    {
+      "epoch": 0.9414316702819957,
+      "grad_norm": 1.3140201568603516,
+      "learning_rate": 2.5629935214764865e-06,
+      "loss": 3.1381,
+      "step": 217
+    },
+    {
+      "epoch": 0.9457700650759219,
+      "grad_norm": 1.439610242843628,
+      "learning_rate": 2.1852399266194314e-06,
+      "loss": 3.2828,
+      "step": 218
+    },
+    {
+      "epoch": 0.9501084598698482,
+      "grad_norm": 1.447763204574585,
+      "learning_rate": 1.8372816552336026e-06,
+      "loss": 3.1896,
+      "step": 219
+    },
+    {
+      "epoch": 0.9544468546637744,
+      "grad_norm": 1.5651224851608276,
+      "learning_rate": 1.5192246987791981e-06,
+      "loss": 3.0176,
+      "step": 220
+    },
+    {
+      "epoch": 0.9587852494577006,
+      "grad_norm": 1.7138340473175049,
+      "learning_rate": 1.231165940486234e-06,
+      "loss": 3.0649,
+      "step": 221
+    },
+    {
+      "epoch": 0.9631236442516269,
+      "grad_norm": 1.7278990745544434,
+      "learning_rate": 9.731931258429638e-07,
+      "loss": 2.901,
+      "step": 222
+    },
+    {
+      "epoch": 0.9674620390455532,
+      "grad_norm": 1.8585275411605835,
+      "learning_rate": 7.453848358678017e-07,
+      "loss": 2.9228,
+      "step": 223
+    },
+    {
+      "epoch": 0.9718004338394793,
+      "grad_norm": 2.438549757003784,
+      "learning_rate": 5.478104631726711e-07,
+      "loss": 2.9518,
+      "step": 224
+    },
+    {
+      "epoch": 0.9761388286334056,
+      "grad_norm": 3.6199636459350586,
+      "learning_rate": 3.805301908254455e-07,
+      "loss": 2.9323,
+      "step": 225
+    },
+    {
+      "epoch": 0.9804772234273319,
+      "grad_norm": 0.5968942046165466,
+      "learning_rate": 2.4359497401758024e-07,
+      "loss": 3.0911,
+      "step": 226
+    },
+    {
+      "epoch": 0.9848156182212582,
+      "grad_norm": 0.8684574365615845,
+      "learning_rate": 1.3704652454261668e-07,
+      "loss": 3.2219,
+      "step": 227
+    },
+    {
+      "epoch": 0.9891540130151844,
+      "grad_norm": 1.1482932567596436,
+      "learning_rate": 6.09172980904238e-08,
+      "loss": 3.0405,
+      "step": 228
+    },
+    {
+      "epoch": 0.9934924078091106,
+      "grad_norm": 1.4431898593902588,
+      "learning_rate": 1.5230484360873044e-08,
+      "loss": 3.1692,
+      "step": 229
+    },
+    {
+      "epoch": 0.9978308026030369,
+      "grad_norm": 1.78467857837677,
+      "learning_rate": 0.0,
+      "loss": 2.932,
+      "step": 230
     }
   ],
   "logging_steps": 1,
@@ -1271,12 +1663,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 3.8904213574936166e+17,
+  "total_flos": 5.142510989790413e+17,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null