RoyJoy committed
Commit d47edbb · verified · 1 Parent(s): 49a3471

Training in progress, step 250, checkpoint

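For context, a minimal sketch of attaching the LoRA adapter saved in last-checkpoint/ to its base model with peft; the base model is not named in this commit, so BASE_MODEL below is a placeholder.

# Minimal sketch: load the adapter from last-checkpoint/ on top of a base model.
# BASE_MODEL is hypothetical; the actual base model is not recorded in this commit.
from transformers import AutoModelForCausalLM
from peft import PeftModel

BASE_MODEL = "org/base-model"  # placeholder

base = AutoModelForCausalLM.from_pretrained(BASE_MODEL)
# adapter_model.safetensors (13587864 bytes) holds only the adapter weights,
# applied on top of the frozen base model.
model = PeftModel.from_pretrained(base, "last-checkpoint")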
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b7019cb9aa201aea19db1270f90077e433f32992e44dade7548821d8c154f8dd
+oid sha256:1cce123c0683ef9cd6975eb4e7a421ac1277037ddb1f56ad868f4857ff4175af
 size 13587864
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:386c3f379a00095fe8496f8f431507c91666e8cfccb1ccdbd5d46ffa72194cd7
+oid sha256:8682728647446457022a625059062c8515250852f037491dc6e4ba299841c1d0
 size 27273018
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a23fd22389371cc845ff538f36c85d6acc6b9021c5b65cb5faef2f7e821d95a8
+oid sha256:050e050c2c08370bd1f9ba1c8620beabe7ff029584c29e4cd6c089022f033e90
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5fb312fb50ba43fd98c0b5fe4042c1e3ef9cd58378ec0d1e1bf5c56e15568b12
+oid sha256:0cf8a26ba280c3e7ef5570adf2f8b789f2d472d32e5416b32aa7525b9226e0be
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4689c594a2b486efb36e60e76c3ec318218071433606da34ac3dcedbc93ca127
+oid sha256:f3fbc12a030d5fdd31b311d40ef25f1be4dcd3dcc88032d5c293f225b052d01f
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8ba2494af9fc250e587279ec6c23697d4a4eee3ba750a8eea5d8a07f578adf0b
+oid sha256:39522c45970a3ae5ecbf73e1051ca4989117a8b949dcd4b7f4ee9663321048b0
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:42c29d847f75ce9649efda10a945d3da08ad95d9c7cdbd2e690e733de315eb64
+oid sha256:b8f008ed445b652736016defc4807eff9b5ec48a500ab7e9db898ce35023867e
 size 1064
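Each file above is a Git LFS pointer (version, oid sha256, size). A small sketch of verifying a locally fetched copy against its pointer, assuming the payload has already been pulled with git lfs:

# Sketch: check a git-lfs-managed file against the sha256 oid and size in its pointer.
import hashlib
import os

def verify_lfs_file(path, expected_oid, expected_size):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid and os.path.getsize(path) == expected_size

# Example with the new adapter pointer from this commit:
print(verify_lfs_file(
    "last-checkpoint/adapter_model.safetensors",
    "1cce123c0683ef9cd6975eb4e7a421ac1277037ddb1f56ad868f4857ff4175af",
    13587864,
))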
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.6848325729370117,
-  "best_model_checkpoint": "miner_id_24/checkpoint-200",
-  "epoch": 1.5252621544327931,
+  "best_metric": 1.6708096265792847,
+  "best_model_checkpoint": "miner_id_24/checkpoint-250",
+  "epoch": 1.9065776930409915,
   "eval_steps": 25,
-  "global_step": 200,
+  "global_step": 250,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1479,6 +1479,372 @@
       "eval_samples_per_second": 177.685,
       "eval_steps_per_second": 46.198,
       "step": 200
+    },
+    {
+      "epoch": 1.532888465204957,
+      "grad_norm": 0.382758229970932,
+      "learning_rate": 6.722664380155198e-05,
+      "loss": 1.6259,
+      "step": 201
+    },
+    {
+      "epoch": 1.5405147759771212,
+      "grad_norm": 0.3803237974643707,
+      "learning_rate": 6.609167332422427e-05,
+      "loss": 1.5547,
+      "step": 202
+    },
+    {
+      "epoch": 1.548141086749285,
+      "grad_norm": 0.4076535701751709,
+      "learning_rate": 6.497159807708055e-05,
+      "loss": 1.5846,
+      "step": 203
+    },
+    {
+      "epoch": 1.555767397521449,
+      "grad_norm": 0.41619113087654114,
+      "learning_rate": 6.386658673933301e-05,
+      "loss": 1.6648,
+      "step": 204
+    },
+    {
+      "epoch": 1.5633937082936131,
+      "grad_norm": 0.4555559754371643,
+      "learning_rate": 6.277680572162459e-05,
+      "loss": 1.6636,
+      "step": 205
+    },
+    {
+      "epoch": 1.5710200190657768,
+      "grad_norm": 0.5715373158454895,
+      "learning_rate": 6.170241914096804e-05,
+      "loss": 1.7265,
+      "step": 206
+    },
+    {
+      "epoch": 1.578646329837941,
+      "grad_norm": 0.39986926317214966,
+      "learning_rate": 6.06435887960305e-05,
+      "loss": 1.614,
+      "step": 207
+    },
+    {
+      "epoch": 1.5862726406101049,
+      "grad_norm": 0.4014238119125366,
+      "learning_rate": 5.960047414276724e-05,
+      "loss": 1.5169,
+      "step": 208
+    },
+    {
+      "epoch": 1.5938989513822688,
+      "grad_norm": 0.40676450729370117,
+      "learning_rate": 5.857323227040816e-05,
+      "loss": 1.5836,
+      "step": 209
+    },
+    {
+      "epoch": 1.601525262154433,
+      "grad_norm": 0.38130614161491394,
+      "learning_rate": 5.756201787780074e-05,
+      "loss": 1.5636,
+      "step": 210
+    },
+    {
+      "epoch": 1.6091515729265966,
+      "grad_norm": 0.4001769721508026,
+      "learning_rate": 5.656698325011295e-05,
+      "loss": 1.5641,
+      "step": 211
+    },
+    {
+      "epoch": 1.6167778836987607,
+      "grad_norm": 0.3762960433959961,
+      "learning_rate": 5.5588278235899724e-05,
+      "loss": 1.615,
+      "step": 212
+    },
+    {
+      "epoch": 1.6244041944709247,
+      "grad_norm": 0.372916042804718,
+      "learning_rate": 5.462605022453621e-05,
+      "loss": 1.6307,
+      "step": 213
+    },
+    {
+      "epoch": 1.6320305052430886,
+      "grad_norm": 0.4022381603717804,
+      "learning_rate": 5.368044412402161e-05,
+      "loss": 1.5634,
+      "step": 214
+    },
+    {
+      "epoch": 1.6396568160152527,
+      "grad_norm": 0.39602571725845337,
+      "learning_rate": 5.275160233915637e-05,
+      "loss": 1.6328,
+      "step": 215
+    },
+    {
+      "epoch": 1.6472831267874166,
+      "grad_norm": 0.4366743266582489,
+      "learning_rate": 5.183966475009686e-05,
+      "loss": 1.6038,
+      "step": 216
+    },
+    {
+      "epoch": 1.6549094375595805,
+      "grad_norm": 0.458132803440094,
+      "learning_rate": 5.0944768691289534e-05,
+      "loss": 1.6384,
+      "step": 217
+    },
+    {
+      "epoch": 1.6625357483317447,
+      "grad_norm": 0.4853437840938568,
+      "learning_rate": 5.0067048930789196e-05,
+      "loss": 1.6787,
+      "step": 218
+    },
+    {
+      "epoch": 1.6701620591039084,
+      "grad_norm": 0.4034062325954437,
+      "learning_rate": 4.920663764996328e-05,
+      "loss": 1.5721,
+      "step": 219
+    },
+    {
+      "epoch": 1.6777883698760725,
+      "grad_norm": 0.4523112177848816,
+      "learning_rate": 4.8363664423585795e-05,
+      "loss": 1.6327,
+      "step": 220
+    },
+    {
+      "epoch": 1.6854146806482364,
+      "grad_norm": 0.4193178415298462,
+      "learning_rate": 4.753825620032397e-05,
+      "loss": 1.5354,
+      "step": 221
+    },
+    {
+      "epoch": 1.6930409914204003,
+      "grad_norm": 0.3819790482521057,
+      "learning_rate": 4.673053728362012e-05,
+      "loss": 1.5833,
+      "step": 222
+    },
+    {
+      "epoch": 1.7006673021925645,
+      "grad_norm": 0.3742893636226654,
+      "learning_rate": 4.5940629312972085e-05,
+      "loss": 1.5805,
+      "step": 223
+    },
+    {
+      "epoch": 1.7082936129647281,
+      "grad_norm": 0.37698328495025635,
+      "learning_rate": 4.516865124561473e-05,
+      "loss": 1.5632,
+      "step": 224
+    },
+    {
+      "epoch": 1.7159199237368923,
+      "grad_norm": 0.39410701394081116,
+      "learning_rate": 4.4414719338605445e-05,
+      "loss": 1.6016,
+      "step": 225
+    },
+    {
+      "epoch": 1.7159199237368923,
+      "eval_loss": 1.6763724088668823,
+      "eval_runtime": 0.281,
+      "eval_samples_per_second": 177.963,
+      "eval_steps_per_second": 46.27,
+      "step": 225
+    },
+    {
+      "epoch": 1.7235462345090562,
+      "grad_norm": 0.41081416606903076,
+      "learning_rate": 4.367894713131622e-05,
+      "loss": 1.5998,
+      "step": 226
+    },
+    {
+      "epoch": 1.73117254528122,
+      "grad_norm": 0.4001181721687317,
+      "learning_rate": 4.296144542833515e-05,
+      "loss": 1.6213,
+      "step": 227
+    },
+    {
+      "epoch": 1.7387988560533842,
+      "grad_norm": 0.4368586242198944,
+      "learning_rate": 4.226232228277948e-05,
+      "loss": 1.6338,
+      "step": 228
+    },
+    {
+      "epoch": 1.7464251668255482,
+      "grad_norm": 0.4104710817337036,
+      "learning_rate": 4.1581682980023354e-05,
+      "loss": 1.6433,
+      "step": 229
+    },
+    {
+      "epoch": 1.754051477597712,
+      "grad_norm": 0.4876128137111664,
+      "learning_rate": 4.0919630021842204e-05,
+      "loss": 1.6381,
+      "step": 230
+    },
+    {
+      "epoch": 1.7616777883698762,
+      "grad_norm": 0.5875245332717896,
+      "learning_rate": 4.027626311097629e-05,
+      "loss": 1.7134,
+      "step": 231
+    },
+    {
+      "epoch": 1.76930409914204,
+      "grad_norm": 0.40997183322906494,
+      "learning_rate": 3.965167913611591e-05,
+      "loss": 1.5599,
+      "step": 232
+    },
+    {
+      "epoch": 1.776930409914204,
+      "grad_norm": 0.42695024609565735,
+      "learning_rate": 3.9045972157310256e-05,
+      "loss": 1.5685,
+      "step": 233
+    },
+    {
+      "epoch": 1.784556720686368,
+      "grad_norm": 0.4017082452774048,
+      "learning_rate": 3.845923339180239e-05,
+      "loss": 1.5493,
+      "step": 234
+    },
+    {
+      "epoch": 1.7921830314585319,
+      "grad_norm": 0.38347816467285156,
+      "learning_rate": 3.78915512002922e-05,
+      "loss": 1.5464,
+      "step": 235
+    },
+    {
+      "epoch": 1.799809342230696,
+      "grad_norm": 0.40365535020828247,
+      "learning_rate": 3.734301107362964e-05,
+      "loss": 1.6257,
+      "step": 236
+    },
+    {
+      "epoch": 1.80743565300286,
+      "grad_norm": 0.3748120963573456,
+      "learning_rate": 3.681369561994005e-05,
+      "loss": 1.5456,
+      "step": 237
+    },
+    {
+      "epoch": 1.8150619637750238,
+      "grad_norm": 0.36911335587501526,
+      "learning_rate": 3.6303684552183827e-05,
+      "loss": 1.5886,
+      "step": 238
+    },
+    {
+      "epoch": 1.822688274547188,
+      "grad_norm": 0.3796713948249817,
+      "learning_rate": 3.581305467615181e-05,
+      "loss": 1.5858,
+      "step": 239
+    },
+    {
+      "epoch": 1.8303145853193517,
+      "grad_norm": 0.4003843665122986,
+      "learning_rate": 3.5341879878898615e-05,
+      "loss": 1.6126,
+      "step": 240
+    },
+    {
+      "epoch": 1.8379408960915158,
+      "grad_norm": 0.4427483379840851,
+      "learning_rate": 3.489023111761562e-05,
+      "loss": 1.6487,
+      "step": 241
+    },
+    {
+      "epoch": 1.8455672068636797,
+      "grad_norm": 0.4299188554286957,
+      "learning_rate": 3.445817640894497e-05,
+      "loss": 1.6723,
+      "step": 242
+    },
+    {
+      "epoch": 1.8531935176358436,
+      "grad_norm": 0.5241357684135437,
+      "learning_rate": 3.404578081873656e-05,
+      "loss": 1.6198,
+      "step": 243
+    },
+    {
+      "epoch": 1.8608198284080077,
+      "grad_norm": 0.40499526262283325,
+      "learning_rate": 3.365310645224939e-05,
+      "loss": 1.5758,
+      "step": 244
+    },
+    {
+      "epoch": 1.8684461391801714,
+      "grad_norm": 0.43495872616767883,
+      "learning_rate": 3.328021244479866e-05,
+      "loss": 1.5897,
+      "step": 245
+    },
+    {
+      "epoch": 1.8760724499523356,
+      "grad_norm": 0.39778581261634827,
+      "learning_rate": 3.292715495285028e-05,
+      "loss": 1.5267,
+      "step": 246
+    },
+    {
+      "epoch": 1.8836987607244995,
+      "grad_norm": 0.3671037256717682,
+      "learning_rate": 3.259398714556389e-05,
+      "loss": 1.5499,
+      "step": 247
+    },
+    {
+      "epoch": 1.8913250714966634,
+      "grad_norm": 0.38671308755874634,
+      "learning_rate": 3.2280759196785803e-05,
+      "loss": 1.628,
+      "step": 248
+    },
+    {
+      "epoch": 1.8989513822688275,
+      "grad_norm": 0.41623926162719727,
+      "learning_rate": 3.1987518277492934e-05,
+      "loss": 1.5699,
+      "step": 249
+    },
+    {
+      "epoch": 1.9065776930409915,
+      "grad_norm": 0.38484105467796326,
+      "learning_rate": 3.171430854868911e-05,
+      "loss": 1.5702,
+      "step": 250
+    },
+    {
+      "epoch": 1.9065776930409915,
+      "eval_loss": 1.6708096265792847,
+      "eval_runtime": 0.2807,
+      "eval_samples_per_second": 178.136,
+      "eval_steps_per_second": 46.315,
+      "step": 250
     }
   ],
   "logging_steps": 1,
@@ -1507,7 +1873,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 8.88560550936576e+16,
+  "total_flos": 1.11070068867072e+17,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null