ZeroUniqueness committed
Commit 1151ad0 · 1 Parent(s): c01f689

Training in progress, step 29000

adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:196c1d0349fe865fe652da8fb5f0318dc7fea1691a4f6b9ce949ba1d8fc568d2
+ oid sha256:bfba69a38235688144762f0028225bf75ceb8ed54f934556abed7f29749c0a1c
  size 500897101
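
The block above is a Git LFS pointer diff: only the sha256 oid changes between commits, while the payload size stays 500897101 bytes. A minimal sketch, assuming the adapter weights have been downloaded locally (the path and helper name below are hypothetical), for checking a blob against the oid and size recorded in its pointer:

import hashlib
import os

def verify_lfs_pointer(blob_path, expected_oid, expected_size):
    """Return True if the local file matches the LFS pointer's size and sha256 oid."""
    if os.path.getsize(blob_path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Values taken from the new pointer above; the local path is hypothetical.
ok = verify_lfs_pointer(
    "adapter_model.bin",
    "bfba69a38235688144762f0028225bf75ceb8ed54f934556abed7f29749c0a1c",
    500897101,
)
print("pointer matches blob:", ok)
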
{checkpoint-25000 → checkpoint-28000/adapter_model}/README.md RENAMED
File without changes
{checkpoint-25000 → checkpoint-28000/adapter_model}/adapter_config.json RENAMED
File without changes
{checkpoint-25000 → checkpoint-28000/adapter_model}/adapter_model.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e8c2bc1b0742a9846ba8422b26d8c01eefc4db7cfed8f3b0f57ff4cb0ca36737
+ oid sha256:196c1d0349fe865fe652da8fb5f0318dc7fea1691a4f6b9ce949ba1d8fc568d2
  size 500897101
{checkpoint-25000/adapter_model → checkpoint-29000}/README.md RENAMED
File without changes
{checkpoint-25000/adapter_model → checkpoint-29000}/adapter_config.json RENAMED
File without changes
{checkpoint-25000/adapter_model → checkpoint-29000}/adapter_model.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e8c2bc1b0742a9846ba8422b26d8c01eefc4db7cfed8f3b0f57ff4cb0ca36737
+ oid sha256:bfba69a38235688144762f0028225bf75ceb8ed54f934556abed7f29749c0a1c
  size 500897101
{checkpoint-25000 → checkpoint-29000}/optimizer.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e74c891d56897c0b02740eeeace0d9ae8070e3f14d7a6244df64ddb88ba79e38
+ oid sha256:c38cecc477586e96952b6fa6d1999766615cbde5d62f009e1485539b7febd398
  size 1001723453
{checkpoint-25000 → checkpoint-29000}/rng_state.pth RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:629e0e9594e58952b7c72ab80b4684281712821515b182bc6bccbc5f679fe7a2
+ oid sha256:5fbefb1656f1ff612ca4102ba7149fb51afc251478e6c2b38858db29d4dbce09
  size 14575
{checkpoint-25000 → checkpoint-29000}/scheduler.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:88e0bf9cf69ff1eacc409b75f32fab7577ca257cb9a30972758ae80dacd7ec2e
+ oid sha256:89994c4455e86b6de985533c4029b82858746f868501ad2119541b04d3058514
  size 627
{checkpoint-25000 → checkpoint-29000}/trainer_state.json RENAMED
@@ -1,8 +1,8 @@
  {
- "best_metric": 0.6086174249649048,
- "best_model_checkpoint": "./qlora-out/checkpoint-25000",
- "epoch": 0.9321054397673465,
- "global_step": 25000,
+ "best_metric": 0.5910914540290833,
+ "best_model_checkpoint": "./qlora-out/checkpoint-29000",
+ "epoch": 1.0812423101301218,
+ "global_step": 29000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1706,11 +1706,283 @@
  "eval_samples_per_second": 0.43,
  "eval_steps_per_second": 0.43,
  "step": 25000
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 0.00015572432311170096,
+ "loss": 0.597,
+ "step": 25100
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 0.00015539965769593894,
+ "loss": 0.5657,
+ "step": 25200
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 0.0001550741475426484,
+ "loss": 0.6081,
+ "step": 25300
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 0.00015474779761522894,
+ "loss": 0.5957,
+ "step": 25400
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 0.00015442061288988525,
+ "loss": 0.6032,
+ "step": 25500
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 0.00015409259835555089,
+ "loss": 0.5662,
+ "step": 25600
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 0.00015376375901381256,
+ "loss": 0.5607,
+ "step": 25700
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 0.00015343409987883354,
+ "loss": 0.5727,
+ "step": 25800
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 0.00015310362597727747,
+ "loss": 0.5762,
+ "step": 25900
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 0.00015277234234823154,
+ "loss": 0.5841,
+ "step": 26000
+ },
+ {
+ "epoch": 0.97,
+ "eval_loss": 0.6026987433433533,
+ "eval_runtime": 1292.1515,
+ "eval_samples_per_second": 0.419,
+ "eval_steps_per_second": 0.419,
+ "step": 26000
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 0.00015244025404312974,
+ "loss": 0.6015,
+ "step": 26100
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 0.00015210736612567588,
+ "loss": 0.5914,
+ "step": 26200
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 0.00015177368367176616,
+ "loss": 0.5799,
+ "step": 26300
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 0.00015143921176941205,
+ "loss": 0.6037,
+ "step": 26400
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 0.00015110395551866255,
+ "loss": 0.5876,
+ "step": 26500
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 0.0001507679200315264,
+ "loss": 0.5973,
+ "step": 26600
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 0.00015043111043189423,
+ "loss": 0.5957,
+ "step": 26700
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 0.00015009353185546046,
+ "loss": 0.5696,
+ "step": 26800
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 0.00014975518944964478,
+ "loss": 0.5523,
+ "step": 26900
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 0.0001494160883735139,
+ "loss": 0.5144,
+ "step": 27000
+ },
+ {
+ "epoch": 1.01,
+ "eval_loss": 0.5985096096992493,
+ "eval_runtime": 1314.8131,
+ "eval_samples_per_second": 0.412,
+ "eval_steps_per_second": 0.412,
+ "step": 27000
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 0.00014907623379770263,
+ "loss": 0.5743,
+ "step": 27100
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 0.00014873563090433547,
+ "loss": 0.5095,
+ "step": 27200
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 0.00014839428488694706,
+ "loss": 0.5391,
+ "step": 27300
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 0.00014805220095040334,
+ "loss": 0.5532,
+ "step": 27400
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 0.00014770938431082212,
+ "loss": 0.536,
+ "step": 27500
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 0.00014736584019549342,
+ "loss": 0.5204,
+ "step": 27600
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 0.00014702157384279997,
+ "loss": 0.5026,
+ "step": 27700
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 0.0001466765905021371,
+ "loss": 0.5319,
+ "step": 27800
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 0.00014633089543383295,
+ "loss": 0.5112,
+ "step": 27900
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 0.00014598449390906804,
+ "loss": 0.5146,
+ "step": 28000
+ },
+ {
+ "epoch": 1.04,
+ "eval_loss": 0.5959522128105164,
+ "eval_runtime": 1288.6066,
+ "eval_samples_per_second": 0.421,
+ "eval_steps_per_second": 0.421,
+ "step": 28000
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 0.00014563739120979497,
+ "loss": 0.5262,
+ "step": 28100
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 0.00014528959262865798,
+ "loss": 0.5082,
+ "step": 28200
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 0.00014494110346891206,
+ "loss": 0.5094,
+ "step": 28300
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 0.00014459192904434226,
+ "loss": 0.5012,
+ "step": 28400
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 0.0001442420746791826,
+ "loss": 0.4946,
+ "step": 28500
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 0.00014389154570803477,
+ "loss": 0.5138,
+ "step": 28600
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 0.000143540347475787,
+ "loss": 0.5082,
+ "step": 28700
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 0.0001431884853375325,
+ "loss": 0.4842,
+ "step": 28800
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 0.0001428359646584876,
+ "loss": 0.5143,
+ "step": 28900
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 0.00014248279081391022,
+ "loss": 0.5029,
+ "step": 29000
+ },
+ {
+ "epoch": 1.08,
+ "eval_loss": 0.5910914540290833,
+ "eval_runtime": 1278.8257,
+ "eval_samples_per_second": 0.424,
+ "eval_steps_per_second": 0.424,
+ "step": 29000
  }
  ],
  "max_steps": 80463,
  "num_train_epochs": 3,
- "total_flos": 7.008735568518267e+18,
+ "total_flos": 8.134006163484918e+18,
  "trial_name": null,
  "trial_params": null
  }
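
The trainer_state.json diff above carries the substance of this commit: the best eval loss improves from 0.6086 at step 25000 to 0.5911 at step 29000, and the log gains one record per 100 training steps plus an eval record every 1000 steps. A minimal sketch for summarizing such a file, assuming the standard Hugging Face Trainer layout in which the per-step records live under "log_history" (the local path is hypothetical):

import json

with open("checkpoint-29000/trainer_state.json") as f:
    state = json.load(f)

print("best_metric:", state["best_metric"])                      # 0.5910914540290833
print("best_model_checkpoint:", state["best_model_checkpoint"])  # ./qlora-out/checkpoint-29000
print("progress:", state["global_step"], "of", state["max_steps"], "steps")

# Pull the most recent evaluation record from the training log.
evals = [rec for rec in state.get("log_history", []) if "eval_loss" in rec]
if evals:
    latest = evals[-1]
    print("latest eval_loss:", latest["eval_loss"], "at step", latest["step"])
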
{checkpoint-25000 → checkpoint-29000}/training_args.bin RENAMED
File without changes