{ "best_metric": null, "best_model_checkpoint": null, "epoch": 933.3333333333334, "global_step": 1400, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 6.67, "learning_rate": 1.0223707272628328e-05, "loss": 2.0543, "step": 10 }, { "epoch": 13.33, "learning_rate": 1.4885550540243259e-05, "loss": 1.4127, "step": 20 }, { "epoch": 20.0, "learning_rate": 1.73161496447939e-05, "loss": 0.5013, "step": 30 }, { "epoch": 26.67, "learning_rate": 1.897156851886858e-05, "loss": 0.1121, "step": 40 }, { "epoch": 33.33, "learning_rate": 2e-05, "loss": 0.0576, "step": 50 }, { "epoch": 40.0, "learning_rate": 2e-05, "loss": 0.039, "step": 60 }, { "epoch": 46.67, "learning_rate": 2e-05, "loss": 0.0296, "step": 70 }, { "epoch": 53.33, "learning_rate": 2e-05, "loss": 0.0221, "step": 80 }, { "epoch": 60.0, "learning_rate": 2e-05, "loss": 0.0173, "step": 90 }, { "epoch": 66.67, "learning_rate": 2e-05, "loss": 0.0134, "step": 100 }, { "epoch": 73.33, "learning_rate": 2e-05, "loss": 0.011, "step": 110 }, { "epoch": 80.0, "learning_rate": 2e-05, "loss": 0.0099, "step": 120 }, { "epoch": 86.67, "learning_rate": 2e-05, "loss": 0.0092, "step": 130 }, { "epoch": 93.33, "learning_rate": 2e-05, "loss": 0.0078, "step": 140 }, { "epoch": 100.0, "learning_rate": 2e-05, "loss": 0.0072, "step": 150 }, { "epoch": 106.67, "learning_rate": 2e-05, "loss": 0.01, "step": 160 }, { "epoch": 113.33, "learning_rate": 2e-05, "loss": 0.0169, "step": 170 }, { "epoch": 120.0, "learning_rate": 2e-05, "loss": 0.0064, "step": 180 }, { "epoch": 126.67, "learning_rate": 2e-05, "loss": 0.0058, "step": 190 }, { "epoch": 133.33, "learning_rate": 2e-05, "loss": 0.0154, "step": 200 }, { "epoch": 140.0, "learning_rate": 2e-05, "loss": 0.0126, "step": 210 }, { "epoch": 146.67, "learning_rate": 2e-05, "loss": 0.0052, "step": 220 }, { "epoch": 153.33, "learning_rate": 2e-05, "loss": 0.0076, "step": 230 }, { "epoch": 160.0, "learning_rate": 2e-05, "loss": 0.006, "step": 240 }, { "epoch": 166.67, "learning_rate": 2e-05, "loss": 0.005, "step": 250 }, { "epoch": 173.33, "learning_rate": 2e-05, "loss": 0.0047, "step": 260 }, { "epoch": 180.0, "learning_rate": 2e-05, "loss": 0.0041, "step": 270 }, { "epoch": 186.67, "learning_rate": 2e-05, "loss": 0.0044, "step": 280 }, { "epoch": 193.33, "learning_rate": 2e-05, "loss": 0.0042, "step": 290 }, { "epoch": 200.0, "learning_rate": 2e-05, "loss": 0.0038, "step": 300 }, { "epoch": 206.67, "learning_rate": 2e-05, "loss": 0.004, "step": 310 }, { "epoch": 213.33, "learning_rate": 2e-05, "loss": 0.0039, "step": 320 }, { "epoch": 220.0, "learning_rate": 2e-05, "loss": 0.0041, "step": 330 }, { "epoch": 226.67, "learning_rate": 2e-05, "loss": 0.0042, "step": 340 }, { "epoch": 233.33, "learning_rate": 2e-05, "loss": 0.0041, "step": 350 }, { "epoch": 240.0, "learning_rate": 2e-05, "loss": 0.0039, "step": 360 }, { "epoch": 246.67, "learning_rate": 2e-05, "loss": 0.0037, "step": 370 }, { "epoch": 253.33, "learning_rate": 2e-05, "loss": 0.0039, "step": 380 }, { "epoch": 260.0, "learning_rate": 2e-05, "loss": 0.0037, "step": 390 }, { "epoch": 266.67, "learning_rate": 2e-05, "loss": 0.0036, "step": 400 }, { "epoch": 273.33, "learning_rate": 2e-05, "loss": 0.0037, "step": 410 }, { "epoch": 280.0, "learning_rate": 2e-05, "loss": 0.0038, "step": 420 }, { "epoch": 286.67, "learning_rate": 2e-05, "loss": 0.0037, "step": 430 }, { "epoch": 293.33, "learning_rate": 2e-05, "loss": 0.0037, "step": 440 }, { "epoch": 300.0, "learning_rate": 2e-05, "loss": 0.0036, 
"step": 450 }, { "epoch": 306.67, "learning_rate": 2e-05, "loss": 0.0036, "step": 460 }, { "epoch": 313.33, "learning_rate": 2e-05, "loss": 0.0037, "step": 470 }, { "epoch": 320.0, "learning_rate": 2e-05, "loss": 0.0035, "step": 480 }, { "epoch": 326.67, "learning_rate": 2e-05, "loss": 0.0036, "step": 490 }, { "epoch": 333.33, "learning_rate": 2e-05, "loss": 0.0034, "step": 500 }, { "epoch": 340.0, "learning_rate": 2e-05, "loss": 0.0038, "step": 510 }, { "epoch": 346.67, "learning_rate": 2e-05, "loss": 0.0034, "step": 520 }, { "epoch": 353.33, "learning_rate": 2e-05, "loss": 0.0035, "step": 530 }, { "epoch": 360.0, "learning_rate": 2e-05, "loss": 0.0035, "step": 540 }, { "epoch": 366.67, "learning_rate": 2e-05, "loss": 0.0034, "step": 550 }, { "epoch": 373.33, "learning_rate": 2e-05, "loss": 0.0033, "step": 560 }, { "epoch": 380.0, "learning_rate": 2e-05, "loss": 0.0032, "step": 570 }, { "epoch": 386.67, "learning_rate": 2e-05, "loss": 0.0034, "step": 580 }, { "epoch": 393.33, "learning_rate": 2e-05, "loss": 0.0033, "step": 590 }, { "epoch": 400.0, "learning_rate": 2e-05, "loss": 0.0035, "step": 600 }, { "epoch": 406.67, "learning_rate": 2e-05, "loss": 0.0034, "step": 610 }, { "epoch": 413.33, "learning_rate": 2e-05, "loss": 0.0038, "step": 620 }, { "epoch": 420.0, "learning_rate": 2e-05, "loss": 0.0035, "step": 630 }, { "epoch": 426.67, "learning_rate": 2e-05, "loss": 0.0035, "step": 640 }, { "epoch": 433.33, "learning_rate": 2e-05, "loss": 0.0036, "step": 650 }, { "epoch": 440.0, "learning_rate": 2e-05, "loss": 0.0054, "step": 660 }, { "epoch": 446.67, "learning_rate": 2e-05, "loss": 0.0048, "step": 670 }, { "epoch": 453.33, "learning_rate": 2e-05, "loss": 0.0041, "step": 680 }, { "epoch": 460.0, "learning_rate": 2e-05, "loss": 0.004, "step": 690 }, { "epoch": 466.67, "learning_rate": 2e-05, "loss": 0.005, "step": 700 }, { "epoch": 473.33, "learning_rate": 2e-05, "loss": 0.0052, "step": 710 }, { "epoch": 480.0, "learning_rate": 2e-05, "loss": 0.0049, "step": 720 }, { "epoch": 486.67, "learning_rate": 2e-05, "loss": 0.0053, "step": 730 }, { "epoch": 493.33, "learning_rate": 2e-05, "loss": 0.0046, "step": 740 }, { "epoch": 500.0, "learning_rate": 2e-05, "loss": 0.0049, "step": 750 }, { "epoch": 506.67, "learning_rate": 2e-05, "loss": 0.0046, "step": 760 }, { "epoch": 513.33, "learning_rate": 2e-05, "loss": 0.0043, "step": 770 }, { "epoch": 520.0, "learning_rate": 2e-05, "loss": 0.0045, "step": 780 }, { "epoch": 526.67, "learning_rate": 2e-05, "loss": 0.0044, "step": 790 }, { "epoch": 533.33, "learning_rate": 2e-05, "loss": 0.0044, "step": 800 }, { "epoch": 540.0, "learning_rate": 2e-05, "loss": 0.0039, "step": 810 }, { "epoch": 546.67, "learning_rate": 2e-05, "loss": 0.0039, "step": 820 }, { "epoch": 553.33, "learning_rate": 2e-05, "loss": 0.0039, "step": 830 }, { "epoch": 560.0, "learning_rate": 2e-05, "loss": 0.0036, "step": 840 }, { "epoch": 566.67, "learning_rate": 2e-05, "loss": 0.0035, "step": 850 }, { "epoch": 573.33, "learning_rate": 2e-05, "loss": 0.0034, "step": 860 }, { "epoch": 580.0, "learning_rate": 2e-05, "loss": 0.0033, "step": 870 }, { "epoch": 586.67, "learning_rate": 2e-05, "loss": 0.0033, "step": 880 }, { "epoch": 593.33, "learning_rate": 2e-05, "loss": 0.0035, "step": 890 }, { "epoch": 600.0, "learning_rate": 2e-05, "loss": 0.0037, "step": 900 }, { "epoch": 606.67, "learning_rate": 2e-05, "loss": 0.0047, "step": 910 }, { "epoch": 613.33, "learning_rate": 2e-05, "loss": 0.004, "step": 920 }, { "epoch": 620.0, "learning_rate": 2e-05, "loss": 0.004, "step": 930 }, { 
"epoch": 626.67, "learning_rate": 2e-05, "loss": 0.0036, "step": 940 }, { "epoch": 633.33, "learning_rate": 2e-05, "loss": 0.0037, "step": 950 }, { "epoch": 640.0, "learning_rate": 2e-05, "loss": 0.004, "step": 960 }, { "epoch": 646.67, "learning_rate": 2e-05, "loss": 0.0042, "step": 970 }, { "epoch": 653.33, "learning_rate": 2e-05, "loss": 0.0037, "step": 980 }, { "epoch": 660.0, "learning_rate": 2e-05, "loss": 0.0036, "step": 990 }, { "epoch": 666.67, "learning_rate": 2e-05, "loss": 0.0035, "step": 1000 }, { "epoch": 673.33, "learning_rate": 2e-05, "loss": 0.0036, "step": 1010 }, { "epoch": 680.0, "learning_rate": 2e-05, "loss": 0.0035, "step": 1020 }, { "epoch": 686.67, "learning_rate": 2e-05, "loss": 0.0034, "step": 1030 }, { "epoch": 693.33, "learning_rate": 2e-05, "loss": 0.0034, "step": 1040 }, { "epoch": 700.0, "learning_rate": 2e-05, "loss": 0.0033, "step": 1050 }, { "epoch": 706.67, "learning_rate": 2e-05, "loss": 0.0033, "step": 1060 }, { "epoch": 713.33, "learning_rate": 2e-05, "loss": 0.0032, "step": 1070 }, { "epoch": 720.0, "learning_rate": 2e-05, "loss": 0.0032, "step": 1080 }, { "epoch": 726.67, "learning_rate": 2e-05, "loss": 0.0032, "step": 1090 }, { "epoch": 733.33, "learning_rate": 2e-05, "loss": 0.0032, "step": 1100 }, { "epoch": 740.0, "learning_rate": 2e-05, "loss": 0.0032, "step": 1110 }, { "epoch": 746.67, "learning_rate": 2e-05, "loss": 0.0031, "step": 1120 }, { "epoch": 753.33, "learning_rate": 2e-05, "loss": 0.0033, "step": 1130 }, { "epoch": 760.0, "learning_rate": 2e-05, "loss": 0.0033, "step": 1140 }, { "epoch": 766.67, "learning_rate": 2e-05, "loss": 0.0031, "step": 1150 }, { "epoch": 773.33, "learning_rate": 2e-05, "loss": 0.003, "step": 1160 }, { "epoch": 780.0, "learning_rate": 2e-05, "loss": 0.0032, "step": 1170 }, { "epoch": 786.67, "learning_rate": 2e-05, "loss": 0.0033, "step": 1180 }, { "epoch": 793.33, "learning_rate": 2e-05, "loss": 0.0031, "step": 1190 }, { "epoch": 800.0, "learning_rate": 2e-05, "loss": 0.0031, "step": 1200 }, { "epoch": 806.67, "learning_rate": 2e-05, "loss": 0.0032, "step": 1210 }, { "epoch": 813.33, "learning_rate": 2e-05, "loss": 0.0031, "step": 1220 }, { "epoch": 820.0, "learning_rate": 2e-05, "loss": 0.0033, "step": 1230 }, { "epoch": 826.67, "learning_rate": 2e-05, "loss": 0.0029, "step": 1240 }, { "epoch": 833.33, "learning_rate": 2e-05, "loss": 0.003, "step": 1250 }, { "epoch": 840.0, "learning_rate": 2e-05, "loss": 0.0031, "step": 1260 }, { "epoch": 846.67, "learning_rate": 2e-05, "loss": 0.0031, "step": 1270 }, { "epoch": 853.33, "learning_rate": 2e-05, "loss": 0.0032, "step": 1280 }, { "epoch": 860.0, "learning_rate": 2e-05, "loss": 0.0031, "step": 1290 }, { "epoch": 866.67, "learning_rate": 2e-05, "loss": 0.0029, "step": 1300 }, { "epoch": 873.33, "learning_rate": 2e-05, "loss": 0.0031, "step": 1310 }, { "epoch": 880.0, "learning_rate": 2e-05, "loss": 0.003, "step": 1320 }, { "epoch": 886.67, "learning_rate": 2e-05, "loss": 0.0032, "step": 1330 }, { "epoch": 893.33, "learning_rate": 2e-05, "loss": 0.0031, "step": 1340 }, { "epoch": 900.0, "learning_rate": 2e-05, "loss": 0.0031, "step": 1350 }, { "epoch": 906.67, "learning_rate": 2e-05, "loss": 0.0032, "step": 1360 }, { "epoch": 913.33, "learning_rate": 2e-05, "loss": 0.0031, "step": 1370 }, { "epoch": 920.0, "learning_rate": 2e-05, "loss": 0.0031, "step": 1380 }, { "epoch": 926.67, "learning_rate": 2e-05, "loss": 0.0031, "step": 1390 }, { "epoch": 933.33, "learning_rate": 2e-05, "loss": 0.0033, "step": 1400 } ], "max_steps": 1500, "num_train_epochs": 1500, 
"total_flos": 589055309905920.0, "trial_name": null, "trial_params": null }