{ "best_metric": 1.06325101852417, "best_model_checkpoint": "miner_id_24/checkpoint-50", "epoch": 0.15464913976416006, "eval_steps": 25, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.003092982795283201, "grad_norm": 62.20377731323242, "learning_rate": 1e-05, "loss": 25.075, "step": 1 }, { "epoch": 0.003092982795283201, "eval_loss": 1.5490102767944336, "eval_runtime": 149.2461, "eval_samples_per_second": 3.652, "eval_steps_per_second": 1.829, "step": 1 }, { "epoch": 0.006185965590566402, "grad_norm": 66.19660949707031, "learning_rate": 2e-05, "loss": 23.6882, "step": 2 }, { "epoch": 0.009278948385849604, "grad_norm": 63.24820327758789, "learning_rate": 3e-05, "loss": 23.8152, "step": 3 }, { "epoch": 0.012371931181132804, "grad_norm": 65.91133880615234, "learning_rate": 4e-05, "loss": 24.4938, "step": 4 }, { "epoch": 0.015464913976416006, "grad_norm": 37.1609001159668, "learning_rate": 5e-05, "loss": 20.4001, "step": 5 }, { "epoch": 0.01855789677169921, "grad_norm": 37.836673736572266, "learning_rate": 6e-05, "loss": 21.0493, "step": 6 }, { "epoch": 0.02165087956698241, "grad_norm": 35.000755310058594, "learning_rate": 7e-05, "loss": 19.5046, "step": 7 }, { "epoch": 0.02474386236226561, "grad_norm": 31.371307373046875, "learning_rate": 8e-05, "loss": 18.7178, "step": 8 }, { "epoch": 0.02783684515754881, "grad_norm": 36.98615264892578, "learning_rate": 9e-05, "loss": 19.6468, "step": 9 }, { "epoch": 0.03092982795283201, "grad_norm": 33.72000503540039, "learning_rate": 0.0001, "loss": 19.1595, "step": 10 }, { "epoch": 0.034022810748115216, "grad_norm": 32.670860290527344, "learning_rate": 9.98458666866564e-05, "loss": 18.3969, "step": 11 }, { "epoch": 0.03711579354339842, "grad_norm": 35.979820251464844, "learning_rate": 9.938441702975689e-05, "loss": 19.4862, "step": 12 }, { "epoch": 0.04020877633868162, "grad_norm": 28.703033447265625, "learning_rate": 9.861849601988383e-05, "loss": 17.8546, "step": 13 }, { "epoch": 0.04330175913396482, "grad_norm": 24.526432037353516, "learning_rate": 9.755282581475769e-05, "loss": 17.7441, "step": 14 }, { "epoch": 0.046394741929248015, "grad_norm": 25.215848922729492, "learning_rate": 9.619397662556435e-05, "loss": 17.3959, "step": 15 }, { "epoch": 0.04948772472453122, "grad_norm": 26.21254539489746, "learning_rate": 9.45503262094184e-05, "loss": 17.1178, "step": 16 }, { "epoch": 0.05258070751981442, "grad_norm": 27.829181671142578, "learning_rate": 9.263200821770461e-05, "loss": 19.3058, "step": 17 }, { "epoch": 0.05567369031509762, "grad_norm": 25.054349899291992, "learning_rate": 9.045084971874738e-05, "loss": 17.6997, "step": 18 }, { "epoch": 0.05876667311038082, "grad_norm": 25.53619384765625, "learning_rate": 8.802029828000156e-05, "loss": 17.6693, "step": 19 }, { "epoch": 0.06185965590566402, "grad_norm": 29.731338500976562, "learning_rate": 8.535533905932738e-05, "loss": 18.1907, "step": 20 }, { "epoch": 0.06495263870094722, "grad_norm": 27.52916145324707, "learning_rate": 8.247240241650918e-05, "loss": 17.7958, "step": 21 }, { "epoch": 0.06804562149623043, "grad_norm": 27.228439331054688, "learning_rate": 7.938926261462366e-05, "loss": 18.8158, "step": 22 }, { "epoch": 0.07113860429151363, "grad_norm": 26.449893951416016, "learning_rate": 7.612492823579745e-05, "loss": 17.0907, "step": 23 }, { "epoch": 0.07423158708679684, "grad_norm": 26.7717227935791, "learning_rate": 7.269952498697734e-05, "loss": 19.3673, "step": 24 }, { "epoch": 
0.07732456988208003, "grad_norm": 24.446931838989258, "learning_rate": 6.91341716182545e-05, "loss": 17.9268, "step": 25 }, { "epoch": 0.07732456988208003, "eval_loss": 1.0832998752593994, "eval_runtime": 150.2298, "eval_samples_per_second": 3.628, "eval_steps_per_second": 1.817, "step": 25 }, { "epoch": 0.08041755267736324, "grad_norm": 24.669111251831055, "learning_rate": 6.545084971874738e-05, "loss": 17.5459, "step": 26 }, { "epoch": 0.08351053547264643, "grad_norm": 23.340457916259766, "learning_rate": 6.167226819279528e-05, "loss": 16.5561, "step": 27 }, { "epoch": 0.08660351826792964, "grad_norm": 25.84408950805664, "learning_rate": 5.782172325201155e-05, "loss": 17.3012, "step": 28 }, { "epoch": 0.08969650106321284, "grad_norm": 25.935504913330078, "learning_rate": 5.392295478639225e-05, "loss": 18.7042, "step": 29 }, { "epoch": 0.09278948385849603, "grad_norm": 23.08302879333496, "learning_rate": 5e-05, "loss": 16.2506, "step": 30 }, { "epoch": 0.09588246665377924, "grad_norm": 24.507280349731445, "learning_rate": 4.607704521360776e-05, "loss": 18.5361, "step": 31 }, { "epoch": 0.09897544944906243, "grad_norm": 22.492021560668945, "learning_rate": 4.2178276747988446e-05, "loss": 16.9076, "step": 32 }, { "epoch": 0.10206843224434564, "grad_norm": 24.12836456298828, "learning_rate": 3.832773180720475e-05, "loss": 18.5358, "step": 33 }, { "epoch": 0.10516141503962884, "grad_norm": 22.608657836914062, "learning_rate": 3.4549150281252636e-05, "loss": 16.6398, "step": 34 }, { "epoch": 0.10825439783491204, "grad_norm": 21.76448631286621, "learning_rate": 3.086582838174551e-05, "loss": 16.4674, "step": 35 }, { "epoch": 0.11134738063019524, "grad_norm": 24.399173736572266, "learning_rate": 2.7300475013022663e-05, "loss": 16.219, "step": 36 }, { "epoch": 0.11444036342547845, "grad_norm": 21.240434646606445, "learning_rate": 2.3875071764202563e-05, "loss": 14.7258, "step": 37 }, { "epoch": 0.11753334622076164, "grad_norm": 23.667985916137695, "learning_rate": 2.061073738537635e-05, "loss": 17.4903, "step": 38 }, { "epoch": 0.12062632901604485, "grad_norm": 24.75313377380371, "learning_rate": 1.7527597583490822e-05, "loss": 18.4833, "step": 39 }, { "epoch": 0.12371931181132804, "grad_norm": 28.104583740234375, "learning_rate": 1.4644660940672627e-05, "loss": 16.1857, "step": 40 }, { "epoch": 0.12681229460661125, "grad_norm": 24.995805740356445, "learning_rate": 1.1979701719998453e-05, "loss": 16.9785, "step": 41 }, { "epoch": 0.12990527740189445, "grad_norm": 26.10293197631836, "learning_rate": 9.549150281252633e-06, "loss": 17.8392, "step": 42 }, { "epoch": 0.13299826019717764, "grad_norm": 22.538623809814453, "learning_rate": 7.367991782295391e-06, "loss": 16.8663, "step": 43 }, { "epoch": 0.13609124299246086, "grad_norm": 23.371549606323242, "learning_rate": 5.449673790581611e-06, "loss": 17.0096, "step": 44 }, { "epoch": 0.13918422578774406, "grad_norm": 22.70631980895996, "learning_rate": 3.8060233744356633e-06, "loss": 16.9947, "step": 45 }, { "epoch": 0.14227720858302725, "grad_norm": 26.935110092163086, "learning_rate": 2.4471741852423237e-06, "loss": 17.6166, "step": 46 }, { "epoch": 0.14537019137831045, "grad_norm": 24.898109436035156, "learning_rate": 1.3815039801161721e-06, "loss": 17.4678, "step": 47 }, { "epoch": 0.14846317417359367, "grad_norm": 23.253236770629883, "learning_rate": 6.15582970243117e-07, "loss": 17.145, "step": 48 }, { "epoch": 0.15155615696887687, "grad_norm": 23.85171890258789, "learning_rate": 1.5413331334360182e-07, "loss": 17.1861, "step": 49 }, { "epoch": 
0.15464913976416006, "grad_norm": 22.377422332763672, "learning_rate": 0.0, "loss": 19.1859, "step": 50 }, { "epoch": 0.15464913976416006, "eval_loss": 1.06325101852417, "eval_runtime": 150.4088, "eval_samples_per_second": 3.623, "eval_steps_per_second": 1.815, "step": 50 } ], "logging_steps": 1, "max_steps": 50, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 1, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.5435789463506125e+17, "train_batch_size": 2, "trial_name": null, "trial_params": null }