{ "best_metric": 0.8287712335586548, "best_model_checkpoint": "miner_id_24/checkpoint-50", "epoch": 0.3812246842983083, "eval_steps": 25, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.007624493685966166, "grad_norm": 0.273327499628067, "learning_rate": 5e-05, "loss": 0.9358, "step": 1 }, { "epoch": 0.007624493685966166, "eval_loss": 0.934563934803009, "eval_runtime": 12.2603, "eval_samples_per_second": 144.205, "eval_steps_per_second": 18.026, "step": 1 }, { "epoch": 0.015248987371932332, "grad_norm": 0.31604793667793274, "learning_rate": 0.0001, "loss": 0.9409, "step": 2 }, { "epoch": 0.0228734810578985, "grad_norm": 0.26551634073257446, "learning_rate": 9.989294616193017e-05, "loss": 0.8929, "step": 3 }, { "epoch": 0.030497974743864665, "grad_norm": 0.24841269850730896, "learning_rate": 9.957224306869053e-05, "loss": 0.9061, "step": 4 }, { "epoch": 0.03812246842983083, "grad_norm": 0.2744283676147461, "learning_rate": 9.903926402016153e-05, "loss": 0.9127, "step": 5 }, { "epoch": 0.045746962115797, "grad_norm": 0.264932781457901, "learning_rate": 9.829629131445342e-05, "loss": 0.8906, "step": 6 }, { "epoch": 0.05337145580176316, "grad_norm": 0.246871680021286, "learning_rate": 9.73465064747553e-05, "loss": 0.8858, "step": 7 }, { "epoch": 0.06099594948772933, "grad_norm": 0.2222467064857483, "learning_rate": 9.619397662556435e-05, "loss": 0.8231, "step": 8 }, { "epoch": 0.06862044317369549, "grad_norm": 0.224148690700531, "learning_rate": 9.484363707663442e-05, "loss": 0.8685, "step": 9 }, { "epoch": 0.07624493685966166, "grad_norm": 0.2204771637916565, "learning_rate": 9.330127018922194e-05, "loss": 0.8598, "step": 10 }, { "epoch": 0.08386943054562783, "grad_norm": 0.2148129940032959, "learning_rate": 9.157348061512727e-05, "loss": 0.8643, "step": 11 }, { "epoch": 0.091493924231594, "grad_norm": 0.2276252657175064, "learning_rate": 8.966766701456177e-05, "loss": 0.8292, "step": 12 }, { "epoch": 0.09911841791756017, "grad_norm": 0.16194580495357513, "learning_rate": 8.759199037394887e-05, "loss": 0.9297, "step": 13 }, { "epoch": 0.10674291160352632, "grad_norm": 0.1492803692817688, "learning_rate": 8.535533905932738e-05, "loss": 0.8669, "step": 14 }, { "epoch": 0.11436740528949249, "grad_norm": 0.1596936732530594, "learning_rate": 8.296729075500344e-05, "loss": 0.8862, "step": 15 }, { "epoch": 0.12199189897545866, "grad_norm": 0.15809616446495056, "learning_rate": 8.043807145043604e-05, "loss": 0.8595, "step": 16 }, { "epoch": 0.12961639266142483, "grad_norm": 0.1597907990217209, "learning_rate": 7.777851165098012e-05, "loss": 0.8863, "step": 17 }, { "epoch": 0.13724088634739098, "grad_norm": 0.15602456033229828, "learning_rate": 7.500000000000001e-05, "loss": 0.8556, "step": 18 }, { "epoch": 0.14486538003335717, "grad_norm": 0.1547441929578781, "learning_rate": 7.211443451095007e-05, "loss": 0.8208, "step": 19 }, { "epoch": 0.15248987371932332, "grad_norm": 0.16280899941921234, "learning_rate": 6.91341716182545e-05, "loss": 0.8561, "step": 20 }, { "epoch": 0.1601143674052895, "grad_norm": 0.17006684839725494, "learning_rate": 6.607197326515808e-05, "loss": 0.8499, "step": 21 }, { "epoch": 0.16773886109125566, "grad_norm": 0.1722775101661682, "learning_rate": 6.294095225512603e-05, "loss": 0.8458, "step": 22 }, { "epoch": 0.1753633547772218, "grad_norm": 0.1805761754512787, "learning_rate": 5.9754516100806423e-05, "loss": 0.8157, "step": 23 }, { "epoch": 0.182987848463188, "grad_norm": 
0.18934600055217743, "learning_rate": 5.6526309611002594e-05, "loss": 0.8157, "step": 24 }, { "epoch": 0.19061234214915415, "grad_norm": 0.22315742075443268, "learning_rate": 5.327015646150716e-05, "loss": 0.8355, "step": 25 }, { "epoch": 0.19061234214915415, "eval_loss": 0.8377934694290161, "eval_runtime": 12.1988, "eval_samples_per_second": 144.932, "eval_steps_per_second": 18.117, "step": 25 }, { "epoch": 0.19823683583512033, "grad_norm": 0.15700483322143555, "learning_rate": 5e-05, "loss": 0.8346, "step": 26 }, { "epoch": 0.2058613295210865, "grad_norm": 0.1602972149848938, "learning_rate": 4.6729843538492847e-05, "loss": 0.8255, "step": 27 }, { "epoch": 0.21348582320705264, "grad_norm": 0.15739409625530243, "learning_rate": 4.347369038899744e-05, "loss": 0.842, "step": 28 }, { "epoch": 0.22111031689301883, "grad_norm": 0.15054009854793549, "learning_rate": 4.0245483899193595e-05, "loss": 0.8599, "step": 29 }, { "epoch": 0.22873481057898498, "grad_norm": 0.1502337008714676, "learning_rate": 3.705904774487396e-05, "loss": 0.817, "step": 30 }, { "epoch": 0.23635930426495116, "grad_norm": 0.14839516580104828, "learning_rate": 3.392802673484193e-05, "loss": 0.8335, "step": 31 }, { "epoch": 0.24398379795091732, "grad_norm": 0.1509093940258026, "learning_rate": 3.086582838174551e-05, "loss": 0.8419, "step": 32 }, { "epoch": 0.2516082916368835, "grad_norm": 0.15149198472499847, "learning_rate": 2.7885565489049946e-05, "loss": 0.8169, "step": 33 }, { "epoch": 0.25923278532284966, "grad_norm": 0.15918795764446259, "learning_rate": 2.500000000000001e-05, "loss": 0.859, "step": 34 }, { "epoch": 0.26685727900881584, "grad_norm": 0.1778258979320526, "learning_rate": 2.2221488349019903e-05, "loss": 0.8393, "step": 35 }, { "epoch": 0.27448177269478197, "grad_norm": 0.19277341663837433, "learning_rate": 1.9561928549563968e-05, "loss": 0.8147, "step": 36 }, { "epoch": 0.28210626638074815, "grad_norm": 0.19680185616016388, "learning_rate": 1.703270924499656e-05, "loss": 0.78, "step": 37 }, { "epoch": 0.28973076006671433, "grad_norm": 0.12966406345367432, "learning_rate": 1.4644660940672627e-05, "loss": 0.8441, "step": 38 }, { "epoch": 0.29735525375268046, "grad_norm": 0.12343523651361465, "learning_rate": 1.2408009626051137e-05, "loss": 0.8377, "step": 39 }, { "epoch": 0.30497974743864664, "grad_norm": 0.12981387972831726, "learning_rate": 1.0332332985438248e-05, "loss": 0.8426, "step": 40 }, { "epoch": 0.3126042411246128, "grad_norm": 0.13032592833042145, "learning_rate": 8.426519384872733e-06, "loss": 0.84, "step": 41 }, { "epoch": 0.320228734810579, "grad_norm": 0.1364886462688446, "learning_rate": 6.698729810778065e-06, "loss": 0.8459, "step": 42 }, { "epoch": 0.32785322849654513, "grad_norm": 0.13743382692337036, "learning_rate": 5.156362923365588e-06, "loss": 0.8038, "step": 43 }, { "epoch": 0.3354777221825113, "grad_norm": 0.14321595430374146, "learning_rate": 3.8060233744356633e-06, "loss": 0.8594, "step": 44 }, { "epoch": 0.3431022158684775, "grad_norm": 0.139565110206604, "learning_rate": 2.653493525244721e-06, "loss": 0.8038, "step": 45 }, { "epoch": 0.3507267095544436, "grad_norm": 0.15720701217651367, "learning_rate": 1.70370868554659e-06, "loss": 0.8524, "step": 46 }, { "epoch": 0.3583512032404098, "grad_norm": 0.16059865057468414, "learning_rate": 9.607359798384785e-07, "loss": 0.8119, "step": 47 }, { "epoch": 0.365975696926376, "grad_norm": 0.1649278700351715, "learning_rate": 4.277569313094809e-07, "loss": 0.7992, "step": 48 }, { "epoch": 0.3736001906123422, "grad_norm": 
0.17470909655094147, "learning_rate": 1.0705383806982606e-07, "loss": 0.8197, "step": 49 }, { "epoch": 0.3812246842983083, "grad_norm": 0.21436592936515808, "learning_rate": 0.0, "loss": 0.8206, "step": 50 }, { "epoch": 0.3812246842983083, "eval_loss": 0.8287712335586548, "eval_runtime": 12.2583, "eval_samples_per_second": 144.228, "eval_steps_per_second": 18.029, "step": 50 } ], "logging_steps": 1, "max_steps": 50, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 1, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 5.913400764792832e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }