{ "best_metric": 11.756505966186523, "best_model_checkpoint": "miner_id_24/checkpoint-50", "epoch": 0.2518891687657431, "eval_steps": 50, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.005037783375314861, "grad_norm": 0.11665327101945877, "learning_rate": 4e-05, "loss": 11.7818, "step": 1 }, { "epoch": 0.005037783375314861, "eval_loss": 11.777442932128906, "eval_runtime": 1.3844, "eval_samples_per_second": 60.678, "eval_steps_per_second": 30.339, "step": 1 }, { "epoch": 0.010075566750629723, "grad_norm": 0.09411647170782089, "learning_rate": 8e-05, "loss": 11.7749, "step": 2 }, { "epoch": 0.015113350125944584, "grad_norm": 0.11491205543279648, "learning_rate": 0.00012, "loss": 11.7791, "step": 3 }, { "epoch": 0.020151133501259445, "grad_norm": 0.10814717411994934, "learning_rate": 0.00016, "loss": 11.7724, "step": 4 }, { "epoch": 0.02518891687657431, "grad_norm": 0.09395021945238113, "learning_rate": 0.0002, "loss": 11.7741, "step": 5 }, { "epoch": 0.030226700251889168, "grad_norm": 0.08854291588068008, "learning_rate": 0.00019994532573409262, "loss": 11.7739, "step": 6 }, { "epoch": 0.03526448362720403, "grad_norm": 0.09998143464326859, "learning_rate": 0.00019978136272187747, "loss": 11.7719, "step": 7 }, { "epoch": 0.04030226700251889, "grad_norm": 0.1092071384191513, "learning_rate": 0.00019950829025450114, "loss": 11.7766, "step": 8 }, { "epoch": 0.04534005037783375, "grad_norm": 0.13437338173389435, "learning_rate": 0.00019912640693269752, "loss": 11.7695, "step": 9 }, { "epoch": 0.05037783375314862, "grad_norm": 0.10191803425550461, "learning_rate": 0.00019863613034027224, "loss": 11.7726, "step": 10 }, { "epoch": 0.055415617128463476, "grad_norm": 0.13112697005271912, "learning_rate": 0.00019803799658748094, "loss": 11.7715, "step": 11 }, { "epoch": 0.060453400503778336, "grad_norm": 0.1708078533411026, "learning_rate": 0.0001973326597248006, "loss": 11.765, "step": 12 }, { "epoch": 0.0654911838790932, "grad_norm": 0.14378079771995544, "learning_rate": 0.00019652089102773488, "loss": 11.7693, "step": 13 }, { "epoch": 0.07052896725440806, "grad_norm": 0.11845815926790237, "learning_rate": 0.00019560357815343577, "loss": 11.754, "step": 14 }, { "epoch": 0.07556675062972293, "grad_norm": 0.13851119577884674, "learning_rate": 0.00019458172417006347, "loss": 11.7614, "step": 15 }, { "epoch": 0.08060453400503778, "grad_norm": 0.14140120148658752, "learning_rate": 0.0001934564464599461, "loss": 11.7656, "step": 16 }, { "epoch": 0.08564231738035265, "grad_norm": 0.21620717644691467, "learning_rate": 0.00019222897549773848, "loss": 11.7735, "step": 17 }, { "epoch": 0.0906801007556675, "grad_norm": 0.2548966407775879, "learning_rate": 0.00019090065350491626, "loss": 11.775, "step": 18 }, { "epoch": 0.09571788413098237, "grad_norm": 0.2564621865749359, "learning_rate": 0.00018947293298207635, "loss": 11.7763, "step": 19 }, { "epoch": 0.10075566750629723, "grad_norm": 0.24637818336486816, "learning_rate": 0.0001879473751206489, "loss": 11.7768, "step": 20 }, { "epoch": 0.10579345088161209, "grad_norm": 0.20252592861652374, "learning_rate": 0.00018632564809575742, "loss": 11.7709, "step": 21 }, { "epoch": 0.11083123425692695, "grad_norm": 0.1991090625524521, "learning_rate": 0.00018460952524209355, "loss": 11.77, "step": 22 }, { "epoch": 0.11586901763224182, "grad_norm": 0.19447946548461914, "learning_rate": 0.00018280088311480201, "loss": 11.7763, "step": 23 }, { "epoch": 0.12090680100755667, 
"grad_norm": 0.31570008397102356, "learning_rate": 0.00018090169943749476, "loss": 11.7832, "step": 24 }, { "epoch": 0.12594458438287154, "grad_norm": 0.26807549595832825, "learning_rate": 0.00017891405093963938, "loss": 11.781, "step": 25 }, { "epoch": 0.1309823677581864, "grad_norm": 0.20723332464694977, "learning_rate": 0.00017684011108568592, "loss": 11.7613, "step": 26 }, { "epoch": 0.13602015113350127, "grad_norm": 0.16287362575531006, "learning_rate": 0.0001746821476984154, "loss": 11.7537, "step": 27 }, { "epoch": 0.14105793450881612, "grad_norm": 0.19770941138267517, "learning_rate": 0.00017244252047910892, "loss": 11.7782, "step": 28 }, { "epoch": 0.14609571788413098, "grad_norm": 0.20761863887310028, "learning_rate": 0.00017012367842724887, "loss": 11.758, "step": 29 }, { "epoch": 0.15113350125944586, "grad_norm": 0.25872254371643066, "learning_rate": 0.00016772815716257412, "loss": 11.7536, "step": 30 }, { "epoch": 0.1561712846347607, "grad_norm": 0.3000946342945099, "learning_rate": 0.00016525857615241687, "loss": 11.7583, "step": 31 }, { "epoch": 0.16120906801007556, "grad_norm": 0.2385156750679016, "learning_rate": 0.0001627176358473537, "loss": 11.7501, "step": 32 }, { "epoch": 0.16624685138539042, "grad_norm": 0.2373785525560379, "learning_rate": 0.00016010811472830252, "loss": 11.7478, "step": 33 }, { "epoch": 0.1712846347607053, "grad_norm": 0.5123424530029297, "learning_rate": 0.00015743286626829437, "loss": 11.788, "step": 34 }, { "epoch": 0.17632241813602015, "grad_norm": 0.4221782386302948, "learning_rate": 0.00015469481581224272, "loss": 11.7738, "step": 35 }, { "epoch": 0.181360201511335, "grad_norm": 0.3816068768501282, "learning_rate": 0.00015189695737812152, "loss": 11.771, "step": 36 }, { "epoch": 0.18639798488664988, "grad_norm": 0.34051746129989624, "learning_rate": 0.00014904235038305083, "loss": 11.7577, "step": 37 }, { "epoch": 0.19143576826196473, "grad_norm": 0.2573257386684418, "learning_rate": 0.0001461341162978688, "loss": 11.7447, "step": 38 }, { "epoch": 0.1964735516372796, "grad_norm": 0.5065600275993347, "learning_rate": 0.00014317543523384928, "loss": 11.7931, "step": 39 }, { "epoch": 0.20151133501259447, "grad_norm": 0.48842620849609375, "learning_rate": 0.00014016954246529696, "loss": 11.7782, "step": 40 }, { "epoch": 0.20654911838790932, "grad_norm": 0.5999230742454529, "learning_rate": 0.00013711972489182208, "loss": 11.7796, "step": 41 }, { "epoch": 0.21158690176322417, "grad_norm": 0.5456677079200745, "learning_rate": 0.00013402931744416433, "loss": 11.7846, "step": 42 }, { "epoch": 0.21662468513853905, "grad_norm": 0.544049859046936, "learning_rate": 0.00013090169943749476, "loss": 11.7765, "step": 43 }, { "epoch": 0.2216624685138539, "grad_norm": 0.4290430545806885, "learning_rate": 0.00012774029087618446, "loss": 11.7786, "step": 44 }, { "epoch": 0.22670025188916876, "grad_norm": 0.624894380569458, "learning_rate": 0.00012454854871407994, "loss": 11.7792, "step": 45 }, { "epoch": 0.23173803526448364, "grad_norm": 0.6616364121437073, "learning_rate": 0.0001213299630743747, "loss": 11.7779, "step": 46 }, { "epoch": 0.2367758186397985, "grad_norm": 0.6500002145767212, "learning_rate": 0.000118088053433211, "loss": 11.7756, "step": 47 }, { "epoch": 0.24181360201511334, "grad_norm": 0.7946918606758118, "learning_rate": 0.0001148263647711842, "loss": 11.7614, "step": 48 }, { "epoch": 0.24685138539042822, "grad_norm": 0.7269666790962219, "learning_rate": 0.00011154846369695863, "loss": 11.7358, "step": 49 }, { "epoch": 0.2518891687657431, 
"grad_norm": 0.14651153981685638, "learning_rate": 0.00010825793454723325, "loss": 11.7679, "step": 50 }, { "epoch": 0.2518891687657431, "eval_loss": 11.756505966186523, "eval_runtime": 1.3815, "eval_samples_per_second": 60.803, "eval_steps_per_second": 30.401, "step": 50 } ], "logging_steps": 1, "max_steps": 100, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 50, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 1, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 20370004377600.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }