{ "best_metric": 6.92307186126709, "best_model_checkpoint": "miner_id_24/checkpoint-50", "epoch": 0.010162214346506104, "eval_steps": 25, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.00020324428693012207, "grad_norm": 0.08410007506608963, "learning_rate": 5e-05, "loss": 6.9386, "step": 1 }, { "epoch": 0.00020324428693012207, "eval_loss": 6.941287040710449, "eval_runtime": 0.1174, "eval_samples_per_second": 425.819, "eval_steps_per_second": 110.713, "step": 1 }, { "epoch": 0.00040648857386024414, "grad_norm": 0.08960605412721634, "learning_rate": 0.0001, "loss": 6.9386, "step": 2 }, { "epoch": 0.0006097328607903663, "grad_norm": 0.08601795881986618, "learning_rate": 9.990365154573717e-05, "loss": 6.9382, "step": 3 }, { "epoch": 0.0008129771477204883, "grad_norm": 0.09231054782867432, "learning_rate": 9.961501876182148e-05, "loss": 6.9385, "step": 4 }, { "epoch": 0.0010162214346506103, "grad_norm": 0.09385655075311661, "learning_rate": 9.913533761814537e-05, "loss": 6.9373, "step": 5 }, { "epoch": 0.0012194657215807325, "grad_norm": 0.10033882409334183, "learning_rate": 9.846666218300807e-05, "loss": 6.9395, "step": 6 }, { "epoch": 0.0014227100085108546, "grad_norm": 0.10151656717061996, "learning_rate": 9.761185582727977e-05, "loss": 6.9374, "step": 7 }, { "epoch": 0.0016259542954409766, "grad_norm": 0.10626529902219772, "learning_rate": 9.657457896300791e-05, "loss": 6.9356, "step": 8 }, { "epoch": 0.0018291985823710986, "grad_norm": 0.1114870011806488, "learning_rate": 9.535927336897098e-05, "loss": 6.9349, "step": 9 }, { "epoch": 0.0020324428693012206, "grad_norm": 0.12120575457811356, "learning_rate": 9.397114317029975e-05, "loss": 6.9321, "step": 10 }, { "epoch": 0.0022356871562313426, "grad_norm": 0.12660174071788788, "learning_rate": 9.241613255361455e-05, "loss": 6.9331, "step": 11 }, { "epoch": 0.002438931443161465, "grad_norm": 0.14304086565971375, "learning_rate": 9.070090031310558e-05, "loss": 6.9333, "step": 12 }, { "epoch": 0.002642175730091587, "grad_norm": 0.10343854874372482, "learning_rate": 8.883279133655399e-05, "loss": 6.9351, "step": 13 }, { "epoch": 0.002845420017021709, "grad_norm": 0.08956442028284073, "learning_rate": 8.681980515339464e-05, "loss": 6.9348, "step": 14 }, { "epoch": 0.003048664303951831, "grad_norm": 0.09538648277521133, "learning_rate": 8.467056167950311e-05, "loss": 6.9332, "step": 15 }, { "epoch": 0.003251908590881953, "grad_norm": 0.09813094139099121, "learning_rate": 8.239426430539243e-05, "loss": 6.9334, "step": 16 }, { "epoch": 0.003455152877812075, "grad_norm": 0.09969386458396912, "learning_rate": 8.000066048588211e-05, "loss": 6.9312, "step": 17 }, { "epoch": 0.003658397164742197, "grad_norm": 0.10542583465576172, "learning_rate": 7.75e-05, "loss": 6.9304, "step": 18 }, { "epoch": 0.003861641451672319, "grad_norm": 0.11531369388103485, "learning_rate": 7.490299105985507e-05, "loss": 6.9319, "step": 19 }, { "epoch": 0.004064885738602441, "grad_norm": 0.11504054814577103, "learning_rate": 7.222075445642904e-05, "loss": 6.9309, "step": 20 }, { "epoch": 0.004268130025532563, "grad_norm": 0.12010727822780609, "learning_rate": 6.946477593864228e-05, "loss": 6.9303, "step": 21 }, { "epoch": 0.004471374312462685, "grad_norm": 0.12595853209495544, "learning_rate": 6.664685702961344e-05, "loss": 6.9275, "step": 22 }, { "epoch": 0.004674618599392807, "grad_norm": 0.1284550428390503, "learning_rate": 6.377906449072578e-05, "loss": 6.9256, "step": 23 }, { 
"epoch": 0.00487786288632293, "grad_norm": 0.14215584099292755, "learning_rate": 6.087367864990233e-05, "loss": 6.9281, "step": 24 }, { "epoch": 0.005081107173253052, "grad_norm": 0.16178622841835022, "learning_rate": 5.794314081535644e-05, "loss": 6.9265, "step": 25 }, { "epoch": 0.005081107173253052, "eval_loss": 6.92898416519165, "eval_runtime": 0.1272, "eval_samples_per_second": 392.966, "eval_steps_per_second": 102.171, "step": 25 }, { "epoch": 0.005284351460183174, "grad_norm": 0.09798461943864822, "learning_rate": 5.500000000000001e-05, "loss": 6.9302, "step": 26 }, { "epoch": 0.005487595747113296, "grad_norm": 0.09589119255542755, "learning_rate": 5.205685918464356e-05, "loss": 6.9309, "step": 27 }, { "epoch": 0.005690840034043418, "grad_norm": 0.10878289490938187, "learning_rate": 4.912632135009769e-05, "loss": 6.9289, "step": 28 }, { "epoch": 0.00589408432097354, "grad_norm": 0.10759259760379791, "learning_rate": 4.6220935509274235e-05, "loss": 6.9276, "step": 29 }, { "epoch": 0.006097328607903662, "grad_norm": 0.10860608518123627, "learning_rate": 4.3353142970386564e-05, "loss": 6.9266, "step": 30 }, { "epoch": 0.006300572894833784, "grad_norm": 0.12264492362737656, "learning_rate": 4.053522406135775e-05, "loss": 6.9263, "step": 31 }, { "epoch": 0.006503817181763906, "grad_norm": 0.12330181896686554, "learning_rate": 3.777924554357096e-05, "loss": 6.9263, "step": 32 }, { "epoch": 0.006707061468694028, "grad_norm": 0.12452418357133865, "learning_rate": 3.509700894014496e-05, "loss": 6.9239, "step": 33 }, { "epoch": 0.00691030575562415, "grad_norm": 0.131307452917099, "learning_rate": 3.250000000000001e-05, "loss": 6.9239, "step": 34 }, { "epoch": 0.007113550042554272, "grad_norm": 0.1308247298002243, "learning_rate": 2.9999339514117912e-05, "loss": 6.9252, "step": 35 }, { "epoch": 0.007316794329484394, "grad_norm": 0.14916321635246277, "learning_rate": 2.760573569460757e-05, "loss": 6.924, "step": 36 }, { "epoch": 0.007520038616414516, "grad_norm": 0.15847186744213104, "learning_rate": 2.53294383204969e-05, "loss": 6.9211, "step": 37 }, { "epoch": 0.007723282903344638, "grad_norm": 0.1158829927444458, "learning_rate": 2.3180194846605367e-05, "loss": 6.925, "step": 38 }, { "epoch": 0.00792652719027476, "grad_norm": 0.10773951560258865, "learning_rate": 2.1167208663446025e-05, "loss": 6.9268, "step": 39 }, { "epoch": 0.008129771477204882, "grad_norm": 0.11924441158771515, "learning_rate": 1.9299099686894423e-05, "loss": 6.9269, "step": 40 }, { "epoch": 0.008333015764135004, "grad_norm": 0.12030573934316635, "learning_rate": 1.758386744638546e-05, "loss": 6.9251, "step": 41 }, { "epoch": 0.008536260051065126, "grad_norm": 0.12425759434700012, "learning_rate": 1.602885682970026e-05, "loss": 6.9232, "step": 42 }, { "epoch": 0.008739504337995248, "grad_norm": 0.12707437574863434, "learning_rate": 1.464072663102903e-05, "loss": 6.9242, "step": 43 }, { "epoch": 0.00894274862492537, "grad_norm": 0.13171014189720154, "learning_rate": 1.3425421036992098e-05, "loss": 6.9229, "step": 44 }, { "epoch": 0.009145992911855493, "grad_norm": 0.12991087138652802, "learning_rate": 1.2388144172720251e-05, "loss": 6.921, "step": 45 }, { "epoch": 0.009349237198785615, "grad_norm": 0.13416709005832672, "learning_rate": 1.1533337816991932e-05, "loss": 6.9232, "step": 46 }, { "epoch": 0.009552481485715738, "grad_norm": 0.13503502309322357, "learning_rate": 1.0864662381854632e-05, "loss": 6.9227, "step": 47 }, { "epoch": 0.00975572577264586, "grad_norm": 0.14602893590927124, "learning_rate": 
1.0384981238178534e-05, "loss": 6.9204, "step": 48 }, { "epoch": 0.009958970059575982, "grad_norm": 0.14785030484199524, "learning_rate": 1.0096348454262845e-05, "loss": 6.9203, "step": 49 }, { "epoch": 0.010162214346506104, "grad_norm": 0.17115415632724762, "learning_rate": 1e-05, "loss": 6.9184, "step": 50 }, { "epoch": 0.010162214346506104, "eval_loss": 6.92307186126709, "eval_runtime": 0.1177, "eval_samples_per_second": 424.749, "eval_steps_per_second": 110.435, "step": 50 } ], "logging_steps": 1, "max_steps": 50, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 1, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 5705957376000.0, "train_batch_size": 1, "trial_name": null, "trial_params": null }