{ "best_metric": 0.5923976302146912, "best_model_checkpoint": "miner_id_24/checkpoint-50", "epoch": 0.06441742491343909, "eval_steps": 25, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0012883484982687816, "grad_norm": 47.613548278808594, "learning_rate": 5e-05, "loss": 60.1193, "step": 1 }, { "epoch": 0.0012883484982687816, "eval_loss": 2.737609624862671, "eval_runtime": 4.1599, "eval_samples_per_second": 12.02, "eval_steps_per_second": 3.125, "step": 1 }, { "epoch": 0.0025766969965375633, "grad_norm": 52.00191116333008, "learning_rate": 0.0001, "loss": 65.2752, "step": 2 }, { "epoch": 0.0038650454948063453, "grad_norm": 53.02202224731445, "learning_rate": 9.990365154573717e-05, "loss": 71.0087, "step": 3 }, { "epoch": 0.0051533939930751265, "grad_norm": 49.392635345458984, "learning_rate": 9.961501876182148e-05, "loss": 64.6277, "step": 4 }, { "epoch": 0.006441742491343909, "grad_norm": 53.78055953979492, "learning_rate": 9.913533761814537e-05, "loss": 63.4741, "step": 5 }, { "epoch": 0.007730090989612691, "grad_norm": 57.29056167602539, "learning_rate": 9.846666218300807e-05, "loss": 58.9412, "step": 6 }, { "epoch": 0.009018439487881473, "grad_norm": 53.10818862915039, "learning_rate": 9.761185582727977e-05, "loss": 55.5142, "step": 7 }, { "epoch": 0.010306787986150253, "grad_norm": 60.22203826904297, "learning_rate": 9.657457896300791e-05, "loss": 53.7236, "step": 8 }, { "epoch": 0.011595136484419035, "grad_norm": 43.83124923706055, "learning_rate": 9.535927336897098e-05, "loss": 49.4407, "step": 9 }, { "epoch": 0.012883484982687817, "grad_norm": 55.66758346557617, "learning_rate": 9.397114317029975e-05, "loss": 47.6461, "step": 10 }, { "epoch": 0.0141718334809566, "grad_norm": 93.18301391601562, "learning_rate": 9.241613255361455e-05, "loss": 48.988, "step": 11 }, { "epoch": 0.015460181979225381, "grad_norm": 64.65294647216797, "learning_rate": 9.070090031310558e-05, "loss": 47.249, "step": 12 }, { "epoch": 0.016748530477494163, "grad_norm": 64.37872314453125, "learning_rate": 8.883279133655399e-05, "loss": 40.3842, "step": 13 }, { "epoch": 0.018036878975762945, "grad_norm": 62.399436950683594, "learning_rate": 8.681980515339464e-05, "loss": 31.3045, "step": 14 }, { "epoch": 0.019325227474031724, "grad_norm": 30.31430435180664, "learning_rate": 8.467056167950311e-05, "loss": 32.2455, "step": 15 }, { "epoch": 0.020613575972300506, "grad_norm": 27.626392364501953, "learning_rate": 8.239426430539243e-05, "loss": 30.5162, "step": 16 }, { "epoch": 0.021901924470569288, "grad_norm": 26.39891242980957, "learning_rate": 8.000066048588211e-05, "loss": 31.7303, "step": 17 }, { "epoch": 0.02319027296883807, "grad_norm": 24.801267623901367, "learning_rate": 7.75e-05, "loss": 31.2513, "step": 18 }, { "epoch": 0.024478621467106852, "grad_norm": 24.73192596435547, "learning_rate": 7.490299105985507e-05, "loss": 31.0303, "step": 19 }, { "epoch": 0.025766969965375634, "grad_norm": 25.891586303710938, "learning_rate": 7.222075445642904e-05, "loss": 30.0861, "step": 20 }, { "epoch": 0.027055318463644416, "grad_norm": 24.747957229614258, "learning_rate": 6.946477593864228e-05, "loss": 29.9776, "step": 21 }, { "epoch": 0.0283436669619132, "grad_norm": 26.517791748046875, "learning_rate": 6.664685702961344e-05, "loss": 30.5861, "step": 22 }, { "epoch": 0.02963201546018198, "grad_norm": 25.020784378051758, "learning_rate": 6.377906449072578e-05, "loss": 27.3878, "step": 23 }, { "epoch": 0.030920363958450763, 
"grad_norm": 26.573110580444336, "learning_rate": 6.087367864990233e-05, "loss": 27.9549, "step": 24 }, { "epoch": 0.032208712456719545, "grad_norm": 32.546878814697266, "learning_rate": 5.794314081535644e-05, "loss": 25.2439, "step": 25 }, { "epoch": 0.032208712456719545, "eval_loss": 0.8477029204368591, "eval_runtime": 4.042, "eval_samples_per_second": 12.37, "eval_steps_per_second": 3.216, "step": 25 }, { "epoch": 0.03349706095498833, "grad_norm": 36.677696228027344, "learning_rate": 5.500000000000001e-05, "loss": 24.531, "step": 26 }, { "epoch": 0.03478540945325711, "grad_norm": 29.125059127807617, "learning_rate": 5.205685918464356e-05, "loss": 20.848, "step": 27 }, { "epoch": 0.03607375795152589, "grad_norm": 25.221555709838867, "learning_rate": 4.912632135009769e-05, "loss": 20.9429, "step": 28 }, { "epoch": 0.03736210644979467, "grad_norm": 24.186708450317383, "learning_rate": 4.6220935509274235e-05, "loss": 21.7346, "step": 29 }, { "epoch": 0.03865045494806345, "grad_norm": 24.044862747192383, "learning_rate": 4.3353142970386564e-05, "loss": 21.4339, "step": 30 }, { "epoch": 0.03993880344633223, "grad_norm": 24.531862258911133, "learning_rate": 4.053522406135775e-05, "loss": 20.8275, "step": 31 }, { "epoch": 0.04122715194460101, "grad_norm": 24.415983200073242, "learning_rate": 3.777924554357096e-05, "loss": 20.4433, "step": 32 }, { "epoch": 0.042515500442869794, "grad_norm": 20.245210647583008, "learning_rate": 3.509700894014496e-05, "loss": 20.7097, "step": 33 }, { "epoch": 0.043803848941138576, "grad_norm": 20.419910430908203, "learning_rate": 3.250000000000001e-05, "loss": 20.0465, "step": 34 }, { "epoch": 0.04509219743940736, "grad_norm": 21.229557037353516, "learning_rate": 2.9999339514117912e-05, "loss": 19.755, "step": 35 }, { "epoch": 0.04638054593767614, "grad_norm": 24.20121955871582, "learning_rate": 2.760573569460757e-05, "loss": 20.3669, "step": 36 }, { "epoch": 0.04766889443594492, "grad_norm": 27.731250762939453, "learning_rate": 2.53294383204969e-05, "loss": 20.5599, "step": 37 }, { "epoch": 0.048957242934213704, "grad_norm": 22.39792251586914, "learning_rate": 2.3180194846605367e-05, "loss": 19.2265, "step": 38 }, { "epoch": 0.050245591432482486, "grad_norm": 23.370872497558594, "learning_rate": 2.1167208663446025e-05, "loss": 16.5371, "step": 39 }, { "epoch": 0.05153393993075127, "grad_norm": 30.893217086791992, "learning_rate": 1.9299099686894423e-05, "loss": 17.1265, "step": 40 }, { "epoch": 0.05282228842902005, "grad_norm": 23.17006492614746, "learning_rate": 1.758386744638546e-05, "loss": 16.948, "step": 41 }, { "epoch": 0.05411063692728883, "grad_norm": 19.363140106201172, "learning_rate": 1.602885682970026e-05, "loss": 18.092, "step": 42 }, { "epoch": 0.055398985425557615, "grad_norm": 21.08965301513672, "learning_rate": 1.464072663102903e-05, "loss": 18.0266, "step": 43 }, { "epoch": 0.0566873339238264, "grad_norm": 20.33384895324707, "learning_rate": 1.3425421036992098e-05, "loss": 17.16, "step": 44 }, { "epoch": 0.05797568242209518, "grad_norm": 22.02051544189453, "learning_rate": 1.2388144172720251e-05, "loss": 17.0887, "step": 45 }, { "epoch": 0.05926403092036396, "grad_norm": 24.07645606994629, "learning_rate": 1.1533337816991932e-05, "loss": 16.9783, "step": 46 }, { "epoch": 0.06055237941863274, "grad_norm": 25.257797241210938, "learning_rate": 1.0864662381854632e-05, "loss": 17.5447, "step": 47 }, { "epoch": 0.061840727916901525, "grad_norm": 32.02328872680664, "learning_rate": 1.0384981238178534e-05, "loss": 18.0569, "step": 48 }, { "epoch": 
0.06312907641517031, "grad_norm": 27.761093139648438, "learning_rate": 1.0096348454262845e-05, "loss": 17.8059, "step": 49 }, { "epoch": 0.06441742491343909, "grad_norm": 36.194374084472656, "learning_rate": 1e-05, "loss": 17.8447, "step": 50 }, { "epoch": 0.06441742491343909, "eval_loss": 0.5923976302146912, "eval_runtime": 4.1863, "eval_samples_per_second": 11.944, "eval_steps_per_second": 3.105, "step": 50 } ], "logging_steps": 1, "max_steps": 50, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 1, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 5.268143856628531e+17, "train_batch_size": 1, "trial_name": null, "trial_params": null }