{ "best_metric": 0.2451000064611435, "best_model_checkpoint": "miner_id_24/checkpoint-50", "epoch": 0.003562776115148924, "eval_steps": 50, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 7.125552230297848e-05, "grad_norm": 1.042211651802063, "learning_rate": 1e-06, "loss": 0.3057, "step": 1 }, { "epoch": 7.125552230297848e-05, "eval_loss": 0.7187018990516663, "eval_runtime": 728.4647, "eval_samples_per_second": 8.112, "eval_steps_per_second": 2.029, "step": 1 }, { "epoch": 0.00014251104460595695, "grad_norm": 1.606184959411621, "learning_rate": 2e-06, "loss": 0.4561, "step": 2 }, { "epoch": 0.00021376656690893543, "grad_norm": 1.5126073360443115, "learning_rate": 3e-06, "loss": 0.4418, "step": 3 }, { "epoch": 0.0002850220892119139, "grad_norm": 1.9431225061416626, "learning_rate": 4e-06, "loss": 0.5693, "step": 4 }, { "epoch": 0.0003562776115148924, "grad_norm": 1.9813414812088013, "learning_rate": 4.9999999999999996e-06, "loss": 0.5906, "step": 5 }, { "epoch": 0.00042753313381787086, "grad_norm": 1.8735884428024292, "learning_rate": 6e-06, "loss": 0.4769, "step": 6 }, { "epoch": 0.0004987886561208494, "grad_norm": 1.8006073236465454, "learning_rate": 7e-06, "loss": 0.5359, "step": 7 }, { "epoch": 0.0005700441784238278, "grad_norm": 1.539935827255249, "learning_rate": 8e-06, "loss": 0.4696, "step": 8 }, { "epoch": 0.0006412997007268063, "grad_norm": 2.0199615955352783, "learning_rate": 9e-06, "loss": 0.5917, "step": 9 }, { "epoch": 0.0007125552230297848, "grad_norm": 1.9582463502883911, "learning_rate": 9.999999999999999e-06, "loss": 0.4258, "step": 10 }, { "epoch": 0.0007838107453327633, "grad_norm": 1.63983952999115, "learning_rate": 1.1e-05, "loss": 0.3967, "step": 11 }, { "epoch": 0.0008550662676357417, "grad_norm": 1.615180253982544, "learning_rate": 1.2e-05, "loss": 0.4462, "step": 12 }, { "epoch": 0.0009263217899387203, "grad_norm": 1.6889790296554565, "learning_rate": 1.3000000000000001e-05, "loss": 0.3865, "step": 13 }, { "epoch": 0.0009975773122416988, "grad_norm": 1.7628371715545654, "learning_rate": 1.4e-05, "loss": 0.5235, "step": 14 }, { "epoch": 0.0010688328345446773, "grad_norm": 1.7251651287078857, "learning_rate": 1.5e-05, "loss": 0.2985, "step": 15 }, { "epoch": 0.0011400883568476556, "grad_norm": 1.7641141414642334, "learning_rate": 1.6e-05, "loss": 0.4062, "step": 16 }, { "epoch": 0.0012113438791506342, "grad_norm": 1.7730730772018433, "learning_rate": 1.7e-05, "loss": 0.2535, "step": 17 }, { "epoch": 0.0012825994014536127, "grad_norm": 1.6763519048690796, "learning_rate": 1.8e-05, "loss": 0.2611, "step": 18 }, { "epoch": 0.0013538549237565912, "grad_norm": 1.8056228160858154, "learning_rate": 1.9e-05, "loss": 0.3504, "step": 19 }, { "epoch": 0.0014251104460595695, "grad_norm": 1.7897692918777466, "learning_rate": 1.9999999999999998e-05, "loss": 0.2584, "step": 20 }, { "epoch": 0.001496365968362548, "grad_norm": 1.8470932245254517, "learning_rate": 2.1e-05, "loss": 0.2949, "step": 21 }, { "epoch": 0.0015676214906655266, "grad_norm": 1.2270846366882324, "learning_rate": 2.2e-05, "loss": 0.2835, "step": 22 }, { "epoch": 0.0016388770129685051, "grad_norm": 1.2478747367858887, "learning_rate": 2.3000000000000003e-05, "loss": 0.2585, "step": 23 }, { "epoch": 0.0017101325352714834, "grad_norm": 1.7448375225067139, "learning_rate": 2.4e-05, "loss": 0.3246, "step": 24 }, { "epoch": 0.001781388057574462, "grad_norm": 1.377163290977478, "learning_rate": 2.5e-05, "loss": 0.1817, "step": 
25 }, { "epoch": 0.0018526435798774405, "grad_norm": 1.6893903017044067, "learning_rate": 2.6000000000000002e-05, "loss": 0.2882, "step": 26 }, { "epoch": 0.001923899102180419, "grad_norm": 1.412766456604004, "learning_rate": 2.7000000000000002e-05, "loss": 0.293, "step": 27 }, { "epoch": 0.0019951546244833976, "grad_norm": 1.3407995700836182, "learning_rate": 2.8e-05, "loss": 0.1812, "step": 28 }, { "epoch": 0.002066410146786376, "grad_norm": 1.8757520914077759, "learning_rate": 2.9e-05, "loss": 0.2906, "step": 29 }, { "epoch": 0.0021376656690893546, "grad_norm": 1.1567577123641968, "learning_rate": 3e-05, "loss": 0.2146, "step": 30 }, { "epoch": 0.002208921191392333, "grad_norm": 1.3666956424713135, "learning_rate": 2.9984895998119723e-05, "loss": 0.1976, "step": 31 }, { "epoch": 0.0022801767136953113, "grad_norm": 1.3091264963150024, "learning_rate": 2.993961440992859e-05, "loss": 0.1436, "step": 32 }, { "epoch": 0.00235143223599829, "grad_norm": 1.0269235372543335, "learning_rate": 2.9864246426519023e-05, "loss": 0.1848, "step": 33 }, { "epoch": 0.0024226877583012683, "grad_norm": 1.5328474044799805, "learning_rate": 2.9758943828979444e-05, "loss": 0.2269, "step": 34 }, { "epoch": 0.0024939432806042466, "grad_norm": 1.4854594469070435, "learning_rate": 2.9623918682727355e-05, "loss": 0.1739, "step": 35 }, { "epoch": 0.0025651988029072254, "grad_norm": 1.223655104637146, "learning_rate": 2.9459442910437798e-05, "loss": 0.1476, "step": 36 }, { "epoch": 0.0026364543252102037, "grad_norm": 1.455477237701416, "learning_rate": 2.9265847744427305e-05, "loss": 0.1762, "step": 37 }, { "epoch": 0.0027077098475131824, "grad_norm": 1.223581075668335, "learning_rate": 2.904352305959606e-05, "loss": 0.1392, "step": 38 }, { "epoch": 0.0027789653698161608, "grad_norm": 1.1783467531204224, "learning_rate": 2.8792916588271762e-05, "loss": 0.1729, "step": 39 }, { "epoch": 0.002850220892119139, "grad_norm": 1.8590701818466187, "learning_rate": 2.8514533018536286e-05, "loss": 0.3492, "step": 40 }, { "epoch": 0.002921476414422118, "grad_norm": 1.1538301706314087, "learning_rate": 2.820893297785107e-05, "loss": 0.1841, "step": 41 }, { "epoch": 0.002992731936725096, "grad_norm": 1.5694233179092407, "learning_rate": 2.7876731904027994e-05, "loss": 0.2156, "step": 42 }, { "epoch": 0.003063987459028075, "grad_norm": 1.357988953590393, "learning_rate": 2.7518598805819542e-05, "loss": 0.2037, "step": 43 }, { "epoch": 0.003135242981331053, "grad_norm": 1.150630235671997, "learning_rate": 2.7135254915624213e-05, "loss": 0.1607, "step": 44 }, { "epoch": 0.0032064985036340315, "grad_norm": 1.2249001264572144, "learning_rate": 2.672747223702045e-05, "loss": 0.2267, "step": 45 }, { "epoch": 0.0032777540259370103, "grad_norm": 1.1600937843322754, "learning_rate": 2.6296071990054167e-05, "loss": 0.1363, "step": 46 }, { "epoch": 0.0033490095482399886, "grad_norm": 1.2449615001678467, "learning_rate": 2.5841922957410875e-05, "loss": 0.2024, "step": 47 }, { "epoch": 0.003420265070542967, "grad_norm": 1.292077660560608, "learning_rate": 2.5365939734802973e-05, "loss": 0.1311, "step": 48 }, { "epoch": 0.0034915205928459456, "grad_norm": 1.2999500036239624, "learning_rate": 2.4869080889095693e-05, "loss": 0.1977, "step": 49 }, { "epoch": 0.003562776115148924, "grad_norm": 1.0472160577774048, "learning_rate": 2.4352347027881003e-05, "loss": 0.1228, "step": 50 }, { "epoch": 0.003562776115148924, "eval_loss": 0.2451000064611435, "eval_runtime": 732.9403, "eval_samples_per_second": 8.062, "eval_steps_per_second": 2.017, "step": 50 
} ], "logging_steps": 1, "max_steps": 100, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 50, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 5, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 1.904498393166643e+16, "train_batch_size": 4, "trial_name": null, "trial_params": null }