{
  "best_metric": 0.14273174107074738,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.2575245453082247,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005150490906164494,
      "grad_norm": 195.5614013671875,
      "learning_rate": 5e-05,
      "loss": 183.4214,
      "step": 1
    },
    {
      "epoch": 0.005150490906164494,
      "eval_loss": 6.080627918243408,
      "eval_runtime": 4.0984,
      "eval_samples_per_second": 12.2,
      "eval_steps_per_second": 3.172,
      "step": 1
    },
    {
      "epoch": 0.010300981812328988,
      "grad_norm": 212.53482055664062,
      "learning_rate": 0.0001,
      "loss": 184.5833,
      "step": 2
    },
    {
      "epoch": 0.01545147271849348,
      "grad_norm": 182.6468963623047,
      "learning_rate": 9.990365154573717e-05,
      "loss": 173.0269,
      "step": 3
    },
    {
      "epoch": 0.020601963624657976,
      "grad_norm": 172.3017120361328,
      "learning_rate": 9.961501876182148e-05,
      "loss": 161.0496,
      "step": 4
    },
    {
      "epoch": 0.02575245453082247,
      "grad_norm": 148.2727813720703,
      "learning_rate": 9.913533761814537e-05,
      "loss": 127.753,
      "step": 5
    },
    {
      "epoch": 0.03090294543698696,
      "grad_norm": 159.01747131347656,
      "learning_rate": 9.846666218300807e-05,
      "loss": 109.5256,
      "step": 6
    },
    {
      "epoch": 0.03605343634315146,
      "grad_norm": 125.15324401855469,
      "learning_rate": 9.761185582727977e-05,
      "loss": 84.6997,
      "step": 7
    },
    {
      "epoch": 0.04120392724931595,
      "grad_norm": 95.49300384521484,
      "learning_rate": 9.657457896300791e-05,
      "loss": 76.9491,
      "step": 8
    },
    {
      "epoch": 0.04635441815548044,
      "grad_norm": 85.85482025146484,
      "learning_rate": 9.535927336897098e-05,
      "loss": 69.9889,
      "step": 9
    },
    {
      "epoch": 0.05150490906164494,
      "grad_norm": 102.58478546142578,
      "learning_rate": 9.397114317029975e-05,
      "loss": 64.823,
      "step": 10
    },
    {
      "epoch": 0.05665539996780943,
      "grad_norm": 96.248291015625,
      "learning_rate": 9.241613255361455e-05,
      "loss": 62.2639,
      "step": 11
    },
    {
      "epoch": 0.06180589087397392,
      "grad_norm": 107.83429718017578,
      "learning_rate": 9.070090031310558e-05,
      "loss": 56.1169,
      "step": 12
    },
    {
      "epoch": 0.06695638178013842,
      "grad_norm": 96.302001953125,
      "learning_rate": 8.883279133655399e-05,
      "loss": 63.8062,
      "step": 13
    },
    {
      "epoch": 0.07210687268630292,
      "grad_norm": 139.03134155273438,
      "learning_rate": 8.681980515339464e-05,
      "loss": 47.5832,
      "step": 14
    },
    {
      "epoch": 0.0772573635924674,
      "grad_norm": 91.16569519042969,
      "learning_rate": 8.467056167950311e-05,
      "loss": 41.2896,
      "step": 15
    },
    {
      "epoch": 0.0824078544986319,
      "grad_norm": 101.69834899902344,
      "learning_rate": 8.239426430539243e-05,
      "loss": 40.1252,
      "step": 16
    },
    {
      "epoch": 0.0875583454047964,
      "grad_norm": 94.4932861328125,
      "learning_rate": 8.000066048588211e-05,
      "loss": 35.824,
      "step": 17
    },
    {
      "epoch": 0.09270883631096088,
      "grad_norm": 65.91960906982422,
      "learning_rate": 7.75e-05,
      "loss": 32.7603,
      "step": 18
    },
    {
      "epoch": 0.09785932721712538,
      "grad_norm": 45.94514846801758,
      "learning_rate": 7.490299105985507e-05,
      "loss": 28.143,
      "step": 19
    },
    {
      "epoch": 0.10300981812328988,
      "grad_norm": 49.135292053222656,
      "learning_rate": 7.222075445642904e-05,
      "loss": 24.3015,
      "step": 20
    },
    {
      "epoch": 0.10816030902945437,
      "grad_norm": 52.58118438720703,
      "learning_rate": 6.946477593864228e-05,
      "loss": 24.7888,
      "step": 21
    },
    {
      "epoch": 0.11331079993561886,
      "grad_norm": 40.06536865234375,
      "learning_rate": 6.664685702961344e-05,
      "loss": 21.0641,
      "step": 22
    },
    {
      "epoch": 0.11846129084178336,
      "grad_norm": 61.566043853759766,
      "learning_rate": 6.377906449072578e-05,
      "loss": 19.7781,
      "step": 23
    },
    {
      "epoch": 0.12361178174794785,
      "grad_norm": 54.66169357299805,
      "learning_rate": 6.087367864990233e-05,
      "loss": 20.4498,
      "step": 24
    },
    {
      "epoch": 0.12876227265411236,
      "grad_norm": 158.48204040527344,
      "learning_rate": 5.794314081535644e-05,
      "loss": 31.4924,
      "step": 25
    },
    {
      "epoch": 0.12876227265411236,
      "eval_loss": 0.5459790825843811,
      "eval_runtime": 4.1249,
      "eval_samples_per_second": 12.121,
      "eval_steps_per_second": 3.152,
      "step": 25
    },
    {
      "epoch": 0.13391276356027684,
      "grad_norm": 135.01632690429688,
      "learning_rate": 5.500000000000001e-05,
      "loss": 19.2796,
      "step": 26
    },
    {
      "epoch": 0.13906325446644133,
      "grad_norm": 122.40741729736328,
      "learning_rate": 5.205685918464356e-05,
      "loss": 20.3915,
      "step": 27
    },
    {
      "epoch": 0.14421374537260584,
      "grad_norm": 77.29145050048828,
      "learning_rate": 4.912632135009769e-05,
      "loss": 15.7624,
      "step": 28
    },
    {
      "epoch": 0.14936423627877032,
      "grad_norm": 71.43122863769531,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 16.1651,
      "step": 29
    },
    {
      "epoch": 0.1545147271849348,
      "grad_norm": 60.62587356567383,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 11.7429,
      "step": 30
    },
    {
      "epoch": 0.15966521809109932,
      "grad_norm": 69.12200927734375,
      "learning_rate": 4.053522406135775e-05,
      "loss": 14.7872,
      "step": 31
    },
    {
      "epoch": 0.1648157089972638,
      "grad_norm": 48.978023529052734,
      "learning_rate": 3.777924554357096e-05,
      "loss": 8.8029,
      "step": 32
    },
    {
      "epoch": 0.1699661999034283,
      "grad_norm": 47.26166534423828,
      "learning_rate": 3.509700894014496e-05,
      "loss": 9.5402,
      "step": 33
    },
    {
      "epoch": 0.1751166908095928,
      "grad_norm": 49.96639633178711,
      "learning_rate": 3.250000000000001e-05,
      "loss": 8.8643,
      "step": 34
    },
    {
      "epoch": 0.18026718171575729,
      "grad_norm": 47.3193359375,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 10.71,
      "step": 35
    },
    {
      "epoch": 0.18541767262192177,
      "grad_norm": 55.040592193603516,
      "learning_rate": 2.760573569460757e-05,
      "loss": 9.0711,
      "step": 36
    },
    {
      "epoch": 0.19056816352808628,
      "grad_norm": 60.64955520629883,
      "learning_rate": 2.53294383204969e-05,
      "loss": 10.7455,
      "step": 37
    },
    {
      "epoch": 0.19571865443425077,
      "grad_norm": 96.02499389648438,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 14.7612,
      "step": 38
    },
    {
      "epoch": 0.20086914534041525,
      "grad_norm": 68.96138763427734,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 10.6281,
      "step": 39
    },
    {
      "epoch": 0.20601963624657976,
      "grad_norm": 78.70532989501953,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 9.8648,
      "step": 40
    },
    {
      "epoch": 0.21117012715274425,
      "grad_norm": 73.4675521850586,
      "learning_rate": 1.758386744638546e-05,
      "loss": 9.5117,
      "step": 41
    },
    {
      "epoch": 0.21632061805890873,
      "grad_norm": 60.545936584472656,
      "learning_rate": 1.602885682970026e-05,
      "loss": 9.5021,
      "step": 42
    },
    {
      "epoch": 0.22147110896507324,
      "grad_norm": 47.23418045043945,
      "learning_rate": 1.464072663102903e-05,
      "loss": 7.5249,
      "step": 43
    },
    {
      "epoch": 0.22662159987123773,
      "grad_norm": 33.51955795288086,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 5.3886,
      "step": 44
    },
    {
      "epoch": 0.2317720907774022,
      "grad_norm": 40.547977447509766,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 5.9733,
      "step": 45
    },
    {
      "epoch": 0.23692258168356672,
      "grad_norm": 34.97176742553711,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 5.6807,
      "step": 46
    },
    {
      "epoch": 0.2420730725897312,
      "grad_norm": 79.3729476928711,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 6.1564,
      "step": 47
    },
    {
      "epoch": 0.2472235634958957,
      "grad_norm": 46.89299011230469,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 4.3804,
      "step": 48
    },
    {
      "epoch": 0.2523740544020602,
      "grad_norm": 77.9176025390625,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 8.2541,
      "step": 49
    },
    {
      "epoch": 0.2575245453082247,
      "grad_norm": 107.83366394042969,
      "learning_rate": 1e-05,
      "loss": 10.4399,
      "step": 50
    },
    {
      "epoch": 0.2575245453082247,
      "eval_loss": 0.14273174107074738,
      "eval_runtime": 4.1412,
      "eval_samples_per_second": 12.074,
      "eval_steps_per_second": 3.139,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.259104775831552e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}