|
{
  "best_metric": 1.9449502229690552,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 3.136986301369863,
  "eval_steps": 25,
  "global_step": 42,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0730593607305936,
      "grad_norm": 1.630955696105957,
      "learning_rate": 5e-05,
      "loss": 2.656,
      "step": 1
    },
    {
      "epoch": 0.0730593607305936,
      "eval_loss": 2.7438390254974365,
      "eval_runtime": 0.9832,
      "eval_samples_per_second": 50.855,
      "eval_steps_per_second": 13.222,
      "step": 1
    },
    {
      "epoch": 0.1461187214611872,
      "grad_norm": 1.9037554264068604,
      "learning_rate": 0.0001,
      "loss": 2.7281,
      "step": 2
    },
    {
      "epoch": 0.2191780821917808,
      "grad_norm": 1.482515811920166,
      "learning_rate": 9.986128001799077e-05,
      "loss": 2.6894,
      "step": 3
    },
    {
      "epoch": 0.2922374429223744,
      "grad_norm": 1.0775504112243652,
      "learning_rate": 9.94459753267812e-05,
      "loss": 2.4234,
      "step": 4
    },
    {
      "epoch": 0.365296803652968,
      "grad_norm": 1.1510299444198608,
      "learning_rate": 9.875664641789545e-05,
      "loss": 2.3912,
      "step": 5
    },
    {
      "epoch": 0.4383561643835616,
      "grad_norm": 0.8954657912254333,
      "learning_rate": 9.779754323328192e-05,
      "loss": 2.3933,
      "step": 6
    },
    {
      "epoch": 0.5114155251141552,
      "grad_norm": 0.7784850597381592,
      "learning_rate": 9.657457896300791e-05,
      "loss": 2.2629,
      "step": 7
    },
    {
      "epoch": 0.5844748858447488,
      "grad_norm": 0.6699449419975281,
      "learning_rate": 9.509529358847655e-05,
      "loss": 2.2983,
      "step": 8
    },
    {
      "epoch": 0.6575342465753424,
      "grad_norm": 0.7167125344276428,
      "learning_rate": 9.336880739593416e-05,
      "loss": 2.2229,
      "step": 9
    },
    {
      "epoch": 0.730593607305936,
      "grad_norm": 0.6821659207344055,
      "learning_rate": 9.140576474687264e-05,
      "loss": 2.2196,
      "step": 10
    },
    {
      "epoch": 0.8036529680365296,
      "grad_norm": 0.5606189370155334,
      "learning_rate": 8.921826845200139e-05,
      "loss": 2.1865,
      "step": 11
    },
    {
      "epoch": 0.8767123287671232,
      "grad_norm": 0.5747855305671692,
      "learning_rate": 8.681980515339464e-05,
      "loss": 2.2302,
      "step": 12
    },
    {
      "epoch": 0.9497716894977168,
      "grad_norm": 0.7128291726112366,
      "learning_rate": 8.422516217485826e-05,
      "loss": 2.105,
      "step": 13
    },
    {
      "epoch": 1.045662100456621,
      "grad_norm": 0.9701964259147644,
      "learning_rate": 8.14503363531613e-05,
      "loss": 3.6055,
      "step": 14
    },
    {
      "epoch": 1.1187214611872145,
      "grad_norm": 0.544215977191925,
      "learning_rate": 7.85124354122177e-05,
      "loss": 2.0683,
      "step": 15
    },
    {
      "epoch": 1.191780821917808,
      "grad_norm": 0.6170773506164551,
      "learning_rate": 7.542957248827961e-05,
      "loss": 2.0378,
      "step": 16
    },
    {
      "epoch": 1.2648401826484017,
      "grad_norm": 0.5937121510505676,
      "learning_rate": 7.222075445642904e-05,
      "loss": 1.9573,
      "step": 17
    },
    {
      "epoch": 1.3378995433789953,
      "grad_norm": 0.5988708734512329,
      "learning_rate": 6.890576474687263e-05,
      "loss": 2.1244,
      "step": 18
    },
    {
      "epoch": 1.410958904109589,
      "grad_norm": 0.5857730507850647,
      "learning_rate": 6.550504137351576e-05,
      "loss": 2.013,
      "step": 19
    },
    {
      "epoch": 1.4840182648401825,
      "grad_norm": 0.6009112596511841,
      "learning_rate": 6.203955092681039e-05,
      "loss": 1.6306,
      "step": 20
    },
    {
      "epoch": 1.5570776255707761,
      "grad_norm": 0.7636632919311523,
      "learning_rate": 5.8530659307753036e-05,
      "loss": 2.2839,
      "step": 21
    },
    {
      "epoch": 1.6301369863013697,
      "grad_norm": 0.7096514701843262,
      "learning_rate": 5.500000000000001e-05,
      "loss": 2.0574,
      "step": 22
    },
    {
      "epoch": 1.7031963470319633,
      "grad_norm": 0.6771715879440308,
      "learning_rate": 5.1469340692246995e-05,
      "loss": 1.6397,
      "step": 23
    },
    {
      "epoch": 1.776255707762557,
      "grad_norm": 0.7442609071731567,
      "learning_rate": 4.7960449073189606e-05,
      "loss": 2.0326,
      "step": 24
    },
    {
      "epoch": 1.8493150684931505,
      "grad_norm": 0.717400074005127,
      "learning_rate": 4.4494958626484276e-05,
      "loss": 1.9805,
      "step": 25
    },
    {
      "epoch": 1.8493150684931505,
      "eval_loss": 1.9449502229690552,
      "eval_runtime": 0.9766,
      "eval_samples_per_second": 51.196,
      "eval_steps_per_second": 13.311,
      "step": 25
    },
    {
      "epoch": 1.9223744292237441,
      "grad_norm": 0.9425220489501953,
      "learning_rate": 4.109423525312738e-05,
      "loss": 2.0892,
      "step": 26
    },
    {
      "epoch": 2.018264840182648,
      "grad_norm": 1.2423070669174194,
      "learning_rate": 3.777924554357096e-05,
      "loss": 2.944,
      "step": 27
    },
    {
      "epoch": 2.091324200913242,
      "grad_norm": 0.7437182068824768,
      "learning_rate": 3.45704275117204e-05,
      "loss": 1.8443,
      "step": 28
    },
    {
      "epoch": 2.1643835616438354,
      "grad_norm": 0.7723404765129089,
      "learning_rate": 3.1487564587782306e-05,
      "loss": 1.8569,
      "step": 29
    },
    {
      "epoch": 2.237442922374429,
      "grad_norm": 0.7458028793334961,
      "learning_rate": 2.854966364683872e-05,
      "loss": 1.5894,
      "step": 30
    },
    {
      "epoch": 2.3105022831050226,
      "grad_norm": 0.8333907723426819,
      "learning_rate": 2.577483782514174e-05,
      "loss": 2.0206,
      "step": 31
    },
    {
      "epoch": 2.383561643835616,
      "grad_norm": 0.7999001145362854,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 1.8319,
      "step": 32
    },
    {
      "epoch": 2.45662100456621,
      "grad_norm": 0.8819563388824463,
      "learning_rate": 2.0781731547998614e-05,
      "loss": 1.6749,
      "step": 33
    },
    {
      "epoch": 2.5296803652968034,
      "grad_norm": 0.9369629621505737,
      "learning_rate": 1.8594235253127375e-05,
      "loss": 1.8284,
      "step": 34
    },
    {
      "epoch": 2.602739726027397,
      "grad_norm": 0.9049533605575562,
      "learning_rate": 1.6631192604065855e-05,
      "loss": 1.8499,
      "step": 35
    },
    {
      "epoch": 2.6757990867579906,
      "grad_norm": 0.8291096091270447,
      "learning_rate": 1.490470641152345e-05,
      "loss": 1.6656,
      "step": 36
    },
    {
      "epoch": 2.748858447488584,
      "grad_norm": 0.933771550655365,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 1.7633,
      "step": 37
    },
    {
      "epoch": 2.821917808219178,
      "grad_norm": 0.9030247330665588,
      "learning_rate": 1.2202456766718093e-05,
      "loss": 1.8017,
      "step": 38
    },
    {
      "epoch": 2.8949771689497714,
      "grad_norm": 0.9774025678634644,
      "learning_rate": 1.1243353582104556e-05,
      "loss": 1.7459,
      "step": 39
    },
    {
      "epoch": 2.968036529680365,
      "grad_norm": 1.4044599533081055,
      "learning_rate": 1.0554024673218807e-05,
      "loss": 2.4792,
      "step": 40
    },
    {
      "epoch": 3.0639269406392695,
      "grad_norm": 1.0370107889175415,
      "learning_rate": 1.0138719982009242e-05,
      "loss": 1.9816,
      "step": 41
    },
    {
      "epoch": 3.136986301369863,
      "grad_norm": 0.9146576523780823,
      "learning_rate": 1e-05,
      "loss": 1.7765,
      "step": 42
    }
  ],
  "logging_steps": 1,
  "max_steps": 42,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3648395138760704e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|