{
  "best_metric": NaN,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 3.027027027027027,
  "eval_steps": 25,
  "global_step": 42,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07207207207207207,
      "grad_norm": 0.23242872953414917,
      "learning_rate": 5e-05,
      "loss": 10.3706,
      "step": 1
    },
    {
      "epoch": 0.07207207207207207,
      "eval_loss": NaN,
      "eval_runtime": 0.1032,
      "eval_samples_per_second": 484.269,
      "eval_steps_per_second": 125.91,
      "step": 1
    },
    {
      "epoch": 0.14414414414414414,
      "grad_norm": 0.27956777811050415,
      "learning_rate": 0.0001,
      "loss": 10.3809,
      "step": 2
    },
    {
      "epoch": 0.21621621621621623,
      "grad_norm": 0.33814385533332825,
      "learning_rate": 9.986128001799077e-05,
      "loss": 10.3776,
      "step": 3
    },
    {
      "epoch": 0.2882882882882883,
      "grad_norm": 0.22053034603595734,
      "learning_rate": 9.94459753267812e-05,
      "loss": 10.3718,
      "step": 4
    },
    {
      "epoch": 0.36036036036036034,
      "grad_norm": 0.2625846862792969,
      "learning_rate": 9.875664641789545e-05,
      "loss": 10.3714,
      "step": 5
    },
    {
      "epoch": 0.43243243243243246,
      "grad_norm": 0.3214924931526184,
      "learning_rate": 9.779754323328192e-05,
      "loss": 10.3845,
      "step": 6
    },
    {
      "epoch": 0.5045045045045045,
      "grad_norm": 0.26409900188446045,
      "learning_rate": 9.657457896300791e-05,
      "loss": 10.3931,
      "step": 7
    },
    {
      "epoch": 0.5765765765765766,
      "grad_norm": 0.2600726783275604,
      "learning_rate": 9.509529358847655e-05,
      "loss": 10.384,
      "step": 8
    },
    {
      "epoch": 0.6486486486486487,
      "grad_norm": 0.30863380432128906,
      "learning_rate": 9.336880739593416e-05,
      "loss": 10.3812,
      "step": 9
    },
    {
      "epoch": 0.7207207207207207,
      "grad_norm": 0.31988584995269775,
      "learning_rate": 9.140576474687264e-05,
      "loss": 10.3635,
      "step": 10
    },
    {
      "epoch": 0.7927927927927928,
      "grad_norm": 0.23925259709358215,
      "learning_rate": 8.921826845200139e-05,
      "loss": 10.3874,
      "step": 11
    },
    {
      "epoch": 0.8648648648648649,
      "grad_norm": 0.3147229254245758,
      "learning_rate": 8.681980515339464e-05,
      "loss": 10.3739,
      "step": 12
    },
    {
      "epoch": 0.9369369369369369,
      "grad_norm": 0.4201517105102539,
      "learning_rate": 8.422516217485826e-05,
      "loss": 10.3673,
      "step": 13
    },
    {
      "epoch": 1.009009009009009,
      "grad_norm": 0.33698132634162903,
      "learning_rate": 8.14503363531613e-05,
      "loss": 11.8408,
      "step": 14
    },
    {
      "epoch": 1.0810810810810811,
      "grad_norm": 0.2550658881664276,
      "learning_rate": 7.85124354122177e-05,
      "loss": 9.1756,
      "step": 15
    },
    {
      "epoch": 1.1531531531531531,
      "grad_norm": 0.40897005796432495,
      "learning_rate": 7.542957248827961e-05,
      "loss": 12.3049,
      "step": 16
    },
    {
      "epoch": 1.2252252252252251,
      "grad_norm": 0.3496151864528656,
      "learning_rate": 7.222075445642904e-05,
      "loss": 9.059,
      "step": 17
    },
    {
      "epoch": 1.2972972972972974,
      "grad_norm": 0.3040536940097809,
      "learning_rate": 6.890576474687263e-05,
      "loss": 11.035,
      "step": 18
    },
    {
      "epoch": 1.3693693693693694,
      "grad_norm": 0.320978045463562,
      "learning_rate": 6.550504137351576e-05,
      "loss": 9.6682,
      "step": 19
    },
    {
      "epoch": 1.4414414414414414,
      "grad_norm": 0.4239856004714966,
      "learning_rate": 6.203955092681039e-05,
      "loss": 10.5403,
      "step": 20
    },
    {
      "epoch": 1.5135135135135136,
      "grad_norm": 0.34272250533103943,
      "learning_rate": 5.8530659307753036e-05,
      "loss": 10.5671,
      "step": 21
    },
    {
      "epoch": 1.5855855855855856,
      "grad_norm": 0.3536415100097656,
      "learning_rate": 5.500000000000001e-05,
      "loss": 10.1439,
      "step": 22
    },
    {
      "epoch": 1.6576576576576576,
      "grad_norm": 0.4582449793815613,
      "learning_rate": 5.1469340692246995e-05,
      "loss": 11.4783,
      "step": 23
    },
    {
      "epoch": 1.7297297297297298,
      "grad_norm": 0.36027440428733826,
      "learning_rate": 4.7960449073189606e-05,
      "loss": 9.6784,
      "step": 24
    },
    {
      "epoch": 1.8018018018018018,
      "grad_norm": 0.34310826659202576,
      "learning_rate": 4.4494958626484276e-05,
      "loss": 9.7889,
      "step": 25
    },
    {
      "epoch": 1.8018018018018018,
      "eval_loss": NaN,
      "eval_runtime": 0.1168,
      "eval_samples_per_second": 428.002,
      "eval_steps_per_second": 111.28,
      "step": 25
    },
    {
      "epoch": 1.8738738738738738,
      "grad_norm": 0.5108070373535156,
      "learning_rate": 4.109423525312738e-05,
      "loss": 11.2485,
      "step": 26
    },
    {
      "epoch": 1.945945945945946,
      "grad_norm": 0.5605173110961914,
      "learning_rate": 3.777924554357096e-05,
      "loss": 10.3144,
      "step": 27
    },
    {
      "epoch": 2.018018018018018,
      "grad_norm": 0.4780227541923523,
      "learning_rate": 3.45704275117204e-05,
      "loss": 11.2554,
      "step": 28
    },
    {
      "epoch": 2.09009009009009,
      "grad_norm": 0.3900972306728363,
      "learning_rate": 3.1487564587782306e-05,
      "loss": 10.069,
      "step": 29
    },
    {
      "epoch": 2.1621621621621623,
      "grad_norm": 0.4860075116157532,
      "learning_rate": 2.854966364683872e-05,
      "loss": 11.2109,
      "step": 30
    },
    {
      "epoch": 2.234234234234234,
      "grad_norm": 0.4599916636943817,
      "learning_rate": 2.577483782514174e-05,
      "loss": 8.562,
      "step": 31
    },
    {
      "epoch": 2.3063063063063063,
      "grad_norm": 0.3811488151550293,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 10.8037,
      "step": 32
    },
    {
      "epoch": 2.3783783783783785,
      "grad_norm": 0.5661867260932922,
      "learning_rate": 2.0781731547998614e-05,
      "loss": 12.446,
      "step": 33
    },
    {
      "epoch": 2.4504504504504503,
      "grad_norm": 0.5369315147399902,
      "learning_rate": 1.8594235253127375e-05,
      "loss": 9.2962,
      "step": 34
    },
    {
      "epoch": 2.5225225225225225,
      "grad_norm": 0.41405609250068665,
      "learning_rate": 1.6631192604065855e-05,
      "loss": 10.2131,
      "step": 35
    },
    {
      "epoch": 2.5945945945945947,
      "grad_norm": 0.43550166487693787,
      "learning_rate": 1.490470641152345e-05,
      "loss": 10.1199,
      "step": 36
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.5510601997375488,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 10.9105,
      "step": 37
    },
    {
      "epoch": 2.7387387387387387,
      "grad_norm": 0.44297927618026733,
      "learning_rate": 1.2202456766718093e-05,
      "loss": 10.5097,
      "step": 38
    },
    {
      "epoch": 2.810810810810811,
      "grad_norm": 0.4119100868701935,
      "learning_rate": 1.1243353582104556e-05,
      "loss": 9.4872,
      "step": 39
    },
    {
      "epoch": 2.8828828828828827,
      "grad_norm": 0.5762305855751038,
      "learning_rate": 1.0554024673218807e-05,
      "loss": 11.9048,
      "step": 40
    },
    {
      "epoch": 2.954954954954955,
      "grad_norm": 0.524325966835022,
      "learning_rate": 1.0138719982009242e-05,
      "loss": 10.0337,
      "step": 41
    },
    {
      "epoch": 3.027027027027027,
      "grad_norm": 0.5072345733642578,
      "learning_rate": 1e-05,
      "loss": 10.4668,
      "step": 42
    }
  ],
  "logging_steps": 1,
  "max_steps": 42,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 35925170061312.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}