{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.6019563581640331,
  "eval_steps": 30,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012039127163280662,
      "grad_norm": 11.953282356262207,
      "learning_rate": 5.555555555555555e-08,
      "logits/chosen": -0.48816660046577454,
      "logits/rejected": -0.42142170667648315,
      "logps/chosen": -117.26611328125,
      "logps/rejected": -125.41987609863281,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06019563581640331,
      "grad_norm": 16.68506622314453,
      "learning_rate": 2.7777777777777776e-07,
      "logits/chosen": -0.46595269441604614,
      "logits/rejected": -0.356529176235199,
      "logps/chosen": -190.95057678222656,
      "logps/rejected": -211.25076293945312,
      "loss": 0.6926,
      "rewards/accuracies": 0.453125,
      "rewards/chosen": 0.0007588082225993276,
      "rewards/margins": 0.0022044419310986996,
      "rewards/rejected": -0.0014456338249146938,
      "step": 5
    },
    {
      "epoch": 0.12039127163280662,
      "grad_norm": 13.722668647766113,
      "learning_rate": 4.999499509357132e-07,
      "logits/chosen": -0.4793759286403656,
      "logits/rejected": -0.37052756547927856,
      "logps/chosen": -155.6678009033203,
      "logps/rejected": -199.44947814941406,
      "loss": 0.6889,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.005186144262552261,
      "rewards/margins": 0.009383995085954666,
      "rewards/rejected": -0.004197851754724979,
      "step": 10
    },
    {
      "epoch": 0.18058690744920994,
      "grad_norm": 12.493337631225586,
      "learning_rate": 4.982003369106287e-07,
      "logits/chosen": -0.49185729026794434,
      "logits/rejected": -0.37670475244522095,
      "logps/chosen": -76.30625915527344,
      "logps/rejected": -177.40817260742188,
      "loss": 0.6691,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.028129320591688156,
      "rewards/margins": 0.048517487943172455,
      "rewards/rejected": -0.020388163626194,
      "step": 15
    },
    {
      "epoch": 0.24078254326561324,
      "grad_norm": 11.79749870300293,
      "learning_rate": 4.939682729058838e-07,
      "logits/chosen": -0.45126277208328247,
      "logits/rejected": -0.3729521930217743,
      "logps/chosen": -166.20733642578125,
      "logps/rejected": -207.5035400390625,
      "loss": 0.6249,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.08626963198184967,
      "rewards/margins": 0.14315639436244965,
      "rewards/rejected": -0.05688678100705147,
      "step": 20
    },
    {
      "epoch": 0.3009781790820166,
      "grad_norm": 10.574383735656738,
      "learning_rate": 4.872960871766826e-07,
      "logits/chosen": -0.4710594713687897,
      "logits/rejected": -0.3694532513618469,
      "logps/chosen": -86.93299865722656,
      "logps/rejected": -186.15248107910156,
      "loss": 0.587,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.12460210174322128,
      "rewards/margins": 0.23137669265270233,
      "rewards/rejected": -0.10677458345890045,
      "step": 25
    },
    {
      "epoch": 0.3611738148984199,
      "grad_norm": 11.093204498291016,
      "learning_rate": 4.782505135862175e-07,
      "logits/chosen": -0.45945605635643005,
      "logits/rejected": -0.33354875445365906,
      "logps/chosen": -71.20188903808594,
      "logps/rejected": -212.95767211914062,
      "loss": 0.5229,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.16831260919570923,
      "rewards/margins": 0.38181251287460327,
      "rewards/rejected": -0.21349990367889404,
      "step": 30
    },
    {
      "epoch": 0.3611738148984199,
      "eval_logits/chosen": -0.4618959426879883,
      "eval_logits/rejected": -0.3433874249458313,
      "eval_logps/chosen": -98.28858947753906,
      "eval_logps/rejected": -212.0100555419922,
      "eval_loss": 0.5059286952018738,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": 0.18195854127407074,
      "eval_rewards/margins": 0.4181654751300812,
      "eval_rewards/rejected": -0.23620688915252686,
      "eval_runtime": 179.4182,
      "eval_samples_per_second": 3.043,
      "eval_steps_per_second": 1.522,
      "step": 30
    },
    {
      "epoch": 0.4213694507148232,
      "grad_norm": 9.376220703125,
      "learning_rate": 4.6692202414695724e-07,
      "logits/chosen": -0.4632042944431305,
      "logits/rejected": -0.35029542446136475,
      "logps/chosen": -84.06396484375,
      "logps/rejected": -213.848388671875,
      "loss": 0.4976,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.2034817487001419,
      "rewards/margins": 0.4632874131202698,
      "rewards/rejected": -0.25980567932128906,
      "step": 35
    },
    {
      "epoch": 0.4815650865312265,
      "grad_norm": 8.679346084594727,
      "learning_rate": 4.534239241377266e-07,
      "logits/chosen": -0.44362330436706543,
      "logits/rejected": -0.2992916703224182,
      "logps/chosen": -105.2283706665039,
      "logps/rejected": -244.84890747070312,
      "loss": 0.4197,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.22778573632240295,
      "rewards/margins": 0.6910415291786194,
      "rewards/rejected": -0.46325573325157166,
      "step": 40
    },
    {
      "epoch": 0.5417607223476298,
      "grad_norm": 7.219143867492676,
      "learning_rate": 4.3789121884703727e-07,
      "logits/chosen": -0.41270333528518677,
      "logits/rejected": -0.27924439311027527,
      "logps/chosen": -70.08865356445312,
      "logps/rejected": -261.56170654296875,
      "loss": 0.3621,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.23598209023475647,
      "rewards/margins": 0.9187321662902832,
      "rewards/rejected": -0.6827500462532043,
      "step": 45
    },
    {
      "epoch": 0.6019563581640331,
      "grad_norm": 6.640863418579102,
      "learning_rate": 4.204792632772754e-07,
      "logits/chosen": -0.4174782633781433,
      "logits/rejected": -0.2659801244735718,
      "logps/chosen": -109.1211166381836,
      "logps/rejected": -280.77813720703125,
      "loss": 0.3123,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.2913265824317932,
      "rewards/margins": 1.1760694980621338,
      "rewards/rejected": -0.8847430348396301,
      "step": 50
    }
  ],
  "logging_steps": 5,
  "max_steps": 166,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}