{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -1.778551697731018,
      "logits/rejected": -1.5408724546432495,
      "logps/chosen": -277.6687927246094,
      "logps/pi_response": -231.2880859375,
      "logps/ref_response": -231.2880859375,
      "logps/rejected": -499.2101135253906,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -1.5808309316635132,
      "logits/rejected": -1.382399320602417,
      "logps/chosen": -353.8753662109375,
      "logps/pi_response": -214.94216918945312,
      "logps/ref_response": -209.04591369628906,
      "logps/rejected": -626.6241455078125,
      "loss": 0.6343,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.19698534905910492,
      "rewards/margins": 0.29289910197257996,
      "rewards/rejected": -0.4898844361305237,
      "step": 10
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -0.6259251236915588,
      "logits/rejected": -0.24522070586681366,
      "logps/chosen": -437.9039001464844,
      "logps/pi_response": -225.865966796875,
      "logps/ref_response": -177.60939025878906,
      "logps/rejected": -864.3118286132812,
      "loss": 0.5899,
      "rewards/accuracies": 0.765625,
      "rewards/chosen": -1.237844467163086,
      "rewards/margins": 1.7025201320648193,
      "rewards/rejected": -2.940364360809326,
      "step": 20
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -0.6101164817810059,
      "logits/rejected": -0.05170084908604622,
      "logps/chosen": -392.63580322265625,
      "logps/pi_response": -197.5888671875,
      "logps/ref_response": -172.53121948242188,
      "logps/rejected": -751.2313842773438,
      "loss": 0.4697,
      "rewards/accuracies": 0.778124988079071,
      "rewards/chosen": -0.7799466848373413,
      "rewards/margins": 1.2489432096481323,
      "rewards/rejected": -2.0288898944854736,
      "step": 30
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -0.06927945464849472,
      "logits/rejected": 0.5743276476860046,
      "logps/chosen": -373.8592224121094,
      "logps/pi_response": -210.5477294921875,
      "logps/ref_response": -169.03140258789062,
      "logps/rejected": -838.1275634765625,
      "loss": 0.4394,
      "rewards/accuracies": 0.796875,
      "rewards/chosen": -0.8252162933349609,
      "rewards/margins": 1.579106092453003,
      "rewards/rejected": -2.404322385787964,
      "step": 40
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": 0.34383702278137207,
      "logits/rejected": 1.0953922271728516,
      "logps/chosen": -422.825439453125,
      "logps/pi_response": -239.9983673095703,
      "logps/ref_response": -178.077880859375,
      "logps/rejected": -802.85791015625,
      "loss": 0.4416,
      "rewards/accuracies": 0.778124988079071,
      "rewards/chosen": -1.0435901880264282,
      "rewards/margins": 1.5074162483215332,
      "rewards/rejected": -2.55100679397583,
      "step": 50
    },
    {
      "epoch": 0.99,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.5042527813022419,
      "train_runtime": 3406.4519,
      "train_samples_per_second": 4.486,
      "train_steps_per_second": 0.017
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}