{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.971563981042654,
  "eval_steps": 100,
  "global_step": 104,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.018957345971563982,
      "grad_norm": 132.20305622646663,
      "learning_rate": 4.545454545454545e-08,
      "logits/chosen": 117.53560638427734,
      "logits/rejected": 126.8960952758789,
      "logps/chosen": -335.40118408203125,
      "logps/rejected": -439.16552734375,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.1895734597156398,
      "grad_norm": 132.5788441374347,
      "learning_rate": 4.545454545454545e-07,
      "logits/chosen": 135.00689697265625,
      "logits/rejected": 138.35719299316406,
      "logps/chosen": -396.1441955566406,
      "logps/rejected": -439.40240478515625,
      "loss": 0.7179,
      "rewards/accuracies": 0.4444444477558136,
      "rewards/chosen": -0.007382770534604788,
      "rewards/margins": -0.0036396842915564775,
      "rewards/rejected": -0.003743087174370885,
      "step": 10
    },
    {
      "epoch": 0.3791469194312796,
      "grad_norm": 130.1388638934296,
      "learning_rate": 4.885348141000122e-07,
      "logits/chosen": 121.08137512207031,
      "logits/rejected": 124.82417297363281,
      "logps/chosen": -371.0042419433594,
      "logps/rejected": -424.25299072265625,
      "loss": 0.6433,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": 0.07038744539022446,
      "rewards/margins": 0.28346824645996094,
      "rewards/rejected": -0.21308080852031708,
      "step": 20
    },
    {
      "epoch": 0.5687203791469194,
      "grad_norm": 119.85468134756464,
      "learning_rate": 4.5025027361734613e-07,
      "logits/chosen": 142.1728057861328,
      "logits/rejected": 135.75143432617188,
      "logps/chosen": -421.06903076171875,
      "logps/rejected": -467.2119140625,
      "loss": 0.5746,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -1.4301902055740356,
      "rewards/margins": 0.9302951693534851,
      "rewards/rejected": -2.360485553741455,
      "step": 30
    },
    {
      "epoch": 0.7582938388625592,
      "grad_norm": 108.85906420456956,
      "learning_rate": 3.893311157806091e-07,
      "logits/chosen": 125.63570404052734,
      "logits/rejected": 113.9980697631836,
      "logps/chosen": -397.3561706542969,
      "logps/rejected": -424.4190368652344,
      "loss": 0.5496,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -2.158099412918091,
      "rewards/margins": 1.169090986251831,
      "rewards/rejected": -3.327190399169922,
      "step": 40
    },
    {
      "epoch": 0.9478672985781991,
      "grad_norm": 121.38249679586032,
      "learning_rate": 3.126631330646801e-07,
      "logits/chosen": 140.33822631835938,
      "logits/rejected": 144.39553833007812,
      "logps/chosen": -462.13653564453125,
      "logps/rejected": -546.0151977539062,
      "loss": 0.4858,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -2.1496312618255615,
      "rewards/margins": 1.3348098993301392,
      "rewards/rejected": -3.484441041946411,
      "step": 50
    },
    {
      "epoch": 1.1374407582938388,
      "grad_norm": 60.277332590993936,
      "learning_rate": 2.2891223348923882e-07,
      "logits/chosen": 131.23561096191406,
      "logits/rejected": 134.5987091064453,
      "logps/chosen": -446.07977294921875,
      "logps/rejected": -534.3648071289062,
      "loss": 0.3021,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -2.403707265853882,
      "rewards/margins": 2.4962546825408936,
      "rewards/rejected": -4.899962425231934,
      "step": 60
    },
    {
      "epoch": 1.3270142180094786,
      "grad_norm": 41.03323290379581,
      "learning_rate": 1.4754491880085317e-07,
      "logits/chosen": 121.9046401977539,
      "logits/rejected": 124.14168548583984,
      "logps/chosen": -419.36175537109375,
      "logps/rejected": -526.709228515625,
      "loss": 0.1837,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": -2.6108951568603516,
      "rewards/margins": 3.0488715171813965,
      "rewards/rejected": -5.659766674041748,
      "step": 70
    },
    {
      "epoch": 1.5165876777251186,
      "grad_norm": 59.303132679746895,
      "learning_rate": 7.775827023107834e-08,
      "logits/chosen": 107.08919525146484,
      "logits/rejected": 124.07452392578125,
      "logps/chosen": -425.52716064453125,
      "logps/rejected": -548.1698608398438,
      "loss": 0.1693,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -3.5938053131103516,
      "rewards/margins": 3.152803897857666,
      "rewards/rejected": -6.746609687805176,
      "step": 80
    },
    {
      "epoch": 1.7061611374407581,
      "grad_norm": 49.07574340329523,
      "learning_rate": 2.7440387297912122e-08,
      "logits/chosen": 105.5522232055664,
      "logits/rejected": 118.3895492553711,
      "logps/chosen": -459.85089111328125,
      "logps/rejected": -583.5906982421875,
      "loss": 0.1517,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": -3.670989513397217,
      "rewards/margins": 3.62445330619812,
      "rewards/rejected": -7.295443058013916,
      "step": 90
    },
    {
      "epoch": 1.8957345971563981,
      "grad_norm": 48.77979045218656,
      "learning_rate": 2.27878296044029e-09,
      "logits/chosen": 111.78726959228516,
      "logits/rejected": 111.61686706542969,
      "logps/chosen": -448.10687255859375,
      "logps/rejected": -548.4755859375,
      "loss": 0.1497,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": -3.3065879344940186,
      "rewards/margins": 3.278989315032959,
      "rewards/rejected": -6.585577964782715,
      "step": 100
    },
    {
      "epoch": 1.8957345971563981,
      "eval_logits/chosen": 91.48928833007812,
      "eval_logits/rejected": 85.78076934814453,
      "eval_logps/chosen": -446.239990234375,
      "eval_logps/rejected": -478.9853515625,
      "eval_loss": 0.460786372423172,
      "eval_rewards/accuracies": 0.7291666865348816,
      "eval_rewards/chosen": -4.128589153289795,
      "eval_rewards/margins": 1.7171281576156616,
      "eval_rewards/rejected": -5.845716953277588,
      "eval_runtime": 128.1398,
      "eval_samples_per_second": 5.853,
      "eval_steps_per_second": 0.187,
      "step": 100
    },
    {
      "epoch": 1.971563981042654,
      "step": 104,
      "total_flos": 0.0,
      "train_loss": 0.38906457561713,
      "train_runtime": 3484.4936,
      "train_samples_per_second": 3.874,
      "train_steps_per_second": 0.03
    }
  ],
  "logging_steps": 10,
  "max_steps": 104,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}