{
  "best_metric": 0.4401554763317108,
  "best_model_checkpoint": "./mistral/19-04-24-Weni-WeniGPT-Agents-Mistral-1.0.6-SFT-1.0.3-DPO_Experiment on DPO with other hyperparameters and best SFT model of WeniGPT-2_max_steps-366_batch_4_2024-04-19_ppid_9/checkpoint-90",
  "epoch": 1.4634146341463414,
  "eval_steps": 30,
  "global_step": 90,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.16,
      "grad_norm": 29.063692092895508,
      "learning_rate": 3.181818181818182e-06,
      "logits/chosen": -1.7455532550811768,
      "logits/rejected": -1.7793405055999756,
      "logps/chosen": -54.72136306762695,
      "logps/rejected": -97.28812408447266,
      "loss": 0.6924,
      "rewards/accuracies": 0.15000000596046448,
      "rewards/chosen": 0.003122234484180808,
      "rewards/margins": 0.0015356539515778422,
      "rewards/rejected": 0.0015865802997723222,
      "step": 10
    },
    {
      "epoch": 0.33,
      "grad_norm": 15.157337188720703,
      "learning_rate": 4.9154929577464795e-06,
      "logits/chosen": -1.8641210794448853,
      "logits/rejected": -1.8705828189849854,
      "logps/chosen": -40.3618049621582,
      "logps/rejected": -55.96257400512695,
      "loss": 0.6737,
      "rewards/accuracies": 0.32499998807907104,
      "rewards/chosen": 0.04275550693273544,
      "rewards/margins": 0.040880247950553894,
      "rewards/rejected": 0.0018752537434920669,
      "step": 20
    },
    {
      "epoch": 0.49,
      "grad_norm": 13.148392677307129,
      "learning_rate": 4.774647887323944e-06,
      "logits/chosen": -1.8284633159637451,
      "logits/rejected": -1.8519256114959717,
      "logps/chosen": -44.69489288330078,
      "logps/rejected": -76.79936981201172,
      "loss": 0.6371,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": 0.12410124391317368,
      "rewards/margins": 0.1248731017112732,
      "rewards/rejected": -0.0007718756678514183,
      "step": 30
    },
    {
      "epoch": 0.49,
      "eval_logits/chosen": -1.7379764318466187,
      "eval_logits/rejected": -1.7726188898086548,
      "eval_logps/chosen": -50.49373245239258,
      "eval_logps/rejected": -83.54340362548828,
      "eval_loss": 0.5864880681037903,
      "eval_rewards/accuracies": 0.4642857015132904,
      "eval_rewards/chosen": 0.24716782569885254,
      "eval_rewards/margins": 0.25581416487693787,
      "eval_rewards/rejected": -0.008646338246762753,
      "eval_runtime": 8.1282,
      "eval_samples_per_second": 3.445,
      "eval_steps_per_second": 1.722,
      "step": 30
    },
    {
      "epoch": 0.65,
      "grad_norm": 24.143388748168945,
      "learning_rate": 4.633802816901409e-06,
      "logits/chosen": -1.8080145120620728,
      "logits/rejected": -1.8319604396820068,
      "logps/chosen": -46.848201751708984,
      "logps/rejected": -81.42877960205078,
      "loss": 0.5649,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": 0.3360167443752289,
      "rewards/margins": 0.34645381569862366,
      "rewards/rejected": -0.010437069460749626,
      "step": 40
    },
    {
      "epoch": 0.81,
      "grad_norm": 0.0,
      "learning_rate": 4.492957746478874e-06,
      "logits/chosen": -1.8638660907745361,
      "logits/rejected": -1.8754875659942627,
      "logps/chosen": -22.06886863708496,
      "logps/rejected": -41.17422103881836,
      "loss": 0.6377,
      "rewards/accuracies": 0.20000000298023224,
      "rewards/chosen": 0.25195080041885376,
      "rewards/margins": 0.16642150282859802,
      "rewards/rejected": 0.08552931249141693,
      "step": 50
    },
    {
      "epoch": 0.98,
      "grad_norm": 7.51575231552124,
      "learning_rate": 4.352112676056338e-06,
      "logits/chosen": -1.8215672969818115,
      "logits/rejected": -1.8419084548950195,
      "logps/chosen": -44.14409255981445,
      "logps/rejected": -77.38871002197266,
      "loss": 0.5496,
      "rewards/accuracies": 0.3499999940395355,
      "rewards/chosen": 0.4203863739967346,
      "rewards/margins": 0.458610475063324,
      "rewards/rejected": -0.038224123418331146,
      "step": 60
    },
    {
      "epoch": 0.98,
      "eval_logits/chosen": -1.7419661283493042,
      "eval_logits/rejected": -1.7773857116699219,
      "eval_logps/chosen": -49.36268997192383,
      "eval_logps/rejected": -83.60606384277344,
      "eval_loss": 0.4963766038417816,
      "eval_rewards/accuracies": 0.4642857015132904,
      "eval_rewards/chosen": 0.5864797234535217,
      "eval_rewards/margins": 0.613922655582428,
      "eval_rewards/rejected": -0.02744293212890625,
      "eval_runtime": 8.1315,
      "eval_samples_per_second": 3.443,
      "eval_steps_per_second": 1.722,
      "step": 60
    },
    {
      "epoch": 1.14,
      "grad_norm": 3.454394817352295,
      "learning_rate": 4.211267605633803e-06,
      "logits/chosen": -1.7844343185424805,
      "logits/rejected": -1.8126220703125,
      "logps/chosen": -39.737998962402344,
      "logps/rejected": -78.76033782958984,
      "loss": 0.4925,
      "rewards/accuracies": 0.4000000059604645,
      "rewards/chosen": 0.6417733430862427,
      "rewards/margins": 0.7573493123054504,
      "rewards/rejected": -0.11557593196630478,
      "step": 70
    },
    {
      "epoch": 1.3,
      "grad_norm": 14.345088005065918,
      "learning_rate": 4.070422535211268e-06,
      "logits/chosen": -1.822148084640503,
      "logits/rejected": -1.8463541269302368,
      "logps/chosen": -35.735836029052734,
      "logps/rejected": -75.4683837890625,
      "loss": 0.4965,
      "rewards/accuracies": 0.375,
      "rewards/chosen": 0.6439998745918274,
      "rewards/margins": 0.8037986755371094,
      "rewards/rejected": -0.1597987860441208,
      "step": 80
    },
    {
      "epoch": 1.46,
      "grad_norm": 12.988151550292969,
      "learning_rate": 3.9295774647887325e-06,
      "logits/chosen": -1.858496069908142,
      "logits/rejected": -1.8683990240097046,
      "logps/chosen": -30.166736602783203,
      "logps/rejected": -50.970542907714844,
      "loss": 0.5185,
      "rewards/accuracies": 0.30000001192092896,
      "rewards/chosen": 0.6545813679695129,
      "rewards/margins": 0.7556509971618652,
      "rewards/rejected": -0.10106959193944931,
      "step": 90
    },
    {
      "epoch": 1.46,
      "eval_logits/chosen": -1.7461166381835938,
      "eval_logits/rejected": -1.7827314138412476,
      "eval_logps/chosen": -47.9538688659668,
      "eval_logps/rejected": -83.84149169921875,
      "eval_loss": 0.4401554763317108,
      "eval_rewards/accuracies": 0.4642857015132904,
      "eval_rewards/chosen": 1.0091263055801392,
      "eval_rewards/margins": 1.1071958541870117,
      "eval_rewards/rejected": -0.09806957095861435,
      "eval_runtime": 8.1286,
      "eval_samples_per_second": 3.445,
      "eval_steps_per_second": 1.722,
      "step": 90
    }
  ],
  "logging_steps": 10,
  "max_steps": 366,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 90,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}