{ "best_metric": 0.40118029713630676, "best_model_checkpoint": "./mistral/19-04-24-Weni-WeniGPT-Agents-Mistral-1.0.6-SFT-1.0.3-DPO_Experiment on DPO with other hyperparameters and best SFT model of WeniGPT-2_max_steps-366_batch_4_2024-04-19_ppid_9/checkpoint-180", "epoch": 2.926829268292683, "eval_steps": 30, "global_step": 180, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.16, "grad_norm": 29.063692092895508, "learning_rate": 3.181818181818182e-06, "logits/chosen": -1.7455532550811768, "logits/rejected": -1.7793405055999756, "logps/chosen": -54.72136306762695, "logps/rejected": -97.28812408447266, "loss": 0.6924, "rewards/accuracies": 0.15000000596046448, "rewards/chosen": 0.003122234484180808, "rewards/margins": 0.0015356539515778422, "rewards/rejected": 0.0015865802997723222, "step": 10 }, { "epoch": 0.33, "grad_norm": 15.157337188720703, "learning_rate": 4.9154929577464795e-06, "logits/chosen": -1.8641210794448853, "logits/rejected": -1.8705828189849854, "logps/chosen": -40.3618049621582, "logps/rejected": -55.96257400512695, "loss": 0.6737, "rewards/accuracies": 0.32499998807907104, "rewards/chosen": 0.04275550693273544, "rewards/margins": 0.040880247950553894, "rewards/rejected": 0.0018752537434920669, "step": 20 }, { "epoch": 0.49, "grad_norm": 13.148392677307129, "learning_rate": 4.774647887323944e-06, "logits/chosen": -1.8284633159637451, "logits/rejected": -1.8519256114959717, "logps/chosen": -44.69489288330078, "logps/rejected": -76.79936981201172, "loss": 0.6371, "rewards/accuracies": 0.42500001192092896, "rewards/chosen": 0.12410124391317368, "rewards/margins": 0.1248731017112732, "rewards/rejected": -0.0007718756678514183, "step": 30 }, { "epoch": 0.49, "eval_logits/chosen": -1.7379764318466187, "eval_logits/rejected": -1.7726188898086548, "eval_logps/chosen": -50.49373245239258, "eval_logps/rejected": -83.54340362548828, "eval_loss": 0.5864880681037903, "eval_rewards/accuracies": 0.4642857015132904, "eval_rewards/chosen": 0.24716782569885254, "eval_rewards/margins": 0.25581416487693787, "eval_rewards/rejected": -0.008646338246762753, "eval_runtime": 8.1282, "eval_samples_per_second": 3.445, "eval_steps_per_second": 1.722, "step": 30 }, { "epoch": 0.65, "grad_norm": 24.143388748168945, "learning_rate": 4.633802816901409e-06, "logits/chosen": -1.8080145120620728, "logits/rejected": -1.8319604396820068, "logps/chosen": -46.848201751708984, "logps/rejected": -81.42877960205078, "loss": 0.5649, "rewards/accuracies": 0.42500001192092896, "rewards/chosen": 0.3360167443752289, "rewards/margins": 0.34645381569862366, "rewards/rejected": -0.010437069460749626, "step": 40 }, { "epoch": 0.81, "grad_norm": 0.0, "learning_rate": 4.492957746478874e-06, "logits/chosen": -1.8638660907745361, "logits/rejected": -1.8754875659942627, "logps/chosen": -22.06886863708496, "logps/rejected": -41.17422103881836, "loss": 0.6377, "rewards/accuracies": 0.20000000298023224, "rewards/chosen": 0.25195080041885376, "rewards/margins": 0.16642150282859802, "rewards/rejected": 0.08552931249141693, "step": 50 }, { "epoch": 0.98, "grad_norm": 7.51575231552124, "learning_rate": 4.352112676056338e-06, "logits/chosen": -1.8215672969818115, "logits/rejected": -1.8419084548950195, "logps/chosen": -44.14409255981445, "logps/rejected": -77.38871002197266, "loss": 0.5496, "rewards/accuracies": 0.3499999940395355, "rewards/chosen": 0.4203863739967346, "rewards/margins": 0.458610475063324, "rewards/rejected": -0.038224123418331146, "step": 60 }, 
{ "epoch": 0.98, "eval_logits/chosen": -1.7419661283493042, "eval_logits/rejected": -1.7773857116699219, "eval_logps/chosen": -49.36268997192383, "eval_logps/rejected": -83.60606384277344, "eval_loss": 0.4963766038417816, "eval_rewards/accuracies": 0.4642857015132904, "eval_rewards/chosen": 0.5864797234535217, "eval_rewards/margins": 0.613922655582428, "eval_rewards/rejected": -0.02744293212890625, "eval_runtime": 8.1315, "eval_samples_per_second": 3.443, "eval_steps_per_second": 1.722, "step": 60 }, { "epoch": 1.14, "grad_norm": 3.454394817352295, "learning_rate": 4.211267605633803e-06, "logits/chosen": -1.7844343185424805, "logits/rejected": -1.8126220703125, "logps/chosen": -39.737998962402344, "logps/rejected": -78.76033782958984, "loss": 0.4925, "rewards/accuracies": 0.4000000059604645, "rewards/chosen": 0.6417733430862427, "rewards/margins": 0.7573493123054504, "rewards/rejected": -0.11557593196630478, "step": 70 }, { "epoch": 1.3, "grad_norm": 14.345088005065918, "learning_rate": 4.070422535211268e-06, "logits/chosen": -1.822148084640503, "logits/rejected": -1.8463541269302368, "logps/chosen": -35.735836029052734, "logps/rejected": -75.4683837890625, "loss": 0.4965, "rewards/accuracies": 0.375, "rewards/chosen": 0.6439998745918274, "rewards/margins": 0.8037986755371094, "rewards/rejected": -0.1597987860441208, "step": 80 }, { "epoch": 1.46, "grad_norm": 12.988151550292969, "learning_rate": 3.9295774647887325e-06, "logits/chosen": -1.858496069908142, "logits/rejected": -1.8683990240097046, "logps/chosen": -30.166736602783203, "logps/rejected": -50.970542907714844, "loss": 0.5185, "rewards/accuracies": 0.30000001192092896, "rewards/chosen": 0.6545813679695129, "rewards/margins": 0.7556509971618652, "rewards/rejected": -0.10106959193944931, "step": 90 }, { "epoch": 1.46, "eval_logits/chosen": -1.7461166381835938, "eval_logits/rejected": -1.7827314138412476, "eval_logps/chosen": -47.9538688659668, "eval_logps/rejected": -83.84149169921875, "eval_loss": 0.4401554763317108, "eval_rewards/accuracies": 0.4642857015132904, "eval_rewards/chosen": 1.0091263055801392, "eval_rewards/margins": 1.1071958541870117, "eval_rewards/rejected": -0.09806957095861435, "eval_runtime": 8.1286, "eval_samples_per_second": 3.445, "eval_steps_per_second": 1.722, "step": 90 }, { "epoch": 1.63, "grad_norm": 7.574762344360352, "learning_rate": 3.7887323943661976e-06, "logits/chosen": -1.8498780727386475, "logits/rejected": -1.8676494359970093, "logps/chosen": -50.50703811645508, "logps/rejected": -83.80486297607422, "loss": 0.4752, "rewards/accuracies": 0.42500001192092896, "rewards/chosen": 1.088062047958374, "rewards/margins": 1.0784289836883545, "rewards/rejected": 0.009633284993469715, "step": 100 }, { "epoch": 1.79, "grad_norm": 10.141860008239746, "learning_rate": 3.6478873239436626e-06, "logits/chosen": -1.8536640405654907, "logits/rejected": -1.8669731616973877, "logps/chosen": -33.00554275512695, "logps/rejected": -46.077144622802734, "loss": 0.5439, "rewards/accuracies": 0.30000001192092896, "rewards/chosen": 0.7643357515335083, "rewards/margins": 0.7525759339332581, "rewards/rejected": 0.011759823188185692, "step": 110 }, { "epoch": 1.95, "grad_norm": 0.0, "learning_rate": 3.5070422535211268e-06, "logits/chosen": -1.8447682857513428, "logits/rejected": -1.8607944250106812, "logps/chosen": -42.43408966064453, "logps/rejected": -73.089599609375, "loss": 0.4623, "rewards/accuracies": 0.375, "rewards/chosen": 1.1391878128051758, "rewards/margins": 1.3124778270721436, "rewards/rejected": 
-0.17328998446464539, "step": 120 }, { "epoch": 1.95, "eval_logits/chosen": -1.7511886358261108, "eval_logits/rejected": -1.7883756160736084, "eval_logps/chosen": -46.9849739074707, "eval_logps/rejected": -84.11779022216797, "eval_loss": 0.4216788709163666, "eval_rewards/accuracies": 0.4642857015132904, "eval_rewards/chosen": 1.2997931241989136, "eval_rewards/margins": 1.4807531833648682, "eval_rewards/rejected": -0.1809600442647934, "eval_runtime": 8.133, "eval_samples_per_second": 3.443, "eval_steps_per_second": 1.721, "step": 120 }, { "epoch": 2.11, "grad_norm": 0.7550886869430542, "learning_rate": 3.3661971830985918e-06, "logits/chosen": -1.765539526939392, "logits/rejected": -1.8175758123397827, "logps/chosen": -50.52437973022461, "logps/rejected": -116.59224700927734, "loss": 0.3356, "rewards/accuracies": 0.550000011920929, "rewards/chosen": 1.8601114749908447, "rewards/margins": 2.246537685394287, "rewards/rejected": -0.3864262104034424, "step": 130 }, { "epoch": 2.28, "grad_norm": 0.23429778218269348, "learning_rate": 3.225352112676057e-06, "logits/chosen": -1.7902629375457764, "logits/rejected": -1.8166630268096924, "logps/chosen": -41.8301887512207, "logps/rejected": -78.8285140991211, "loss": 0.4254, "rewards/accuracies": 0.42500001192092896, "rewards/chosen": 1.3376247882843018, "rewards/margins": 1.733752965927124, "rewards/rejected": -0.39612799882888794, "step": 140 }, { "epoch": 2.44, "grad_norm": 0.4628850817680359, "learning_rate": 3.084507042253521e-06, "logits/chosen": -1.8713032007217407, "logits/rejected": -1.8906549215316772, "logps/chosen": -38.51209259033203, "logps/rejected": -69.11054992675781, "loss": 0.4985, "rewards/accuracies": 0.3499999940395355, "rewards/chosen": 1.159945011138916, "rewards/margins": 1.5182414054870605, "rewards/rejected": -0.35829633474349976, "step": 150 }, { "epoch": 2.44, "eval_logits/chosen": -1.7590656280517578, "eval_logits/rejected": -1.7967649698257446, "eval_logps/chosen": -45.99831008911133, "eval_logps/rejected": -84.5901107788086, "eval_loss": 0.40688762068748474, "eval_rewards/accuracies": 0.4642857015132904, "eval_rewards/chosen": 1.5957926511764526, "eval_rewards/margins": 1.9184513092041016, "eval_rewards/rejected": -0.3226587474346161, "eval_runtime": 8.13, "eval_samples_per_second": 3.444, "eval_steps_per_second": 1.722, "step": 150 }, { "epoch": 2.6, "grad_norm": 2.2907166481018066, "learning_rate": 2.943661971830986e-06, "logits/chosen": -1.9065347909927368, "logits/rejected": -1.9184277057647705, "logps/chosen": -35.44993209838867, "logps/rejected": -58.14739227294922, "loss": 0.5111, "rewards/accuracies": 0.30000001192092896, "rewards/chosen": 1.2248560190200806, "rewards/margins": 1.3947786092758179, "rewards/rejected": -0.1699226349592209, "step": 160 }, { "epoch": 2.76, "grad_norm": 0.19129106402397156, "learning_rate": 2.802816901408451e-06, "logits/chosen": -1.7800159454345703, "logits/rejected": -1.8162111043930054, "logps/chosen": -44.746212005615234, "logps/rejected": -87.70552062988281, "loss": 0.3875, "rewards/accuracies": 0.4749999940395355, "rewards/chosen": 1.9709749221801758, "rewards/margins": 2.394925355911255, "rewards/rejected": -0.42395058274269104, "step": 170 }, { "epoch": 2.93, "grad_norm": 0.09690173715353012, "learning_rate": 2.6619718309859156e-06, "logits/chosen": -1.9381850957870483, "logits/rejected": -1.9563757181167603, "logps/chosen": -20.314172744750977, "logps/rejected": -45.916996002197266, "loss": 0.5276, "rewards/accuracies": 0.25, "rewards/chosen": 0.9397296905517578, 
"rewards/margins": 1.2702549695968628, "rewards/rejected": -0.33052533864974976, "step": 180 }, { "epoch": 2.93, "eval_logits/chosen": -1.7637720108032227, "eval_logits/rejected": -1.8018139600753784, "eval_logps/chosen": -45.44319534301758, "eval_logps/rejected": -84.9321517944336, "eval_loss": 0.40118029713630676, "eval_rewards/accuracies": 0.4642857015132904, "eval_rewards/chosen": 1.762328028678894, "eval_rewards/margins": 2.1875970363616943, "eval_rewards/rejected": -0.425269216299057, "eval_runtime": 8.1311, "eval_samples_per_second": 3.444, "eval_steps_per_second": 1.722, "step": 180 } ], "logging_steps": 10, "max_steps": 366, "num_input_tokens_seen": 0, "num_train_epochs": 6, "save_steps": 90, "total_flos": 0.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }