{ "best_metric": null, "best_model_checkpoint": null, "epoch": 2.984255147355672, "eval_steps": 500, "global_step": 462, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.12918853451756157, "grad_norm": 18.75351905822754, "learning_rate": 2.127659574468085e-07, "logits/chosen": -1.451206922531128, "logits/rejected": -1.453438401222229, "logps/chosen": -1178.2191162109375, "logps/rejected": -1103.279541015625, "loss": 0.6962, "rewards/accuracies": 0.4242187440395355, "rewards/chosen": -0.0020913523621857166, "rewards/margins": -0.008680100552737713, "rewards/rejected": 0.006588748190551996, "step": 20 }, { "epoch": 0.25837706903512314, "grad_norm": 18.161069869995117, "learning_rate": 4.25531914893617e-07, "logits/chosen": -1.4470716714859009, "logits/rejected": -1.4471380710601807, "logps/chosen": -1174.496337890625, "logps/rejected": -1101.42333984375, "loss": 0.6931, "rewards/accuracies": 0.512499988079071, "rewards/chosen": 0.012329095974564552, "rewards/margins": 0.002261379035189748, "rewards/rejected": 0.010067718103528023, "step": 40 }, { "epoch": 0.3875656035526847, "grad_norm": 18.111698150634766, "learning_rate": 4.987903778327268e-07, "logits/chosen": -1.4492727518081665, "logits/rejected": -1.454036831855774, "logps/chosen": -1144.927490234375, "logps/rejected": -1060.2935791015625, "loss": 0.6842, "rewards/accuracies": 0.5726562738418579, "rewards/chosen": 0.04902833700180054, "rewards/margins": 0.026971647515892982, "rewards/rejected": 0.022056687623262405, "step": 60 }, { "epoch": 0.5167541380702463, "grad_norm": 17.73369598388672, "learning_rate": 4.922396431162129e-07, "logits/chosen": -1.4595801830291748, "logits/rejected": -1.4629844427108765, "logps/chosen": -1171.3333740234375, "logps/rejected": -1109.41455078125, "loss": 0.6694, "rewards/accuracies": 0.610156238079071, "rewards/chosen": 0.1019735336303711, "rewards/margins": 0.06121420860290527, "rewards/rejected": 0.04075933247804642, "step": 80 }, { "epoch": 0.6459426725878078, "grad_norm": 16.904024124145508, "learning_rate": 4.801467490723401e-07, "logits/chosen": -1.4490680694580078, "logits/rejected": -1.4522110223770142, "logps/chosen": -1182.560791015625, "logps/rejected": -1114.6134033203125, "loss": 0.6552, "rewards/accuracies": 0.6148437261581421, "rewards/chosen": 0.07732206583023071, "rewards/margins": 0.11570864915847778, "rewards/rejected": -0.03838658332824707, "step": 100 }, { "epoch": 0.7751312071053694, "grad_norm": 17.63266372680664, "learning_rate": 4.627883669538311e-07, "logits/chosen": -1.4325090646743774, "logits/rejected": -1.4361627101898193, "logps/chosen": -1182.9852294921875, "logps/rejected": -1119.953125, "loss": 0.647, "rewards/accuracies": 0.6351562738418579, "rewards/chosen": 0.06345769017934799, "rewards/margins": 0.14128440618515015, "rewards/rejected": -0.07782672345638275, "step": 120 }, { "epoch": 0.9043197416229309, "grad_norm": 17.77622413635254, "learning_rate": 4.405616362137017e-07, "logits/chosen": -1.4502944946289062, "logits/rejected": -1.4528261423110962, "logps/chosen": -1160.4075927734375, "logps/rejected": -1093.4769287109375, "loss": 0.6328, "rewards/accuracies": 0.62109375, "rewards/chosen": 0.04682188481092453, "rewards/margins": 0.19679759442806244, "rewards/rejected": -0.1499757319688797, "step": 140 }, { "epoch": 1.0335082761404926, "grad_norm": 16.421310424804688, "learning_rate": 4.139750784196997e-07, "logits/chosen": -1.4416507482528687, "logits/rejected": -1.4441019296646118, 
"logps/chosen": -1162.17919921875, "logps/rejected": -1097.062744140625, "loss": 0.6268, "rewards/accuracies": 0.6539062261581421, "rewards/chosen": 0.017796631902456284, "rewards/margins": 0.2243949919939041, "rewards/rejected": -0.20659832656383514, "step": 160 }, { "epoch": 1.1626968106580542, "grad_norm": 16.477689743041992, "learning_rate": 3.836369628764067e-07, "logits/chosen": -1.4492335319519043, "logits/rejected": -1.4500490427017212, "logps/chosen": -1149.756103515625, "logps/rejected": -1079.184326171875, "loss": 0.5971, "rewards/accuracies": 0.6937500238418579, "rewards/chosen": -0.020682932808995247, "rewards/margins": 0.3120761513710022, "rewards/rejected": -0.3327590823173523, "step": 180 }, { "epoch": 1.2918853451756156, "grad_norm": 16.82990074157715, "learning_rate": 3.5024139013594445e-07, "logits/chosen": -1.4431254863739014, "logits/rejected": -1.4477680921554565, "logps/chosen": -1168.659912109375, "logps/rejected": -1104.318115234375, "loss": 0.581, "rewards/accuracies": 0.7085937261581421, "rewards/chosen": -0.008671097457408905, "rewards/margins": 0.3281632959842682, "rewards/rejected": -0.3368344008922577, "step": 200 }, { "epoch": 1.4210738796931772, "grad_norm": 16.532955169677734, "learning_rate": 3.1455241179026165e-07, "logits/chosen": -1.4374616146087646, "logits/rejected": -1.4397255182266235, "logps/chosen": -1178.955810546875, "logps/rejected": -1098.697021484375, "loss": 0.5797, "rewards/accuracies": 0.711718738079071, "rewards/chosen": -0.046892568469047546, "rewards/margins": 0.3680500090122223, "rewards/rejected": -0.41494256258010864, "step": 220 }, { "epoch": 1.5502624142107388, "grad_norm": 15.883248329162598, "learning_rate": 2.7738654986555523e-07, "logits/chosen": -1.4424382448196411, "logits/rejected": -1.446033239364624, "logps/chosen": -1131.3668212890625, "logps/rejected": -1066.152099609375, "loss": 0.5729, "rewards/accuracies": 0.6937500238418579, "rewards/chosen": -0.07887513935565948, "rewards/margins": 0.3466903865337372, "rewards/rejected": -0.42556554079055786, "step": 240 }, { "epoch": 1.6794509487283005, "grad_norm": 16.528196334838867, "learning_rate": 2.3959411575460777e-07, "logits/chosen": -1.437410831451416, "logits/rejected": -1.4404833316802979, "logps/chosen": -1151.679931640625, "logps/rejected": -1087.936279296875, "loss": 0.5745, "rewards/accuracies": 0.7210937738418579, "rewards/chosen": -0.05212847515940666, "rewards/margins": 0.4079786241054535, "rewards/rejected": -0.46010708808898926, "step": 260 }, { "epoch": 1.8086394832458619, "grad_norm": 16.318754196166992, "learning_rate": 2.02039756087992e-07, "logits/chosen": -1.455380916595459, "logits/rejected": -1.4586334228515625, "logps/chosen": -1133.935791015625, "logps/rejected": -1072.302734375, "loss": 0.5721, "rewards/accuracies": 0.73046875, "rewards/chosen": -0.06314127147197723, "rewards/margins": 0.40634602308273315, "rewards/rejected": -0.4694872796535492, "step": 280 }, { "epoch": 1.9378280177634235, "grad_norm": 16.61640739440918, "learning_rate": 1.655826706318234e-07, "logits/chosen": -1.4276014566421509, "logits/rejected": -1.4284788370132446, "logps/chosen": -1142.511962890625, "logps/rejected": -1087.729248046875, "loss": 0.564, "rewards/accuracies": 0.7367187738418579, "rewards/chosen": -0.07105865329504013, "rewards/margins": 0.41813844442367554, "rewards/rejected": -0.4891970753669739, "step": 300 }, { "epoch": 2.067016552280985, "grad_norm": 14.662156105041504, "learning_rate": 1.3105695480339204e-07, "logits/chosen": -1.4481165409088135, 
"logits/rejected": -1.4495656490325928, "logps/chosen": -1163.4542236328125, "logps/rejected": -1094.1142578125, "loss": 0.5458, "rewards/accuracies": 0.76171875, "rewards/chosen": -0.022224558517336845, "rewards/margins": 0.4889918267726898, "rewards/rejected": -0.5112164616584778, "step": 320 }, { "epoch": 2.1962050867985465, "grad_norm": 17.926048278808594, "learning_rate": 9.925251654489414e-08, "logits/chosen": -1.443882703781128, "logits/rejected": -1.4449208974838257, "logps/chosen": -1168.8966064453125, "logps/rejected": -1111.7706298828125, "loss": 0.5392, "rewards/accuracies": 0.784375011920929, "rewards/chosen": 0.0047746943309903145, "rewards/margins": 0.531124472618103, "rewards/rejected": -0.5263497829437256, "step": 340 }, { "epoch": 2.3253936213161084, "grad_norm": 15.895486831665039, "learning_rate": 7.089700415484206e-08, "logits/chosen": -1.4399263858795166, "logits/rejected": -1.4458813667297363, "logps/chosen": -1162.751220703125, "logps/rejected": -1100.962158203125, "loss": 0.5311, "rewards/accuracies": 0.789843738079071, "rewards/chosen": 0.017337169498205185, "rewards/margins": 0.5304254293441772, "rewards/rejected": -0.5130882859230042, "step": 360 }, { "epoch": 2.45458215583367, "grad_norm": 14.746881484985352, "learning_rate": 4.663915854720396e-08, "logits/chosen": -1.4447704553604126, "logits/rejected": -1.4479763507843018, "logps/chosen": -1171.16748046875, "logps/rejected": -1107.5540771484375, "loss": 0.5343, "rewards/accuracies": 0.754687488079071, "rewards/chosen": -0.04202534630894661, "rewards/margins": 0.5067408084869385, "rewards/rejected": -0.5487660765647888, "step": 380 }, { "epoch": 2.583770690351231, "grad_norm": 14.920990943908691, "learning_rate": 2.7033970819087397e-08, "logits/chosen": -1.4494940042495728, "logits/rejected": -1.452332854270935, "logps/chosen": -1167.1435546875, "logps/rejected": -1098.287841796875, "loss": 0.5352, "rewards/accuracies": 0.746874988079071, "rewards/chosen": -0.007791751530021429, "rewards/margins": 0.47063732147216797, "rewards/rejected": -0.47842907905578613, "step": 400 }, { "epoch": 2.712959224868793, "grad_norm": 15.134760856628418, "learning_rate": 1.2529984704433922e-08, "logits/chosen": -1.4471887350082397, "logits/rejected": -1.452412724494934, "logps/chosen": -1167.8065185546875, "logps/rejected": -1100.0762939453125, "loss": 0.5342, "rewards/accuracies": 0.7789062261581421, "rewards/chosen": -0.027560725808143616, "rewards/margins": 0.5049134492874146, "rewards/rejected": -0.532474160194397, "step": 420 }, { "epoch": 2.8421477593863544, "grad_norm": 15.015375137329102, "learning_rate": 3.4590344187135634e-09, "logits/chosen": -1.445507287979126, "logits/rejected": -1.4463194608688354, "logps/chosen": -1124.656982421875, "logps/rejected": -1059.774169921875, "loss": 0.5244, "rewards/accuracies": 0.7710937261581421, "rewards/chosen": -0.0609331913292408, "rewards/margins": 0.5137851238250732, "rewards/rejected": -0.5747183561325073, "step": 440 }, { "epoch": 2.9713362939039163, "grad_norm": 14.278298377990723, "learning_rate": 2.8652680713725507e-11, "logits/chosen": -1.4286974668502808, "logits/rejected": -1.4289166927337646, "logps/chosen": -1178.025634765625, "logps/rejected": -1113.932861328125, "loss": 0.5264, "rewards/accuracies": 0.784375011920929, "rewards/chosen": -0.011726352386176586, "rewards/margins": 0.5253415703773499, "rewards/rejected": -0.5370678901672363, "step": 460 } ], "logging_steps": 20, "max_steps": 462, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 500, 
"stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 0.0, "train_batch_size": 4, "trial_name": null, "trial_params": null }