clm7b0129-cds-0.8-kendall-onof-ofif-corr-max-2-simpo-max1500-default
/
checkpoint-1100
/trainer_state.json
{ | |
"best_metric": null, | |
"best_model_checkpoint": null, | |
"epoch": 0.9027492819039803, | |
"eval_steps": 50, | |
"global_step": 1100, | |
"is_hyper_param_search": false, | |
"is_local_process_zero": true, | |
"is_world_process_zero": true, | |
"log_history": [ | |
{ | |
"epoch": 0.008206811653672548, | |
"grad_norm": 0.07778492569923401, | |
"learning_rate": 4.999451708687114e-06, | |
"logits/chosen": -2.053281307220459, | |
"logits/rejected": -2.495474338531494, | |
"logps/chosen": -0.3126755356788635, | |
"logps/rejected": -0.3312620520591736, | |
"loss": 7.6211, | |
"rewards/accuracies": 0.44999998807907104, | |
"rewards/chosen": -0.4690132737159729, | |
"rewards/margins": 0.027879873290657997, | |
"rewards/rejected": -0.49689316749572754, | |
"step": 10 | |
}, | |
{ | |
"epoch": 0.016413623307345096, | |
"grad_norm": 0.07773654907941818, | |
"learning_rate": 4.997807075247147e-06, | |
"logits/chosen": -2.0624098777770996, | |
"logits/rejected": -2.4424185752868652, | |
"logps/chosen": -0.26926660537719727, | |
"logps/rejected": -0.2978014051914215, | |
"loss": 7.5195, | |
"rewards/accuracies": 0.48750001192092896, | |
"rewards/chosen": -0.4038998484611511, | |
"rewards/margins": 0.04280223697423935, | |
"rewards/rejected": -0.44670209288597107, | |
"step": 20 | |
}, | |
{ | |
"epoch": 0.024620434961017644, | |
"grad_norm": 0.07357177883386612, | |
"learning_rate": 4.9950668210706795e-06, | |
"logits/chosen": -2.068427562713623, | |
"logits/rejected": -2.486642360687256, | |
"logps/chosen": -0.29993391036987305, | |
"logps/rejected": -0.34360918402671814, | |
"loss": 7.4913, | |
"rewards/accuracies": 0.4749999940395355, | |
"rewards/chosen": -0.4499008059501648, | |
"rewards/margins": 0.06551288068294525, | |
"rewards/rejected": -0.5154137015342712, | |
"step": 30 | |
}, | |
{ | |
"epoch": 0.03282724661469019, | |
"grad_norm": 0.14212799072265625, | |
"learning_rate": 4.9912321481237616e-06, | |
"logits/chosen": -2.015650987625122, | |
"logits/rejected": -2.3838727474212646, | |
"logps/chosen": -0.2911723852157593, | |
"logps/rejected": -0.30521970987319946, | |
"loss": 7.5217, | |
"rewards/accuracies": 0.48750001192092896, | |
"rewards/chosen": -0.4367586076259613, | |
"rewards/margins": 0.021070968359708786, | |
"rewards/rejected": -0.4578295648097992, | |
"step": 40 | |
}, | |
{ | |
"epoch": 0.04103405826836274, | |
"grad_norm": 0.08107248693704605, | |
"learning_rate": 4.986304738420684e-06, | |
"logits/chosen": -2.1150989532470703, | |
"logits/rejected": -2.4338631629943848, | |
"logps/chosen": -0.26249754428863525, | |
"logps/rejected": -0.3132360577583313, | |
"loss": 7.519, | |
"rewards/accuracies": 0.5375000238418579, | |
"rewards/chosen": -0.3937462866306305, | |
"rewards/margins": 0.07610772550106049, | |
"rewards/rejected": -0.4698540270328522, | |
"step": 50 | |
}, | |
{ | |
"epoch": 0.04103405826836274, | |
"eval_logits/chosen": -2.0232737064361572, | |
"eval_logits/rejected": -2.4952735900878906, | |
"eval_logps/chosen": -0.27974528074264526, | |
"eval_logps/rejected": -0.3420677185058594, | |
"eval_loss": 0.9291417598724365, | |
"eval_rewards/accuracies": 0.49494948983192444, | |
"eval_rewards/chosen": -0.41961798071861267, | |
"eval_rewards/margins": 0.09348361939191818, | |
"eval_rewards/rejected": -0.5131015777587891, | |
"eval_runtime": 26.0563, | |
"eval_samples_per_second": 30.242, | |
"eval_steps_per_second": 3.799, | |
"step": 50 | |
}, | |
{ | |
"epoch": 0.04924086992203529, | |
"grad_norm": 0.06815352290868759, | |
"learning_rate": 4.980286753286196e-06, | |
"logits/chosen": -1.9890680313110352, | |
"logits/rejected": -2.3848204612731934, | |
"logps/chosen": -0.26213228702545166, | |
"logps/rejected": -0.31342557072639465, | |
"loss": 7.432, | |
"rewards/accuracies": 0.512499988079071, | |
"rewards/chosen": -0.3931984603404999, | |
"rewards/margins": 0.0769399031996727, | |
"rewards/rejected": -0.4701383709907532, | |
"step": 60 | |
}, | |
{ | |
"epoch": 0.057447681575707836, | |
"grad_norm": 0.06748568266630173, | |
"learning_rate": 4.973180832407471e-06, | |
"logits/chosen": -2.070542812347412, | |
"logits/rejected": -2.3977038860321045, | |
"logps/chosen": -0.24570491909980774, | |
"logps/rejected": -0.3655605912208557, | |
"loss": 7.35, | |
"rewards/accuracies": 0.5874999761581421, | |
"rewards/chosen": -0.3685573935508728, | |
"rewards/margins": 0.17978355288505554, | |
"rewards/rejected": -0.548340916633606, | |
"step": 70 | |
}, | |
{ | |
"epoch": 0.06565449322938038, | |
"grad_norm": 0.10909309983253479, | |
"learning_rate": 4.964990092676263e-06, | |
"logits/chosen": -2.2012317180633545, | |
"logits/rejected": -2.346029758453369, | |
"logps/chosen": -0.2279246598482132, | |
"logps/rejected": -0.35396742820739746, | |
"loss": 7.5082, | |
"rewards/accuracies": 0.6499999761581421, | |
"rewards/chosen": -0.341886967420578, | |
"rewards/margins": 0.18906418979167938, | |
"rewards/rejected": -0.5309511423110962, | |
"step": 80 | |
}, | |
{ | |
"epoch": 0.07386130488305294, | |
"grad_norm": 0.05977805703878403, | |
"learning_rate": 4.9557181268217225e-06, | |
"logits/chosen": -2.0719449520111084, | |
"logits/rejected": -2.4491190910339355, | |
"logps/chosen": -0.2503294348716736, | |
"logps/rejected": -0.29939892888069153, | |
"loss": 7.5129, | |
"rewards/accuracies": 0.4625000059604645, | |
"rewards/chosen": -0.37549418210983276, | |
"rewards/margins": 0.07360419631004333, | |
"rewards/rejected": -0.4490983486175537, | |
"step": 90 | |
}, | |
{ | |
"epoch": 0.08206811653672548, | |
"grad_norm": 0.051751479506492615, | |
"learning_rate": 4.9453690018345144e-06, | |
"logits/chosen": -2.0634045600891113, | |
"logits/rejected": -2.458428382873535, | |
"logps/chosen": -0.24033495783805847, | |
"logps/rejected": -0.29080909490585327, | |
"loss": 7.4432, | |
"rewards/accuracies": 0.5249999761581421, | |
"rewards/chosen": -0.3605024516582489, | |
"rewards/margins": 0.07571124285459518, | |
"rewards/rejected": -0.4362136721611023, | |
"step": 100 | |
}, | |
{ | |
"epoch": 0.08206811653672548, | |
"eval_logits/chosen": -2.0207154750823975, | |
"eval_logits/rejected": -2.486215353012085, | |
"eval_logps/chosen": -0.2376101016998291, | |
"eval_logps/rejected": -0.32593628764152527, | |
"eval_loss": 0.9085211753845215, | |
"eval_rewards/accuracies": 0.5353535413742065, | |
"eval_rewards/chosen": -0.35641518235206604, | |
"eval_rewards/margins": 0.13248924911022186, | |
"eval_rewards/rejected": -0.4889043867588043, | |
"eval_runtime": 26.0119, | |
"eval_samples_per_second": 30.294, | |
"eval_steps_per_second": 3.806, | |
"step": 100 | |
}, | |
{ | |
"epoch": 0.09027492819039803, | |
"grad_norm": 0.06007291004061699, | |
"learning_rate": 4.933947257182901e-06, | |
"logits/chosen": -2.1248741149902344, | |
"logits/rejected": -2.409808874130249, | |
"logps/chosen": -0.2354653775691986, | |
"logps/rejected": -0.30269068479537964, | |
"loss": 7.317, | |
"rewards/accuracies": 0.5249999761581421, | |
"rewards/chosen": -0.3531980812549591, | |
"rewards/margins": 0.10083796828985214, | |
"rewards/rejected": -0.45403605699539185, | |
"step": 110 | |
}, | |
{ | |
"epoch": 0.09848173984407058, | |
"grad_norm": 0.055738095194101334, | |
"learning_rate": 4.921457902821578e-06, | |
"logits/chosen": -2.0635311603546143, | |
"logits/rejected": -2.4297730922698975, | |
"logps/chosen": -0.2315257489681244, | |
"logps/rejected": -0.33639490604400635, | |
"loss": 7.2775, | |
"rewards/accuracies": 0.612500011920929, | |
"rewards/chosen": -0.3472886383533478, | |
"rewards/margins": 0.15730372071266174, | |
"rewards/rejected": -0.5045923590660095, | |
"step": 120 | |
}, | |
{ | |
"epoch": 0.10668855149774313, | |
"grad_norm": 0.07971248030662537, | |
"learning_rate": 4.907906416994146e-06, | |
"logits/chosen": -2.07852840423584, | |
"logits/rejected": -2.4043469429016113, | |
"logps/chosen": -0.20596058666706085, | |
"logps/rejected": -0.33416762948036194, | |
"loss": 7.336, | |
"rewards/accuracies": 0.637499988079071, | |
"rewards/chosen": -0.3089408874511719, | |
"rewards/margins": 0.19231058657169342, | |
"rewards/rejected": -0.5012514591217041, | |
"step": 130 | |
}, | |
{ | |
"epoch": 0.11489536315141567, | |
"grad_norm": 0.08581534773111343, | |
"learning_rate": 4.893298743830168e-06, | |
"logits/chosen": -2.115981340408325, | |
"logits/rejected": -2.5363636016845703, | |
"logps/chosen": -0.22111928462982178, | |
"logps/rejected": -0.3136863708496094, | |
"loss": 7.2892, | |
"rewards/accuracies": 0.550000011920929, | |
"rewards/chosen": -0.33167898654937744, | |
"rewards/margins": 0.1388506144285202, | |
"rewards/rejected": -0.47052955627441406, | |
"step": 140 | |
}, | |
{ | |
"epoch": 0.12310217480508823, | |
"grad_norm": 0.06293604522943497, | |
"learning_rate": 4.8776412907378845e-06, | |
"logits/chosen": -2.070842742919922, | |
"logits/rejected": -2.4669342041015625, | |
"logps/chosen": -0.20812074840068817, | |
"logps/rejected": -0.29536327719688416, | |
"loss": 7.26, | |
"rewards/accuracies": 0.5874999761581421, | |
"rewards/chosen": -0.31218111515045166, | |
"rewards/margins": 0.13086381554603577, | |
"rewards/rejected": -0.4430449604988098, | |
"step": 150 | |
}, | |
{ | |
"epoch": 0.12310217480508823, | |
"eval_logits/chosen": -2.062544822692871, | |
"eval_logits/rejected": -2.5318312644958496, | |
"eval_logps/chosen": -0.2108660489320755, | |
"eval_logps/rejected": -0.3196176588535309, | |
"eval_loss": 0.8929102420806885, | |
"eval_rewards/accuracies": 0.5555555820465088, | |
"eval_rewards/chosen": -0.31629908084869385, | |
"eval_rewards/margins": 0.1631273776292801, | |
"eval_rewards/rejected": -0.47942644357681274, | |
"eval_runtime": 26.0407, | |
"eval_samples_per_second": 30.26, | |
"eval_steps_per_second": 3.802, | |
"step": 150 | |
}, | |
{ | |
"epoch": 0.13130898645876077, | |
"grad_norm": 0.06755395233631134, | |
"learning_rate": 4.860940925593703e-06, | |
"logits/chosen": -2.187638998031616, | |
"logits/rejected": -2.4928510189056396, | |
"logps/chosen": -0.2070399969816208, | |
"logps/rejected": -0.30727890133857727, | |
"loss": 7.1947, | |
"rewards/accuracies": 0.550000011920929, | |
"rewards/chosen": -0.3105599880218506, | |
"rewards/margins": 0.1503583937883377, | |
"rewards/rejected": -0.4609183669090271, | |
"step": 160 | |
}, | |
{ | |
"epoch": 0.1395157981124333, | |
"grad_norm": 0.08956371247768402, | |
"learning_rate": 4.84320497372973e-06, | |
"logits/chosen": -2.0751285552978516, | |
"logits/rejected": -2.478673219680786, | |
"logps/chosen": -0.18197472393512726, | |
"logps/rejected": -0.2756109833717346, | |
"loss": 7.1774, | |
"rewards/accuracies": 0.612500011920929, | |
"rewards/chosen": -0.2729620933532715, | |
"rewards/margins": 0.14045441150665283, | |
"rewards/rejected": -0.41341647505760193, | |
"step": 170 | |
}, | |
{ | |
"epoch": 0.14772260976610588, | |
"grad_norm": 0.07708129286766052, | |
"learning_rate": 4.824441214720629e-06, | |
"logits/chosen": -2.113537311553955, | |
"logits/rejected": -2.530677556991577, | |
"logps/chosen": -0.20599500834941864, | |
"logps/rejected": -0.2911488711833954, | |
"loss": 7.1722, | |
"rewards/accuracies": 0.512499988079071, | |
"rewards/chosen": -0.308992475271225, | |
"rewards/margins": 0.1277308166027069, | |
"rewards/rejected": -0.4367233216762543, | |
"step": 180 | |
}, | |
{ | |
"epoch": 0.15592942141977842, | |
"grad_norm": 0.0884585976600647, | |
"learning_rate": 4.804657878971252e-06, | |
"logits/chosen": -2.151444673538208, | |
"logits/rejected": -2.559861898422241, | |
"logps/chosen": -0.2093551605939865, | |
"logps/rejected": -0.2878231108188629, | |
"loss": 7.1186, | |
"rewards/accuracies": 0.5249999761581421, | |
"rewards/chosen": -0.31403273344039917, | |
"rewards/margins": 0.11770190298557281, | |
"rewards/rejected": -0.4317346513271332, | |
"step": 190 | |
}, | |
{ | |
"epoch": 0.16413623307345096, | |
"grad_norm": 0.09445559978485107, | |
"learning_rate": 4.783863644106502e-06, | |
"logits/chosen": -2.278620481491089, | |
"logits/rejected": -2.5897645950317383, | |
"logps/chosen": -0.18631704151630402, | |
"logps/rejected": -0.3201253116130829, | |
"loss": 7.082, | |
"rewards/accuracies": 0.574999988079071, | |
"rewards/chosen": -0.2794755697250366, | |
"rewards/margins": 0.2007124423980713, | |
"rewards/rejected": -0.4801879823207855, | |
"step": 200 | |
}, | |
{ | |
"epoch": 0.16413623307345096, | |
"eval_logits/chosen": -2.1718955039978027, | |
"eval_logits/rejected": -2.6710257530212402, | |
"eval_logps/chosen": -0.20382821559906006, | |
"eval_logps/rejected": -0.3390556573867798, | |
"eval_loss": 0.8775798678398132, | |
"eval_rewards/accuracies": 0.5858585834503174, | |
"eval_rewards/chosen": -0.3057423532009125, | |
"eval_rewards/margins": 0.20284107327461243, | |
"eval_rewards/rejected": -0.5085834264755249, | |
"eval_runtime": 26.0531, | |
"eval_samples_per_second": 30.246, | |
"eval_steps_per_second": 3.8, | |
"step": 200 | |
}, | |
{ | |
"epoch": 0.1723430447271235, | |
"grad_norm": 0.11077430099248886, | |
"learning_rate": 4.762067631165049e-06, | |
"logits/chosen": -2.2566323280334473, | |
"logits/rejected": -2.621065378189087, | |
"logps/chosen": -0.18663282692432404, | |
"logps/rejected": -0.290865957736969, | |
"loss": 7.1321, | |
"rewards/accuracies": 0.5, | |
"rewards/chosen": -0.27994924783706665, | |
"rewards/margins": 0.15634974837303162, | |
"rewards/rejected": -0.43629899621009827, | |
"step": 210 | |
}, | |
{ | |
"epoch": 0.18054985638079607, | |
"grad_norm": 0.15500974655151367, | |
"learning_rate": 4.7392794005985324e-06, | |
"logits/chosen": -2.27781343460083, | |
"logits/rejected": -2.700369358062744, | |
"logps/chosen": -0.21367880702018738, | |
"logps/rejected": -0.31559067964553833, | |
"loss": 6.9886, | |
"rewards/accuracies": 0.5874999761581421, | |
"rewards/chosen": -0.3205181956291199, | |
"rewards/margins": 0.15286779403686523, | |
"rewards/rejected": -0.4733859896659851, | |
"step": 220 | |
}, | |
{ | |
"epoch": 0.1887566680344686, | |
"grad_norm": 0.12770676612854004, | |
"learning_rate": 4.715508948078037e-06, | |
"logits/chosen": -2.216815710067749, | |
"logits/rejected": -2.759458541870117, | |
"logps/chosen": -0.21546092629432678, | |
"logps/rejected": -0.34664005041122437, | |
"loss": 6.966, | |
"rewards/accuracies": 0.574999988079071, | |
"rewards/chosen": -0.32319143414497375, | |
"rewards/margins": 0.196768656373024, | |
"rewards/rejected": -0.5199600458145142, | |
"step": 230 | |
}, | |
{ | |
"epoch": 0.19696347968814115, | |
"grad_norm": 0.15062908828258514, | |
"learning_rate": 4.690766700109659e-06, | |
"logits/chosen": -2.2262110710144043, | |
"logits/rejected": -2.7840607166290283, | |
"logps/chosen": -0.2078159749507904, | |
"logps/rejected": -0.4006090760231018, | |
"loss": 7.012, | |
"rewards/accuracies": 0.625, | |
"rewards/chosen": -0.3117239773273468, | |
"rewards/margins": 0.2891896665096283, | |
"rewards/rejected": -0.6009136438369751, | |
"step": 240 | |
}, | |
{ | |
"epoch": 0.2051702913418137, | |
"grad_norm": 0.24995267391204834, | |
"learning_rate": 4.665063509461098e-06, | |
"logits/chosen": -2.4198288917541504, | |
"logits/rejected": -2.8148205280303955, | |
"logps/chosen": -0.23191122710704803, | |
"logps/rejected": -0.38251757621765137, | |
"loss": 6.7812, | |
"rewards/accuracies": 0.5375000238418579, | |
"rewards/chosen": -0.34786686301231384, | |
"rewards/margins": 0.2259095013141632, | |
"rewards/rejected": -0.573776364326477, | |
"step": 250 | |
}, | |
{ | |
"epoch": 0.2051702913418137, | |
"eval_logits/chosen": -2.3532145023345947, | |
"eval_logits/rejected": -2.9015841484069824, | |
"eval_logps/chosen": -0.22620753943920135, | |
"eval_logps/rejected": -0.4290919005870819, | |
"eval_loss": 0.8434350490570068, | |
"eval_rewards/accuracies": 0.5959596037864685, | |
"eval_rewards/chosen": -0.33931130170822144, | |
"eval_rewards/margins": 0.304326593875885, | |
"eval_rewards/rejected": -0.6436378955841064, | |
"eval_runtime": 26.012, | |
"eval_samples_per_second": 30.294, | |
"eval_steps_per_second": 3.806, | |
"step": 250 | |
}, | |
{ | |
"epoch": 0.21337710299548626, | |
"grad_norm": 0.22134838998317719, | |
"learning_rate": 4.638410650401267e-06, | |
"logits/chosen": -2.4054629802703857, | |
"logits/rejected": -2.8117618560791016, | |
"logps/chosen": -0.20882606506347656, | |
"logps/rejected": -0.41059261560440063, | |
"loss": 6.6024, | |
"rewards/accuracies": 0.5625, | |
"rewards/chosen": -0.31323909759521484, | |
"rewards/margins": 0.3026497960090637, | |
"rewards/rejected": -0.6158889532089233, | |
"step": 260 | |
}, | |
{ | |
"epoch": 0.2215839146491588, | |
"grad_norm": 0.23838171362876892, | |
"learning_rate": 4.610819813755038e-06, | |
"logits/chosen": -2.534034490585327, | |
"logits/rejected": -2.846797227859497, | |
"logps/chosen": -0.2372482568025589, | |
"logps/rejected": -0.493452787399292, | |
"loss": 6.7377, | |
"rewards/accuracies": 0.612500011920929, | |
"rewards/chosen": -0.35587236285209656, | |
"rewards/margins": 0.38430681824684143, | |
"rewards/rejected": -0.740179181098938, | |
"step": 270 | |
}, | |
{ | |
"epoch": 0.22979072630283134, | |
"grad_norm": 0.2903882563114166, | |
"learning_rate": 4.582303101775249e-06, | |
"logits/chosen": -2.4838695526123047, | |
"logits/rejected": -2.8494999408721924, | |
"logps/chosen": -0.23487380146980286, | |
"logps/rejected": -0.5447143316268921, | |
"loss": 6.6463, | |
"rewards/accuracies": 0.6875, | |
"rewards/chosen": -0.3523106873035431, | |
"rewards/margins": 0.4647606909275055, | |
"rewards/rejected": -0.8170714378356934, | |
"step": 280 | |
}, | |
{ | |
"epoch": 0.23799753795650389, | |
"grad_norm": 0.2977660596370697, | |
"learning_rate": 4.55287302283426e-06, | |
"logits/chosen": -2.539785861968994, | |
"logits/rejected": -2.922631025314331, | |
"logps/chosen": -0.2749824821949005, | |
"logps/rejected": -0.5817859768867493, | |
"loss": 6.2424, | |
"rewards/accuracies": 0.5249999761581421, | |
"rewards/chosen": -0.41247373819351196, | |
"rewards/margins": 0.4602052569389343, | |
"rewards/rejected": -0.8726789355278015, | |
"step": 290 | |
}, | |
{ | |
"epoch": 0.24620434961017645, | |
"grad_norm": 0.388954222202301, | |
"learning_rate": 4.522542485937369e-06, | |
"logits/chosen": -2.6039352416992188, | |
"logits/rejected": -2.8011627197265625, | |
"logps/chosen": -0.37122753262519836, | |
"logps/rejected": -0.6376734972000122, | |
"loss": 6.3458, | |
"rewards/accuracies": 0.5375000238418579, | |
"rewards/chosen": -0.5568413138389587, | |
"rewards/margins": 0.39966899156570435, | |
"rewards/rejected": -0.9565103650093079, | |
"step": 300 | |
}, | |
{ | |
"epoch": 0.24620434961017645, | |
"eval_logits/chosen": -2.448641061782837, | |
"eval_logits/rejected": -2.8639307022094727, | |
"eval_logps/chosen": -0.35992980003356934, | |
"eval_logps/rejected": -0.7961164712905884, | |
"eval_loss": 0.7593368887901306, | |
"eval_rewards/accuracies": 0.6060606241226196, | |
"eval_rewards/chosen": -0.5398945808410645, | |
"eval_rewards/margins": 0.6542800664901733, | |
"eval_rewards/rejected": -1.1941747665405273, | |
"eval_runtime": 26.0378, | |
"eval_samples_per_second": 30.264, | |
"eval_steps_per_second": 3.802, | |
"step": 300 | |
}, | |
{ | |
"epoch": 0.254411161263849, | |
"grad_norm": 0.41667941212654114, | |
"learning_rate": 4.491324795060491e-06, | |
"logits/chosen": -2.596318483352661, | |
"logits/rejected": -2.8140273094177246, | |
"logps/chosen": -0.3550270199775696, | |
"logps/rejected": -0.673725426197052, | |
"loss": 6.2201, | |
"rewards/accuracies": 0.512499988079071, | |
"rewards/chosen": -0.532540500164032, | |
"rewards/margins": 0.47804751992225647, | |
"rewards/rejected": -1.0105879306793213, | |
"step": 310 | |
}, | |
{ | |
"epoch": 0.26261797291752154, | |
"grad_norm": 0.5356656312942505, | |
"learning_rate": 4.4592336433146e-06, | |
"logits/chosen": -2.5131359100341797, | |
"logits/rejected": -2.7125701904296875, | |
"logps/chosen": -0.4790285527706146, | |
"logps/rejected": -0.9030188322067261, | |
"loss": 5.9656, | |
"rewards/accuracies": 0.6000000238418579, | |
"rewards/chosen": -0.7185428738594055, | |
"rewards/margins": 0.6359853744506836, | |
"rewards/rejected": -1.3545281887054443, | |
"step": 320 | |
}, | |
{ | |
"epoch": 0.2708247845711941, | |
"grad_norm": 0.6395894885063171, | |
"learning_rate": 4.426283106939474e-06, | |
"logits/chosen": -2.408463954925537, | |
"logits/rejected": -2.784302234649658, | |
"logps/chosen": -0.43364158272743225, | |
"logps/rejected": -0.8759375810623169, | |
"loss": 5.6972, | |
"rewards/accuracies": 0.6000000238418579, | |
"rewards/chosen": -0.6504624485969543, | |
"rewards/margins": 0.6634438037872314, | |
"rewards/rejected": -1.313906192779541, | |
"step": 330 | |
}, | |
{ | |
"epoch": 0.2790315962248666, | |
"grad_norm": 0.5482347011566162, | |
"learning_rate": 4.3924876391293915e-06, | |
"logits/chosen": -2.4591078758239746, | |
"logits/rejected": -2.7352993488311768, | |
"logps/chosen": -0.4786604344844818, | |
"logps/rejected": -1.337053894996643, | |
"loss": 5.5123, | |
"rewards/accuracies": 0.6625000238418579, | |
"rewards/chosen": -0.7179905772209167, | |
"rewards/margins": 1.2875900268554688, | |
"rewards/rejected": -2.0055809020996094, | |
"step": 340 | |
}, | |
{ | |
"epoch": 0.2872384078785392, | |
"grad_norm": 0.44611233472824097, | |
"learning_rate": 4.357862063693486e-06, | |
"logits/chosen": -2.426030397415161, | |
"logits/rejected": -2.7272305488586426, | |
"logps/chosen": -0.5190738439559937, | |
"logps/rejected": -1.3207228183746338, | |
"loss": 5.242, | |
"rewards/accuracies": 0.6875, | |
"rewards/chosen": -0.7786108255386353, | |
"rewards/margins": 1.2024734020233154, | |
"rewards/rejected": -1.9810841083526611, | |
"step": 350 | |
}, | |
{ | |
"epoch": 0.2872384078785392, | |
"eval_logits/chosen": -2.5548582077026367, | |
"eval_logits/rejected": -2.8742663860321045, | |
"eval_logps/chosen": -0.5429244637489319, | |
"eval_logps/rejected": -1.556882619857788, | |
"eval_loss": 0.6406257748603821, | |
"eval_rewards/accuracies": 0.6464646458625793, | |
"eval_rewards/chosen": -0.8143868446350098, | |
"eval_rewards/margins": 1.520937204360962, | |
"eval_rewards/rejected": -2.335324287414551, | |
"eval_runtime": 26.0367, | |
"eval_samples_per_second": 30.265, | |
"eval_steps_per_second": 3.802, | |
"step": 350 | |
}, | |
{ | |
"epoch": 0.29544521953221176, | |
"grad_norm": 0.6435768008232117, | |
"learning_rate": 4.322421568553529e-06, | |
"logits/chosen": -2.5521063804626465, | |
"logits/rejected": -2.8884310722351074, | |
"logps/chosen": -0.5124669075012207, | |
"logps/rejected": -1.4227123260498047, | |
"loss": 5.2755, | |
"rewards/accuracies": 0.6499999761581421, | |
"rewards/chosen": -0.768700361251831, | |
"rewards/margins": 1.365368127822876, | |
"rewards/rejected": -2.134068489074707, | |
"step": 360 | |
}, | |
{ | |
"epoch": 0.3036520311858843, | |
"grad_norm": 0.7270930409431458, | |
"learning_rate": 4.286181699082008e-06, | |
"logits/chosen": -2.613312244415283, | |
"logits/rejected": -2.8482322692871094, | |
"logps/chosen": -0.7397282719612122, | |
"logps/rejected": -1.7584373950958252, | |
"loss": 4.6592, | |
"rewards/accuracies": 0.637499988079071, | |
"rewards/chosen": -1.1095924377441406, | |
"rewards/margins": 1.528063416481018, | |
"rewards/rejected": -2.6376559734344482, | |
"step": 370 | |
}, | |
{ | |
"epoch": 0.31185884283955684, | |
"grad_norm": 0.417350172996521, | |
"learning_rate": 4.249158351283414e-06, | |
"logits/chosen": -2.6574740409851074, | |
"logits/rejected": -2.8720998764038086, | |
"logps/chosen": -0.7095499634742737, | |
"logps/rejected": -2.2104392051696777, | |
"loss": 4.4512, | |
"rewards/accuracies": 0.637499988079071, | |
"rewards/chosen": -1.0643248558044434, | |
"rewards/margins": 2.2513341903686523, | |
"rewards/rejected": -3.3156590461730957, | |
"step": 380 | |
}, | |
{ | |
"epoch": 0.3200656544932294, | |
"grad_norm": 0.5288704633712769, | |
"learning_rate": 4.211367764821722e-06, | |
"logits/chosen": -2.7438926696777344, | |
"logits/rejected": -2.9578990936279297, | |
"logps/chosen": -0.7221090793609619, | |
"logps/rejected": -2.169327735900879, | |
"loss": 4.8545, | |
"rewards/accuracies": 0.5249999761581421, | |
"rewards/chosen": -1.0831634998321533, | |
"rewards/margins": 2.170828104019165, | |
"rewards/rejected": -3.2539916038513184, | |
"step": 390 | |
}, | |
{ | |
"epoch": 0.3282724661469019, | |
"grad_norm": 0.5342369675636292, | |
"learning_rate": 4.172826515897146e-06, | |
"logits/chosen": -2.451097011566162, | |
"logits/rejected": -2.8298330307006836, | |
"logps/chosen": -0.6682878732681274, | |
"logps/rejected": -2.4527621269226074, | |
"loss": 4.5987, | |
"rewards/accuracies": 0.7124999761581421, | |
"rewards/chosen": -1.0024317502975464, | |
"rewards/margins": 2.676711320877075, | |
"rewards/rejected": -3.679143190383911, | |
"step": 400 | |
}, | |
{ | |
"epoch": 0.3282724661469019, | |
"eval_logits/chosen": -2.486518621444702, | |
"eval_logits/rejected": -2.884521722793579, | |
"eval_logps/chosen": -0.6234225630760193, | |
"eval_logps/rejected": -2.3772060871124268, | |
"eval_loss": 0.5354036688804626, | |
"eval_rewards/accuracies": 0.6464646458625793, | |
"eval_rewards/chosen": -0.9351338744163513, | |
"eval_rewards/margins": 2.6306753158569336, | |
"eval_rewards/rejected": -3.5658092498779297, | |
"eval_runtime": 26.0392, | |
"eval_samples_per_second": 30.262, | |
"eval_steps_per_second": 3.802, | |
"step": 400 | |
}, | |
{ | |
"epoch": 0.33647927780057446, | |
"grad_norm": 0.3894485831260681, | |
"learning_rate": 4.133551509975264e-06, | |
"logits/chosen": -2.498328685760498, | |
"logits/rejected": -2.832383394241333, | |
"logps/chosen": -0.8072039484977722, | |
"logps/rejected": -3.0143685340881348, | |
"loss": 4.1825, | |
"rewards/accuracies": 0.737500011920929, | |
"rewards/chosen": -1.210806131362915, | |
"rewards/margins": 3.310746669769287, | |
"rewards/rejected": -4.521553039550781, | |
"step": 410 | |
}, | |
{ | |
"epoch": 0.344686089454247, | |
"grad_norm": 1.1384832859039307, | |
"learning_rate": 4.093559974371725e-06, | |
"logits/chosen": -2.3939032554626465, | |
"logits/rejected": -2.736551284790039, | |
"logps/chosen": -0.8623531460762024, | |
"logps/rejected": -3.228794813156128, | |
"loss": 4.0383, | |
"rewards/accuracies": 0.75, | |
"rewards/chosen": -1.293529748916626, | |
"rewards/margins": 3.5496623516082764, | |
"rewards/rejected": -4.843192100524902, | |
"step": 420 | |
}, | |
{ | |
"epoch": 0.3528929011079196, | |
"grad_norm": 0.5636156797409058, | |
"learning_rate": 4.052869450695776e-06, | |
"logits/chosen": -2.466431140899658, | |
"logits/rejected": -2.7423882484436035, | |
"logps/chosen": -0.867216944694519, | |
"logps/rejected": -2.9713022708892822, | |
"loss": 3.9764, | |
"rewards/accuracies": 0.75, | |
"rewards/chosen": -1.3008254766464233, | |
"rewards/margins": 3.156127691268921, | |
"rewards/rejected": -4.456953525543213, | |
"step": 430 | |
}, | |
{ | |
"epoch": 0.36109971276159214, | |
"grad_norm": 1.3254886865615845, | |
"learning_rate": 4.011497787155938e-06, | |
"logits/chosen": -2.438335418701172, | |
"logits/rejected": -2.716845989227295, | |
"logps/chosen": -0.9805113673210144, | |
"logps/rejected": -3.646573543548584, | |
"loss": 4.0643, | |
"rewards/accuracies": 0.737500011920929, | |
"rewards/chosen": -1.4707671403884888, | |
"rewards/margins": 3.9990932941436768, | |
"rewards/rejected": -5.469860553741455, | |
"step": 440 | |
}, | |
{ | |
"epoch": 0.3693065244152647, | |
"grad_norm": 1.0398495197296143, | |
"learning_rate": 3.969463130731183e-06, | |
"logits/chosen": -2.3812689781188965, | |
"logits/rejected": -2.7526910305023193, | |
"logps/chosen": -1.0975981950759888, | |
"logps/rejected": -3.9439749717712402, | |
"loss": 3.9749, | |
"rewards/accuracies": 0.75, | |
"rewards/chosen": -1.6463972330093384, | |
"rewards/margins": 4.269565105438232, | |
"rewards/rejected": -5.915962219238281, | |
"step": 450 | |
}, | |
{ | |
"epoch": 0.3693065244152647, | |
"eval_logits/chosen": -2.4038827419281006, | |
"eval_logits/rejected": -2.7936673164367676, | |
"eval_logps/chosen": -1.0481945276260376, | |
"eval_logps/rejected": -3.5816102027893066, | |
"eval_loss": 0.47537824511528015, | |
"eval_rewards/accuracies": 0.7373737096786499, | |
"eval_rewards/chosen": -1.5722918510437012, | |
"eval_rewards/margins": 3.800123691558838, | |
"eval_rewards/rejected": -5.372415542602539, | |
"eval_runtime": 26.0116, | |
"eval_samples_per_second": 30.294, | |
"eval_steps_per_second": 3.806, | |
"step": 450 | |
}, | |
{ | |
"epoch": 0.3775133360689372, | |
"grad_norm": 1.091323971748352, | |
"learning_rate": 3.92678391921108e-06, | |
"logits/chosen": -2.5687508583068848, | |
"logits/rejected": -2.766624927520752, | |
"logps/chosen": -1.1868960857391357, | |
"logps/rejected": -3.4050726890563965, | |
"loss": 4.114, | |
"rewards/accuracies": 0.762499988079071, | |
"rewards/chosen": -1.780344009399414, | |
"rewards/margins": 3.3272647857666016, | |
"rewards/rejected": -5.107609272003174, | |
"step": 460 | |
}, | |
{ | |
"epoch": 0.38572014772260976, | |
"grad_norm": 0.8932163715362549, | |
"learning_rate": 3.88347887310836e-06, | |
"logits/chosen": -2.438856601715088, | |
"logits/rejected": -2.8660335540771484, | |
"logps/chosen": -1.420062780380249, | |
"logps/rejected": -4.232975006103516, | |
"loss": 3.9707, | |
"rewards/accuracies": 0.7749999761581421, | |
"rewards/chosen": -2.130094051361084, | |
"rewards/margins": 4.219367980957031, | |
"rewards/rejected": -6.349462032318115, | |
"step": 470 | |
}, | |
{ | |
"epoch": 0.3939269593762823, | |
"grad_norm": 1.110568642616272, | |
"learning_rate": 3.839566987447492e-06, | |
"logits/chosen": -2.5447945594787598, | |
"logits/rejected": -2.720705032348633, | |
"logps/chosen": -1.9743589162826538, | |
"logps/rejected": -3.963395595550537, | |
"loss": 3.7559, | |
"rewards/accuracies": 0.8125, | |
"rewards/chosen": -2.961538314819336, | |
"rewards/margins": 2.983555316925049, | |
"rewards/rejected": -5.945094108581543, | |
"step": 480 | |
}, | |
{ | |
"epoch": 0.40213377102995485, | |
"grad_norm": 1.1167631149291992, | |
"learning_rate": 3.795067523432826e-06, | |
"logits/chosen": -2.6660027503967285, | |
"logits/rejected": -2.839933395385742, | |
"logps/chosen": -2.3371267318725586, | |
"logps/rejected": -4.315327167510986, | |
"loss": 3.5333, | |
"rewards/accuracies": 0.7875000238418579, | |
"rewards/chosen": -3.505690097808838, | |
"rewards/margins": 2.9673006534576416, | |
"rewards/rejected": -6.4729905128479, | |
"step": 490 | |
}, | |
{ | |
"epoch": 0.4103405826836274, | |
"grad_norm": 1.7681843042373657, | |
"learning_rate": 3.7500000000000005e-06, | |
"logits/chosen": -2.4737250804901123, | |
"logits/rejected": -2.7522482872009277, | |
"logps/chosen": -2.802852153778076, | |
"logps/rejected": -5.4331955909729, | |
"loss": 3.1958, | |
"rewards/accuracies": 0.800000011920929, | |
"rewards/chosen": -4.204278469085693, | |
"rewards/margins": 3.9455153942108154, | |
"rewards/rejected": -8.14979362487793, | |
"step": 500 | |
}, | |
{ | |
"epoch": 0.4103405826836274, | |
"eval_logits/chosen": -2.464381456375122, | |
"eval_logits/rejected": -2.8693907260894775, | |
"eval_logps/chosen": -2.83821177482605, | |
"eval_logps/rejected": -5.77597188949585, | |
"eval_loss": 0.3702296018600464, | |
"eval_rewards/accuracies": 0.8383838534355164, | |
"eval_rewards/chosen": -4.257318019866943, | |
"eval_rewards/margins": 4.406640529632568, | |
"eval_rewards/rejected": -8.663958549499512, | |
"eval_runtime": 26.0401, | |
"eval_samples_per_second": 30.261, | |
"eval_steps_per_second": 3.802, | |
"step": 500 | |
}, | |
{ | |
"epoch": 0.4185473943373, | |
"grad_norm": 2.1794912815093994, | |
"learning_rate": 3.7043841852542884e-06, | |
"logits/chosen": -2.4379305839538574, | |
"logits/rejected": -2.823613166809082, | |
"logps/chosen": -3.3340702056884766, | |
"logps/rejected": -6.568157196044922, | |
"loss": 2.8418, | |
"rewards/accuracies": 0.8999999761581421, | |
"rewards/chosen": -5.001105308532715, | |
"rewards/margins": 4.851131439208984, | |
"rewards/rejected": -9.8522367477417, | |
"step": 510 | |
}, | |
{ | |
"epoch": 0.4267542059909725, | |
"grad_norm": 2.4649722576141357, | |
"learning_rate": 3.658240087799655e-06, | |
"logits/chosen": -2.627397298812866, | |
"logits/rejected": -2.9164626598358154, | |
"logps/chosen": -5.047720909118652, | |
"logps/rejected": -7.8882904052734375, | |
"loss": 2.73, | |
"rewards/accuracies": 0.8374999761581421, | |
"rewards/chosen": -7.5715813636779785, | |
"rewards/margins": 4.2608537673950195, | |
"rewards/rejected": -11.832433700561523, | |
"step": 520 | |
}, | |
{ | |
"epoch": 0.43496101764464506, | |
"grad_norm": 2.53389835357666, | |
"learning_rate": 3.611587947962319e-06, | |
"logits/chosen": -2.4901440143585205, | |
"logits/rejected": -2.8462088108062744, | |
"logps/chosen": -4.257150173187256, | |
"logps/rejected": -7.8700971603393555, | |
"loss": 2.5886, | |
"rewards/accuracies": 0.8999999761581421, | |
"rewards/chosen": -6.385725498199463, | |
"rewards/margins": 5.419419288635254, | |
"rewards/rejected": -11.805145263671875, | |
"step": 530 | |
}, | |
{ | |
"epoch": 0.4431678292983176, | |
"grad_norm": 5.214587688446045, | |
"learning_rate": 3.564448228912682e-06, | |
"logits/chosen": -2.4020960330963135, | |
"logits/rejected": -2.8110060691833496, | |
"logps/chosen": -4.833271503448486, | |
"logps/rejected": -8.95389461517334, | |
"loss": 2.5918, | |
"rewards/accuracies": 0.875, | |
"rewards/chosen": -7.24990701675415, | |
"rewards/margins": 6.180935859680176, | |
"rewards/rejected": -13.4308443069458, | |
"step": 540 | |
}, | |
{ | |
"epoch": 0.45137464095199015, | |
"grad_norm": 2.7313215732574463, | |
"learning_rate": 3.516841607689501e-06, | |
"logits/chosen": -2.5269761085510254, | |
"logits/rejected": -2.780574321746826, | |
"logps/chosen": -5.635412693023682, | |
"logps/rejected": -9.682252883911133, | |
"loss": 2.3387, | |
"rewards/accuracies": 0.8999999761581421, | |
"rewards/chosen": -8.453118324279785, | |
"rewards/margins": 6.070260524749756, | |
"rewards/rejected": -14.5233793258667, | |
"step": 550 | |
}, | |
{ | |
"epoch": 0.45137464095199015, | |
"eval_logits/chosen": -2.4869625568389893, | |
"eval_logits/rejected": -2.8525495529174805, | |
"eval_logps/chosen": -5.279529094696045, | |
"eval_logps/rejected": -9.829012870788574, | |
"eval_loss": 0.28920263051986694, | |
"eval_rewards/accuracies": 0.8888888955116272, | |
"eval_rewards/chosen": -7.91929292678833, | |
"eval_rewards/margins": 6.824225425720215, | |
"eval_rewards/rejected": -14.743520736694336, | |
"eval_runtime": 26.0459, | |
"eval_samples_per_second": 30.254, | |
"eval_steps_per_second": 3.801, | |
"step": 550 | |
}, | |
{ | |
"epoch": 0.4595814526056627, | |
"grad_norm": 7.664855003356934, | |
"learning_rate": 3.4687889661302577e-06, | |
"logits/chosen": -2.517958164215088, | |
"logits/rejected": -2.821098804473877, | |
"logps/chosen": -6.172144889831543, | |
"logps/rejected": -11.623320579528809, | |
"loss": 2.1915, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -9.258216857910156, | |
"rewards/margins": 8.176764488220215, | |
"rewards/rejected": -17.434982299804688, | |
"step": 560 | |
}, | |
{ | |
"epoch": 0.46778826425933523, | |
"grad_norm": 5.298526287078857, | |
"learning_rate": 3.4203113817116955e-06, | |
"logits/chosen": -2.4968509674072266, | |
"logits/rejected": -2.7755746841430664, | |
"logps/chosen": -6.602853298187256, | |
"logps/rejected": -12.261907577514648, | |
"loss": 1.9401, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -9.904280662536621, | |
"rewards/margins": 8.488581657409668, | |
"rewards/rejected": -18.39286231994629, | |
"step": 570 | |
}, | |
{ | |
"epoch": 0.47599507591300777, | |
"grad_norm": 3.2574403285980225, | |
"learning_rate": 3.3714301183045382e-06, | |
"logits/chosen": -2.510072708129883, | |
"logits/rejected": -2.7066006660461426, | |
"logps/chosen": -6.970999240875244, | |
"logps/rejected": -12.280068397521973, | |
"loss": 2.201, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -10.456499099731445, | |
"rewards/margins": 7.963601589202881, | |
"rewards/rejected": -18.420101165771484, | |
"step": 580 | |
}, | |
{ | |
"epoch": 0.48420188756668037, | |
"grad_norm": 7.752432346343994, | |
"learning_rate": 3.3221666168464584e-06, | |
"logits/chosen": -2.4650096893310547, | |
"logits/rejected": -2.7430062294006348, | |
"logps/chosen": -7.387778282165527, | |
"logps/rejected": -12.886815071105957, | |
"loss": 1.7232, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -11.081666946411133, | |
"rewards/margins": 8.248555183410645, | |
"rewards/rejected": -19.330224990844727, | |
"step": 590 | |
}, | |
{ | |
"epoch": 0.4924086992203529, | |
"grad_norm": 7.162052154541016, | |
"learning_rate": 3.272542485937369e-06, | |
"logits/chosen": -2.502263069152832, | |
"logits/rejected": -2.775761365890503, | |
"logps/chosen": -7.409277439117432, | |
"logps/rejected": -12.631124496459961, | |
"loss": 1.7381, | |
"rewards/accuracies": 0.925000011920929, | |
"rewards/chosen": -11.11391544342041, | |
"rewards/margins": 7.832771301269531, | |
"rewards/rejected": -18.946685791015625, | |
"step": 600 | |
}, | |
{ | |
"epoch": 0.4924086992203529, | |
"eval_logits/chosen": -2.414571523666382, | |
"eval_logits/rejected": -2.7700724601745605, | |
"eval_logps/chosen": -6.709319591522217, | |
"eval_logps/rejected": -12.794181823730469, | |
"eval_loss": 0.22023409605026245, | |
"eval_rewards/accuracies": 0.9494949579238892, | |
"eval_rewards/chosen": -10.063980102539062, | |
"eval_rewards/margins": 9.127293586730957, | |
"eval_rewards/rejected": -19.191272735595703, | |
"eval_runtime": 26.0239, | |
"eval_samples_per_second": 30.28, | |
"eval_steps_per_second": 3.804, | |
"step": 600 | |
}, | |
{ | |
"epoch": 0.5006155108740254, | |
"grad_norm": 6.312295436859131, | |
"learning_rate": 3.222579492361179e-06, | |
"logits/chosen": -2.48272705078125, | |
"logits/rejected": -2.7790474891662598, | |
"logps/chosen": -7.929927825927734, | |
"logps/rejected": -14.165933609008789, | |
"loss": 1.8554, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -11.894890785217285, | |
"rewards/margins": 9.354007720947266, | |
"rewards/rejected": -21.248899459838867, | |
"step": 610 | |
}, | |
{ | |
"epoch": 0.508822322527698, | |
"grad_norm": 5.254099369049072, | |
"learning_rate": 3.1722995515381644e-06, | |
"logits/chosen": -2.4686760902404785, | |
"logits/rejected": -2.7497403621673584, | |
"logps/chosen": -6.739262580871582, | |
"logps/rejected": -13.017419815063477, | |
"loss": 1.5174, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -10.108892440795898, | |
"rewards/margins": 9.417237281799316, | |
"rewards/rejected": -19.52613067626953, | |
"step": 620 | |
}, | |
{ | |
"epoch": 0.5170291341813705, | |
"grad_norm": 2.79254412651062, | |
"learning_rate": 3.121724717912138e-06, | |
"logits/chosen": -2.5906078815460205, | |
"logits/rejected": -2.776789426803589, | |
"logps/chosen": -8.108521461486816, | |
"logps/rejected": -13.654635429382324, | |
"loss": 1.8195, | |
"rewards/accuracies": 0.9375, | |
"rewards/chosen": -12.162781715393066, | |
"rewards/margins": 8.319172859191895, | |
"rewards/rejected": -20.48195457458496, | |
"step": 630 | |
}, | |
{ | |
"epoch": 0.5252359458350431, | |
"grad_norm": 4.617709636688232, | |
"learning_rate": 3.0708771752766397e-06, | |
"logits/chosen": -2.4493868350982666, | |
"logits/rejected": -2.7978568077087402, | |
"logps/chosen": -7.94122314453125, | |
"logps/rejected": -14.420333862304688, | |
"loss": 1.7681, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -11.911835670471191, | |
"rewards/margins": 9.718667984008789, | |
"rewards/rejected": -21.630504608154297, | |
"step": 640 | |
}, | |
{ | |
"epoch": 0.5334427574887156, | |
"grad_norm": 15.788602828979492, | |
"learning_rate": 3.019779227044398e-06, | |
"logits/chosen": -2.495669364929199, | |
"logits/rejected": -2.799513101577759, | |
"logps/chosen": -7.9494948387146, | |
"logps/rejected": -14.22314167022705, | |
"loss": 1.7372, | |
"rewards/accuracies": 0.9125000238418579, | |
"rewards/chosen": -11.924242973327637, | |
"rewards/margins": 9.410469055175781, | |
"rewards/rejected": -21.3347110748291, | |
"step": 650 | |
}, | |
{ | |
"epoch": 0.5334427574887156, | |
"eval_logits/chosen": -2.4212491512298584, | |
"eval_logits/rejected": -2.7830140590667725, | |
"eval_logps/chosen": -6.399094581604004, | |
"eval_logps/rejected": -13.696700096130371, | |
"eval_loss": 0.19624173641204834, | |
"eval_rewards/accuracies": 0.9595959782600403, | |
"eval_rewards/chosen": -9.598641395568848, | |
"eval_rewards/margins": 10.94640827178955, | |
"eval_rewards/rejected": -20.5450496673584, | |
"eval_runtime": 26.0246, | |
"eval_samples_per_second": 30.279, | |
"eval_steps_per_second": 3.804, | |
"step": 650 | |
}, | |
{ | |
"epoch": 0.5416495691423882, | |
"grad_norm": 1.8350448608398438, | |
"learning_rate": 2.9684532864643123e-06, | |
"logits/chosen": -2.4959726333618164, | |
"logits/rejected": -2.7555196285247803, | |
"logps/chosen": -7.877779483795166, | |
"logps/rejected": -15.448628425598145, | |
"loss": 1.2842, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -11.816670417785645, | |
"rewards/margins": 11.35627269744873, | |
"rewards/rejected": -23.172941207885742, | |
"step": 660 | |
}, | |
{ | |
"epoch": 0.5498563807960607, | |
"grad_norm": 5.017009735107422, | |
"learning_rate": 2.9169218667902562e-06, | |
"logits/chosen": -2.4682934284210205, | |
"logits/rejected": -2.7640793323516846, | |
"logps/chosen": -8.19635009765625, | |
"logps/rejected": -16.366384506225586, | |
"loss": 1.8156, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -12.294524192810059, | |
"rewards/margins": 12.255053520202637, | |
"rewards/rejected": -24.549579620361328, | |
"step": 670 | |
}, | |
{ | |
"epoch": 0.5580631924497332, | |
"grad_norm": 5.384292125701904, | |
"learning_rate": 2.8652075714060296e-06, | |
"logits/chosen": -2.381718873977661, | |
"logits/rejected": -2.8303215503692627, | |
"logps/chosen": -7.562445163726807, | |
"logps/rejected": -16.368242263793945, | |
"loss": 1.4923, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -11.343667984008789, | |
"rewards/margins": 13.208694458007812, | |
"rewards/rejected": -24.5523624420166, | |
"step": 680 | |
}, | |
{ | |
"epoch": 0.5662700041034058, | |
"grad_norm": 4.969016075134277, | |
"learning_rate": 2.813333083910761e-06, | |
"logits/chosen": -2.449165105819702, | |
"logits/rejected": -2.7401328086853027, | |
"logps/chosen": -7.27236795425415, | |
"logps/rejected": -15.723406791687012, | |
"loss": 1.464, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -10.908552169799805, | |
"rewards/margins": 12.676557540893555, | |
"rewards/rejected": -23.58510971069336, | |
"step": 690 | |
}, | |
{ | |
"epoch": 0.5744768157570784, | |
"grad_norm": 7.394704818725586, | |
"learning_rate": 2.761321158169134e-06, | |
"logits/chosen": -2.392939805984497, | |
"logits/rejected": -2.800342559814453, | |
"logps/chosen": -7.558518409729004, | |
"logps/rejected": -17.065820693969727, | |
"loss": 1.2698, | |
"rewards/accuracies": 0.987500011920929, | |
"rewards/chosen": -11.337777137756348, | |
"rewards/margins": 14.260955810546875, | |
"rewards/rejected": -25.598730087280273, | |
"step": 700 | |
}, | |
{ | |
"epoch": 0.5744768157570784, | |
"eval_logits/chosen": -2.3927063941955566, | |
"eval_logits/rejected": -2.72641921043396, | |
"eval_logps/chosen": -6.722636699676514, | |
"eval_logps/rejected": -15.053749084472656, | |
"eval_loss": 0.16624146699905396, | |
"eval_rewards/accuracies": 0.9595959782600403, | |
"eval_rewards/chosen": -10.083953857421875, | |
"eval_rewards/margins": 12.496668815612793, | |
"eval_rewards/rejected": -22.580623626708984, | |
"eval_runtime": 26.0296, | |
"eval_samples_per_second": 30.273, | |
"eval_steps_per_second": 3.803, | |
"step": 700 | |
}, | |
{ | |
"epoch": 0.582683627410751, | |
"grad_norm": 2.325347423553467, | |
"learning_rate": 2.70919460833079e-06, | |
"logits/chosen": -2.3837151527404785, | |
"logits/rejected": -2.718639850616455, | |
"logps/chosen": -7.265778541564941, | |
"logps/rejected": -16.18191909790039, | |
"loss": 1.4758, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -10.898667335510254, | |
"rewards/margins": 13.374208450317383, | |
"rewards/rejected": -24.272876739501953, | |
"step": 710 | |
}, | |
{ | |
"epoch": 0.5908904390644235, | |
"grad_norm": 8.78543758392334, | |
"learning_rate": 2.6569762988232838e-06, | |
"logits/chosen": -2.3132755756378174, | |
"logits/rejected": -2.718923807144165, | |
"logps/chosen": -7.462459564208984, | |
"logps/rejected": -16.323883056640625, | |
"loss": 1.4369, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -11.193689346313477, | |
"rewards/margins": 13.292137145996094, | |
"rewards/rejected": -24.485824584960938, | |
"step": 720 | |
}, | |
{ | |
"epoch": 0.599097250718096, | |
"grad_norm": 5.5666584968566895, | |
"learning_rate": 2.604689134322999e-06, | |
"logits/chosen": -2.3744144439697266, | |
"logits/rejected": -2.6114182472229004, | |
"logps/chosen": -7.262286186218262, | |
"logps/rejected": -15.194799423217773, | |
"loss": 1.3424, | |
"rewards/accuracies": 0.987500011920929, | |
"rewards/chosen": -10.893428802490234, | |
"rewards/margins": 11.898767471313477, | |
"rewards/rejected": -22.79219627380371, | |
"step": 730 | |
}, | |
{ | |
"epoch": 0.6073040623717686, | |
"grad_norm": 2.3607444763183594, | |
"learning_rate": 2.5523560497083927e-06, | |
"logits/chosen": -2.391589641571045, | |
"logits/rejected": -2.649766445159912, | |
"logps/chosen": -8.12696647644043, | |
"logps/rejected": -17.4465274810791, | |
"loss": 1.2588, | |
"rewards/accuracies": 1.0, | |
"rewards/chosen": -12.190450668334961, | |
"rewards/margins": 13.979342460632324, | |
"rewards/rejected": -26.169790267944336, | |
"step": 740 | |
}, | |
{ | |
"epoch": 0.6155108740254411, | |
"grad_norm": 3.9366135597229004, | |
"learning_rate": 2.5e-06, | |
"logits/chosen": -2.4100446701049805, | |
"logits/rejected": -2.6920006275177, | |
"logps/chosen": -8.519265174865723, | |
"logps/rejected": -16.66709327697754, | |
"loss": 1.3798, | |
"rewards/accuracies": 0.925000011920929, | |
"rewards/chosen": -12.778898239135742, | |
"rewards/margins": 12.221744537353516, | |
"rewards/rejected": -25.000640869140625, | |
"step": 750 | |
}, | |
{ | |
"epoch": 0.6155108740254411, | |
"eval_logits/chosen": -2.377652406692505, | |
"eval_logits/rejected": -2.7032172679901123, | |
"eval_logps/chosen": -7.426075458526611, | |
"eval_logps/rejected": -15.338685035705566, | |
"eval_loss": 0.1500895917415619, | |
"eval_rewards/accuracies": 0.9494949579238892, | |
"eval_rewards/chosen": -11.139113426208496, | |
"eval_rewards/margins": 11.868914604187012, | |
"eval_rewards/rejected": -23.008026123046875, | |
"eval_runtime": 26.0362, | |
"eval_samples_per_second": 30.266, | |
"eval_steps_per_second": 3.802, | |
"step": 750 | |
}, | |
{ | |
"epoch": 0.6237176856791137, | |
"grad_norm": 6.827406406402588, | |
"learning_rate": 2.447643950291608e-06, | |
"logits/chosen": -2.3275043964385986, | |
"logits/rejected": -2.6344006061553955, | |
"logps/chosen": -7.848365783691406, | |
"logps/rejected": -17.661056518554688, | |
"loss": 0.8437, | |
"rewards/accuracies": 0.987500011920929, | |
"rewards/chosen": -11.77254867553711, | |
"rewards/margins": 14.719035148620605, | |
"rewards/rejected": -26.4915828704834, | |
"step": 760 | |
}, | |
{ | |
"epoch": 0.6319244973327862, | |
"grad_norm": 3.160689353942871, | |
"learning_rate": 2.3953108656770018e-06, | |
"logits/chosen": -2.480649709701538, | |
"logits/rejected": -2.686522960662842, | |
"logps/chosen": -9.342796325683594, | |
"logps/rejected": -18.42390251159668, | |
"loss": 1.1501, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -14.014195442199707, | |
"rewards/margins": 13.62165641784668, | |
"rewards/rejected": -27.635854721069336, | |
"step": 770 | |
}, | |
{ | |
"epoch": 0.6401313089864588, | |
"grad_norm": 3.751577854156494, | |
"learning_rate": 2.3430237011767166e-06, | |
"logits/chosen": -2.405439853668213, | |
"logits/rejected": -2.7555787563323975, | |
"logps/chosen": -7.906872749328613, | |
"logps/rejected": -19.13258171081543, | |
"loss": 1.1781, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -11.860309600830078, | |
"rewards/margins": 16.838565826416016, | |
"rewards/rejected": -28.69887351989746, | |
"step": 780 | |
}, | |
{ | |
"epoch": 0.6483381206401313, | |
"grad_norm": 3.2225842475891113, | |
"learning_rate": 2.290805391669212e-06, | |
"logits/chosen": -2.48250412940979, | |
"logits/rejected": -2.7523207664489746, | |
"logps/chosen": -9.760643005371094, | |
"logps/rejected": -18.688955307006836, | |
"loss": 0.8935, | |
"rewards/accuracies": 0.987500011920929, | |
"rewards/chosen": -14.640963554382324, | |
"rewards/margins": 13.392468452453613, | |
"rewards/rejected": -28.033432006835938, | |
"step": 790 | |
}, | |
{ | |
"epoch": 0.6565449322938038, | |
"grad_norm": 6.352996826171875, | |
"learning_rate": 2.238678841830867e-06, | |
"logits/chosen": -2.417715072631836, | |
"logits/rejected": -2.649390697479248, | |
"logps/chosen": -9.004033088684082, | |
"logps/rejected": -18.346654891967773, | |
"loss": 0.908, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -13.506048202514648, | |
"rewards/margins": 14.013933181762695, | |
"rewards/rejected": -27.51997947692871, | |
"step": 800 | |
}, | |
{ | |
"epoch": 0.6565449322938038, | |
"eval_logits/chosen": -2.3531436920166016, | |
"eval_logits/rejected": -2.679354667663574, | |
"eval_logps/chosen": -8.068766593933105, | |
"eval_logps/rejected": -18.179340362548828, | |
"eval_loss": 0.13774718344211578, | |
"eval_rewards/accuracies": 0.9595959782600403, | |
"eval_rewards/chosen": -12.1031494140625, | |
"eval_rewards/margins": 15.165861129760742, | |
"eval_rewards/rejected": -27.26900863647461, | |
"eval_runtime": 26.0374, | |
"eval_samples_per_second": 30.264, | |
"eval_steps_per_second": 3.802, | |
"step": 800 | |
}, | |
{ | |
"epoch": 0.6647517439474764, | |
"grad_norm": 24.157991409301758, | |
"learning_rate": 2.186666916089239e-06, | |
"logits/chosen": -2.443021297454834, | |
"logits/rejected": -2.726386308670044, | |
"logps/chosen": -9.991727828979492, | |
"logps/rejected": -19.389759063720703, | |
"loss": 0.8971, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -14.987591743469238, | |
"rewards/margins": 14.0970458984375, | |
"rewards/rejected": -29.084636688232422, | |
"step": 810 | |
}, | |
{ | |
"epoch": 0.6729585556011489, | |
"grad_norm": 1.581506371498108, | |
"learning_rate": 2.134792428593971e-06, | |
"logits/chosen": -2.352402687072754, | |
"logits/rejected": -2.5905702114105225, | |
"logps/chosen": -8.19101619720459, | |
"logps/rejected": -18.939958572387695, | |
"loss": 1.0462, | |
"rewards/accuracies": 1.0, | |
"rewards/chosen": -12.286526679992676, | |
"rewards/margins": 16.123409271240234, | |
"rewards/rejected": -28.409936904907227, | |
"step": 820 | |
}, | |
{ | |
"epoch": 0.6811653672548215, | |
"grad_norm": 0.910886824131012, | |
"learning_rate": 2.0830781332097446e-06, | |
"logits/chosen": -2.4748313426971436, | |
"logits/rejected": -2.702345371246338, | |
"logps/chosen": -9.464627265930176, | |
"logps/rejected": -18.847036361694336, | |
"loss": 1.0372, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -14.196942329406738, | |
"rewards/margins": 14.07361125946045, | |
"rewards/rejected": -28.270553588867188, | |
"step": 830 | |
}, | |
{ | |
"epoch": 0.689372178908494, | |
"grad_norm": 1.8233784437179565, | |
"learning_rate": 2.031546713535688e-06, | |
"logits/chosen": -2.460084915161133, | |
"logits/rejected": -2.678401470184326, | |
"logps/chosen": -9.879974365234375, | |
"logps/rejected": -18.783390045166016, | |
"loss": 0.9703, | |
"rewards/accuracies": 0.987500011920929, | |
"rewards/chosen": -14.819961547851562, | |
"rewards/margins": 13.355127334594727, | |
"rewards/rejected": -28.17508888244629, | |
"step": 840 | |
}, | |
{ | |
"epoch": 0.6975789905621665, | |
"grad_norm": 3.4594898223876953, | |
"learning_rate": 1.9802207729556023e-06, | |
"logits/chosen": -2.3830952644348145, | |
"logits/rejected": -2.6068103313446045, | |
"logps/chosen": -9.286922454833984, | |
"logps/rejected": -18.89388656616211, | |
"loss": 1.0259, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -13.930383682250977, | |
"rewards/margins": 14.410444259643555, | |
"rewards/rejected": -28.340829849243164, | |
"step": 850 | |
}, | |
{ | |
"epoch": 0.6975789905621665, | |
"eval_logits/chosen": -2.3181838989257812, | |
"eval_logits/rejected": -2.6507365703582764, | |
"eval_logps/chosen": -7.670541286468506, | |
"eval_logps/rejected": -18.20172119140625, | |
"eval_loss": 0.12306927889585495, | |
"eval_rewards/accuracies": 0.9696969985961914, | |
"eval_rewards/chosen": -11.505812644958496, | |
"eval_rewards/margins": 15.796772956848145, | |
"eval_rewards/rejected": -27.302583694458008, | |
"eval_runtime": 26.0288, | |
"eval_samples_per_second": 30.274, | |
"eval_steps_per_second": 3.803, | |
"step": 850 | |
}, | |
{ | |
"epoch": 0.7057858022158392, | |
"grad_norm": 11.1419038772583, | |
"learning_rate": 1.9291228247233607e-06, | |
"logits/chosen": -2.438063859939575, | |
"logits/rejected": -2.653733730316162, | |
"logps/chosen": -9.637109756469727, | |
"logps/rejected": -20.197423934936523, | |
"loss": 0.781, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -14.455665588378906, | |
"rewards/margins": 15.840471267700195, | |
"rewards/rejected": -30.296138763427734, | |
"step": 860 | |
}, | |
{ | |
"epoch": 0.7139926138695117, | |
"grad_norm": 8.1745023727417, | |
"learning_rate": 1.8782752820878636e-06, | |
"logits/chosen": -2.292649269104004, | |
"logits/rejected": -2.5739500522613525, | |
"logps/chosen": -7.215020656585693, | |
"logps/rejected": -18.043460845947266, | |
"loss": 0.845, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -10.822530746459961, | |
"rewards/margins": 16.242664337158203, | |
"rewards/rejected": -27.065195083618164, | |
"step": 870 | |
}, | |
{ | |
"epoch": 0.7221994255231843, | |
"grad_norm": 49.25035858154297, | |
"learning_rate": 1.827700448461836e-06, | |
"logits/chosen": -2.3827719688415527, | |
"logits/rejected": -2.6681952476501465, | |
"logps/chosen": -8.750313758850098, | |
"logps/rejected": -17.976327896118164, | |
"loss": 0.8561, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -13.125471115112305, | |
"rewards/margins": 13.839022636413574, | |
"rewards/rejected": -26.964492797851562, | |
"step": 880 | |
}, | |
{ | |
"epoch": 0.7304062371768568, | |
"grad_norm": 5.7622504234313965, | |
"learning_rate": 1.7774205076388207e-06, | |
"logits/chosen": -2.3274879455566406, | |
"logits/rejected": -2.6361851692199707, | |
"logps/chosen": -6.857377052307129, | |
"logps/rejected": -16.838680267333984, | |
"loss": 0.8304, | |
"rewards/accuracies": 0.987500011920929, | |
"rewards/chosen": -10.286066055297852, | |
"rewards/margins": 14.971952438354492, | |
"rewards/rejected": -25.258018493652344, | |
"step": 890 | |
}, | |
{ | |
"epoch": 0.7386130488305294, | |
"grad_norm": 8.557459831237793, | |
"learning_rate": 1.7274575140626318e-06, | |
"logits/chosen": -2.393751859664917, | |
"logits/rejected": -2.641291379928589, | |
"logps/chosen": -7.221099853515625, | |
"logps/rejected": -17.681804656982422, | |
"loss": 0.7794, | |
"rewards/accuracies": 0.925000011920929, | |
"rewards/chosen": -10.831649780273438, | |
"rewards/margins": 15.691058158874512, | |
"rewards/rejected": -26.522708892822266, | |
"step": 900 | |
}, | |
{ | |
"epoch": 0.7386130488305294, | |
"eval_logits/chosen": -2.3039753437042236, | |
"eval_logits/rejected": -2.6469638347625732, | |
"eval_logps/chosen": -6.395583152770996, | |
"eval_logps/rejected": -16.935155868530273, | |
"eval_loss": 0.11225885152816772, | |
"eval_rewards/accuracies": 0.9797979593276978, | |
"eval_rewards/chosen": -9.593375205993652, | |
"eval_rewards/margins": 15.809358596801758, | |
"eval_rewards/rejected": -25.402732849121094, | |
"eval_runtime": 26.0286, | |
"eval_samples_per_second": 30.274, | |
"eval_steps_per_second": 3.804, | |
"step": 900 | |
}, | |
{ | |
"epoch": 0.7468198604842019, | |
"grad_norm": 1.0288090705871582, | |
"learning_rate": 1.677833383153542e-06, | |
"logits/chosen": -2.3569540977478027, | |
"logits/rejected": -2.668043613433838, | |
"logps/chosen": -8.015088081359863, | |
"logps/rejected": -19.568172454833984, | |
"loss": 0.8783, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -12.022631645202637, | |
"rewards/margins": 17.32962989807129, | |
"rewards/rejected": -29.35226058959961, | |
"step": 910 | |
}, | |
{ | |
"epoch": 0.7550266721378744, | |
"grad_norm": 30.93399429321289, | |
"learning_rate": 1.6285698816954626e-06, | |
"logits/chosen": -2.2761807441711426, | |
"logits/rejected": -2.577350378036499, | |
"logps/chosen": -7.287099361419678, | |
"logps/rejected": -18.970417022705078, | |
"loss": 0.88, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -10.930647850036621, | |
"rewards/margins": 17.524978637695312, | |
"rewards/rejected": -28.45562744140625, | |
"step": 920 | |
}, | |
{ | |
"epoch": 0.763233483791547, | |
"grad_norm": 4.3290534019470215, | |
"learning_rate": 1.5796886182883053e-06, | |
"logits/chosen": -2.391355037689209, | |
"logits/rejected": -2.6769471168518066, | |
"logps/chosen": -7.963728427886963, | |
"logps/rejected": -18.105741500854492, | |
"loss": 0.8804, | |
"rewards/accuracies": 0.987500011920929, | |
"rewards/chosen": -11.945592880249023, | |
"rewards/margins": 15.213022232055664, | |
"rewards/rejected": -27.158611297607422, | |
"step": 930 | |
}, | |
{ | |
"epoch": 0.7714402954452195, | |
"grad_norm": 26.385705947875977, | |
"learning_rate": 1.5312110338697427e-06, | |
"logits/chosen": -2.4121921062469482, | |
"logits/rejected": -2.68375825881958, | |
"logps/chosen": -9.157960891723633, | |
"logps/rejected": -20.01449203491211, | |
"loss": 0.9232, | |
"rewards/accuracies": 0.987500011920929, | |
"rewards/chosen": -13.73694133758545, | |
"rewards/margins": 16.28479766845703, | |
"rewards/rejected": -30.021739959716797, | |
"step": 940 | |
}, | |
{ | |
"epoch": 0.7796471070988921, | |
"grad_norm": 3.784264087677002, | |
"learning_rate": 1.4831583923105e-06, | |
"logits/chosen": -2.466296672821045, | |
"logits/rejected": -2.634974956512451, | |
"logps/chosen": -9.769882202148438, | |
"logps/rejected": -18.733013153076172, | |
"loss": 0.6239, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -14.654825210571289, | |
"rewards/margins": 13.444696426391602, | |
"rewards/rejected": -28.09952163696289, | |
"step": 950 | |
}, | |
{ | |
"epoch": 0.7796471070988921, | |
"eval_logits/chosen": -2.3149495124816895, | |
"eval_logits/rejected": -2.655733346939087, | |
"eval_logps/chosen": -7.496860027313232, | |
"eval_logps/rejected": -18.8516788482666, | |
"eval_loss": 0.09784827381372452, | |
"eval_rewards/accuracies": 0.9696969985961914, | |
"eval_rewards/chosen": -11.24528980255127, | |
"eval_rewards/margins": 17.032228469848633, | |
"eval_rewards/rejected": -28.27751922607422, | |
"eval_runtime": 25.9982, | |
"eval_samples_per_second": 30.31, | |
"eval_steps_per_second": 3.808, | |
"step": 950 | |
}, | |
{ | |
"epoch": 0.7878539187525646, | |
"grad_norm": 6.634799003601074, | |
"learning_rate": 1.4355517710873184e-06, | |
"logits/chosen": -2.4064090251922607, | |
"logits/rejected": -2.7013659477233887, | |
"logps/chosen": -9.801457405090332, | |
"logps/rejected": -20.174358367919922, | |
"loss": 0.6526, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -14.702186584472656, | |
"rewards/margins": 15.559350967407227, | |
"rewards/rejected": -30.261539459228516, | |
"step": 960 | |
}, | |
{ | |
"epoch": 0.7960607304062371, | |
"grad_norm": 46.21657943725586, | |
"learning_rate": 1.388412052037682e-06, | |
"logits/chosen": -2.356576681137085, | |
"logits/rejected": -2.6283116340637207, | |
"logps/chosen": -9.053842544555664, | |
"logps/rejected": -21.279687881469727, | |
"loss": 0.9765, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -13.580764770507812, | |
"rewards/margins": 18.33877182006836, | |
"rewards/rejected": -31.91953468322754, | |
"step": 970 | |
}, | |
{ | |
"epoch": 0.8042675420599097, | |
"grad_norm": 3.6938939094543457, | |
"learning_rate": 1.3417599122003464e-06, | |
"logits/chosen": -2.4083611965179443, | |
"logits/rejected": -2.6813862323760986, | |
"logps/chosen": -9.064926147460938, | |
"logps/rejected": -20.76726531982422, | |
"loss": 0.8067, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -13.597389221191406, | |
"rewards/margins": 17.553510665893555, | |
"rewards/rejected": -31.15089988708496, | |
"step": 980 | |
}, | |
{ | |
"epoch": 0.8124743537135822, | |
"grad_norm": 9.791804313659668, | |
"learning_rate": 1.2956158147457116e-06, | |
"logits/chosen": -2.3875112533569336, | |
"logits/rejected": -2.6877217292785645, | |
"logps/chosen": -9.713172912597656, | |
"logps/rejected": -20.51764488220215, | |
"loss": 0.6454, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -14.5697603225708, | |
"rewards/margins": 16.206707000732422, | |
"rewards/rejected": -30.776464462280273, | |
"step": 990 | |
}, | |
{ | |
"epoch": 0.8206811653672548, | |
"grad_norm": 1.188546061515808, | |
"learning_rate": 1.2500000000000007e-06, | |
"logits/chosen": -2.400709629058838, | |
"logits/rejected": -2.613481283187866, | |
"logps/chosen": -9.234119415283203, | |
"logps/rejected": -20.3234920501709, | |
"loss": 0.7065, | |
"rewards/accuracies": 0.987500011920929, | |
"rewards/chosen": -13.851181030273438, | |
"rewards/margins": 16.63405990600586, | |
"rewards/rejected": -30.485240936279297, | |
"step": 1000 | |
}, | |
{ | |
"epoch": 0.8206811653672548, | |
"eval_logits/chosen": -2.3037211894989014, | |
"eval_logits/rejected": -2.6389148235321045, | |
"eval_logps/chosen": -7.639871120452881, | |
"eval_logps/rejected": -18.97751808166504, | |
"eval_loss": 0.09854340553283691, | |
"eval_rewards/accuracies": 0.9595959782600403, | |
"eval_rewards/chosen": -11.459808349609375, | |
"eval_rewards/margins": 17.006471633911133, | |
"eval_rewards/rejected": -28.466276168823242, | |
"eval_runtime": 26.0416, | |
"eval_samples_per_second": 30.259, | |
"eval_steps_per_second": 3.802, | |
"step": 1000 | |
}, | |
{ | |
"epoch": 0.8288879770209274, | |
"grad_norm": 34.27748107910156, | |
"learning_rate": 1.204932476567175e-06, | |
"logits/chosen": -2.3972585201263428, | |
"logits/rejected": -2.6077466011047363, | |
"logps/chosen": -9.558837890625, | |
"logps/rejected": -21.304996490478516, | |
"loss": 0.9793, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -14.338258743286133, | |
"rewards/margins": 17.61923599243164, | |
"rewards/rejected": -31.957494735717773, | |
"step": 1010 | |
}, | |
{ | |
"epoch": 0.8370947886746, | |
"grad_norm": 6.985296249389648, | |
"learning_rate": 1.160433012552508e-06, | |
"logits/chosen": -2.4298081398010254, | |
"logits/rejected": -2.6570680141448975, | |
"logps/chosen": -9.846342086791992, | |
"logps/rejected": -20.0854434967041, | |
"loss": 0.6918, | |
"rewards/accuracies": 0.987500011920929, | |
"rewards/chosen": -14.769511222839355, | |
"rewards/margins": 15.35865306854248, | |
"rewards/rejected": -30.1281681060791, | |
"step": 1020 | |
}, | |
{ | |
"epoch": 0.8453016003282725, | |
"grad_norm": 16.403400421142578, | |
"learning_rate": 1.11652112689164e-06, | |
"logits/chosen": -2.3441762924194336, | |
"logits/rejected": -2.5944087505340576, | |
"logps/chosen": -9.49109172821045, | |
"logps/rejected": -21.233768463134766, | |
"loss": 0.8113, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -14.236638069152832, | |
"rewards/margins": 17.614015579223633, | |
"rewards/rejected": -31.850650787353516, | |
"step": 1030 | |
}, | |
{ | |
"epoch": 0.853508411981945, | |
"grad_norm": 4.5160040855407715, | |
"learning_rate": 1.073216080788921e-06, | |
"logits/chosen": -2.4048399925231934, | |
"logits/rejected": -2.6450343132019043, | |
"logps/chosen": -8.749728202819824, | |
"logps/rejected": -20.77089500427246, | |
"loss": 0.8586, | |
"rewards/accuracies": 0.949999988079071, | |
"rewards/chosen": -13.124593734741211, | |
"rewards/margins": 18.031749725341797, | |
"rewards/rejected": -31.15634536743164, | |
"step": 1040 | |
}, | |
{ | |
"epoch": 0.8617152236356176, | |
"grad_norm": 5.133055210113525, | |
"learning_rate": 1.0305368692688175e-06, | |
"logits/chosen": -2.36543869972229, | |
"logits/rejected": -2.5925793647766113, | |
"logps/chosen": -9.336064338684082, | |
"logps/rejected": -20.378089904785156, | |
"loss": 0.8972, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -14.004096984863281, | |
"rewards/margins": 16.563039779663086, | |
"rewards/rejected": -30.5671329498291, | |
"step": 1050 | |
}, | |
{ | |
"epoch": 0.8617152236356176, | |
"eval_logits/chosen": -2.3072853088378906, | |
"eval_logits/rejected": -2.6326565742492676, | |
"eval_logps/chosen": -8.39708137512207, | |
"eval_logps/rejected": -20.57996940612793, | |
"eval_loss": 0.09786142408847809, | |
"eval_rewards/accuracies": 0.9595959782600403, | |
"eval_rewards/chosen": -12.595619201660156, | |
"eval_rewards/margins": 18.27433204650879, | |
"eval_rewards/rejected": -30.869949340820312, | |
"eval_runtime": 26.0409, | |
"eval_samples_per_second": 30.26, | |
"eval_steps_per_second": 3.802, | |
"step": 1050 | |
}, | |
{ | |
"epoch": 0.8699220352892901, | |
"grad_norm": 24.745431900024414, | |
"learning_rate": 9.88502212844063e-07, | |
"logits/chosen": -2.269465923309326, | |
"logits/rejected": -2.610417127609253, | |
"logps/chosen": -7.833917140960693, | |
"logps/rejected": -21.22043228149414, | |
"loss": 0.8923, | |
"rewards/accuracies": 1.0, | |
"rewards/chosen": -11.750875473022461, | |
"rewards/margins": 20.079769134521484, | |
"rewards/rejected": -31.830646514892578, | |
"step": 1060 | |
}, | |
{ | |
"epoch": 0.8781288469429627, | |
"grad_norm": 1.3538175821304321, | |
"learning_rate": 9.471305493042243e-07, | |
"logits/chosen": -2.3404107093811035, | |
"logits/rejected": -2.6831154823303223, | |
"logps/chosen": -9.409693717956543, | |
"logps/rejected": -22.07307243347168, | |
"loss": 0.8694, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -14.114542007446289, | |
"rewards/margins": 18.99506950378418, | |
"rewards/rejected": -33.10961151123047, | |
"step": 1070 | |
}, | |
{ | |
"epoch": 0.8863356585966352, | |
"grad_norm": 3.945587635040283, | |
"learning_rate": 9.064400256282757e-07, | |
"logits/chosen": -2.402027130126953, | |
"logits/rejected": -2.6573710441589355, | |
"logps/chosen": -9.867189407348633, | |
"logps/rejected": -21.119680404663086, | |
"loss": 1.0965, | |
"rewards/accuracies": 0.987500011920929, | |
"rewards/chosen": -14.800783157348633, | |
"rewards/margins": 16.878738403320312, | |
"rewards/rejected": -31.679523468017578, | |
"step": 1080 | |
}, | |
{ | |
"epoch": 0.8945424702503078, | |
"grad_norm": 5.244990348815918, | |
"learning_rate": 8.664484900247363e-07, | |
"logits/chosen": -2.3451342582702637, | |
"logits/rejected": -2.563744306564331, | |
"logps/chosen": -8.859700202941895, | |
"logps/rejected": -19.88092613220215, | |
"loss": 0.6535, | |
"rewards/accuracies": 0.9624999761581421, | |
"rewards/chosen": -13.28955078125, | |
"rewards/margins": 16.53183937072754, | |
"rewards/rejected": -29.82139015197754, | |
"step": 1090 | |
}, | |
{ | |
"epoch": 0.9027492819039803, | |
"grad_norm": 2.167773962020874, | |
"learning_rate": 8.271734841028553e-07, | |
"logits/chosen": -2.306011199951172, | |
"logits/rejected": -2.5858731269836426, | |
"logps/chosen": -8.683083534240723, | |
"logps/rejected": -20.459806442260742, | |
"loss": 0.7775, | |
"rewards/accuracies": 0.9750000238418579, | |
"rewards/chosen": -13.024624824523926, | |
"rewards/margins": 17.66508674621582, | |
"rewards/rejected": -30.689708709716797, | |
"step": 1100 | |
}, | |
{ | |
"epoch": 0.9027492819039803, | |
"eval_logits/chosen": -2.2880752086639404, | |
"eval_logits/rejected": -2.615443468093872, | |
"eval_logps/chosen": -7.504647731781006, | |
"eval_logps/rejected": -19.788803100585938, | |
"eval_loss": 0.09024464339017868, | |
"eval_rewards/accuracies": 0.9696969985961914, | |
"eval_rewards/chosen": -11.25697135925293, | |
"eval_rewards/margins": 18.42623519897461, | |
"eval_rewards/rejected": -29.68320655822754, | |
"eval_runtime": 26.0393, | |
"eval_samples_per_second": 30.262, | |
"eval_steps_per_second": 3.802, | |
"step": 1100 | |
} | |
], | |
"logging_steps": 10, | |
"max_steps": 1500, | |
"num_input_tokens_seen": 0, | |
"num_train_epochs": 2, | |
"save_steps": 50, | |
"stateful_callbacks": { | |
"TrainerControl": { | |
"args": { | |
"should_epoch_stop": false, | |
"should_evaluate": false, | |
"should_log": false, | |
"should_save": true, | |
"should_training_stop": false | |
}, | |
"attributes": {} | |
} | |
}, | |
"total_flos": 3.9998944549776916e+18, | |
"train_batch_size": 1, | |
"trial_name": null, | |
"trial_params": null | |
} | |