{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 478,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0020920502092050207,
      "grad_norm": 9.087554245569063,
      "learning_rate": 1.0416666666666666e-08,
      "logits/chosen": -2.7662220001220703,
      "logits/rejected": -2.7178127765655518,
      "logps/chosen": -269.6776123046875,
      "logps/rejected": -360.6510314941406,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.02092050209205021,
      "grad_norm": 9.138148617727442,
      "learning_rate": 1.0416666666666667e-07,
      "logits/chosen": -2.5923047065734863,
      "logits/rejected": -2.562880277633667,
      "logps/chosen": -264.8013916015625,
      "logps/rejected": -251.54592895507812,
      "loss": 0.6931,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.0002887251030188054,
      "rewards/margins": -8.625661575933918e-05,
      "rewards/rejected": -0.00020246852363925427,
      "step": 10
    },
    {
      "epoch": 0.04184100418410042,
      "grad_norm": 8.411516829739382,
      "learning_rate": 2.0833333333333333e-07,
      "logits/chosen": -2.653226613998413,
      "logits/rejected": -2.605504035949707,
      "logps/chosen": -281.4090576171875,
      "logps/rejected": -296.7936096191406,
      "loss": 0.6923,
      "rewards/accuracies": 0.543749988079071,
      "rewards/chosen": 0.0007672592182643712,
      "rewards/margins": 0.0005850494490005076,
      "rewards/rejected": 0.0001822096382966265,
      "step": 20
    },
    {
      "epoch": 0.06276150627615062,
      "grad_norm": 8.341151841493089,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.664233684539795,
      "logits/rejected": -2.5916261672973633,
      "logps/chosen": -299.38519287109375,
      "logps/rejected": -263.70245361328125,
      "loss": 0.6885,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": 0.012760737910866737,
      "rewards/margins": 0.010479705408215523,
      "rewards/rejected": 0.0022810332011431456,
      "step": 30
    },
    {
      "epoch": 0.08368200836820083,
      "grad_norm": 8.161334179415334,
      "learning_rate": 4.1666666666666667e-07,
      "logits/chosen": -2.557464838027954,
      "logits/rejected": -2.522446393966675,
      "logps/chosen": -263.1476745605469,
      "logps/rejected": -245.10128784179688,
      "loss": 0.6776,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 0.03926760330796242,
      "rewards/margins": 0.04648735374212265,
      "rewards/rejected": -0.007219746708869934,
      "step": 40
    },
    {
      "epoch": 0.10460251046025104,
      "grad_norm": 8.412121997546578,
      "learning_rate": 4.999733114418725e-07,
      "logits/chosen": -2.4982900619506836,
      "logits/rejected": -2.458754301071167,
      "logps/chosen": -279.72418212890625,
      "logps/rejected": -297.51483154296875,
      "loss": 0.6615,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.0003564223588909954,
      "rewards/margins": 0.0477915033698082,
      "rewards/rejected": -0.04743507504463196,
      "step": 50
    },
    {
      "epoch": 0.12552301255230125,
      "grad_norm": 11.28651377706942,
      "learning_rate": 4.990398100856366e-07,
      "logits/chosen": -2.512681245803833,
      "logits/rejected": -2.4412343502044678,
      "logps/chosen": -273.84716796875,
      "logps/rejected": -313.0481872558594,
      "loss": 0.639,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.02180738002061844,
      "rewards/margins": 0.1268891841173172,
      "rewards/rejected": -0.14869657158851624,
      "step": 60
    },
    {
      "epoch": 0.14644351464435146,
      "grad_norm": 10.758366048663921,
      "learning_rate": 4.967775735898179e-07,
      "logits/chosen": -2.3633859157562256,
      "logits/rejected": -2.360542058944702,
      "logps/chosen": -271.2507019042969,
      "logps/rejected": -283.77508544921875,
      "loss": 0.6157,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.05616923049092293,
      "rewards/margins": 0.21398130059242249,
      "rewards/rejected": -0.2701505124568939,
      "step": 70
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 16.20901309109112,
      "learning_rate": 4.931986719649298e-07,
      "logits/chosen": -2.5523719787597656,
      "logits/rejected": -2.4694900512695312,
      "logps/chosen": -335.7901306152344,
      "logps/rejected": -304.34521484375,
      "loss": 0.6023,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.12984994053840637,
      "rewards/margins": 0.2733689248561859,
      "rewards/rejected": -0.4032188355922699,
      "step": 80
    },
    {
      "epoch": 0.18828451882845187,
      "grad_norm": 20.628675413942332,
      "learning_rate": 4.883222001996351e-07,
      "logits/chosen": -2.1346848011016846,
      "logits/rejected": -2.0720489025115967,
      "logps/chosen": -313.75103759765625,
      "logps/rejected": -355.2326965332031,
      "loss": 0.5741,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.546220064163208,
      "rewards/margins": 0.4818921983242035,
      "rewards/rejected": -1.0281121730804443,
      "step": 90
    },
    {
      "epoch": 0.20920502092050208,
      "grad_norm": 15.512372945392126,
      "learning_rate": 4.821741763807186e-07,
      "logits/chosen": -1.6733529567718506,
      "logits/rejected": -1.5271198749542236,
      "logps/chosen": -313.7639465332031,
      "logps/rejected": -366.28643798828125,
      "loss": 0.5644,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.5339412689208984,
      "rewards/margins": 0.5581195950508118,
      "rewards/rejected": -1.0920608043670654,
      "step": 100
    },
    {
      "epoch": 0.20920502092050208,
      "eval_logits/chosen": -1.8042092323303223,
      "eval_logits/rejected": -1.6462740898132324,
      "eval_logps/chosen": -310.176025390625,
      "eval_logps/rejected": -361.0130615234375,
      "eval_loss": 0.5693150758743286,
      "eval_rewards/accuracies": 0.75,
      "eval_rewards/chosen": -0.4760096073150635,
      "eval_rewards/margins": 0.5072795152664185,
      "eval_rewards/rejected": -0.9832891225814819,
      "eval_runtime": 39.9983,
      "eval_samples_per_second": 50.002,
      "eval_steps_per_second": 0.8,
      "step": 100
    },
    {
      "epoch": 0.2301255230125523,
      "grad_norm": 20.060254091732027,
      "learning_rate": 4.747874028753375e-07,
      "logits/chosen": -1.6645755767822266,
      "logits/rejected": -1.2451729774475098,
      "logps/chosen": -368.3230285644531,
      "logps/rejected": -373.16448974609375,
      "loss": 0.5666,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.6374339461326599,
      "rewards/margins": 0.5157445669174194,
      "rewards/rejected": -1.1531786918640137,
      "step": 110
    },
    {
      "epoch": 0.2510460251046025,
      "grad_norm": 20.825225854686142,
      "learning_rate": 4.662012913161997e-07,
      "logits/chosen": -1.753260612487793,
      "logits/rejected": -1.4734609127044678,
      "logps/chosen": -327.0220947265625,
      "logps/rejected": -357.7860107421875,
      "loss": 0.5607,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.4582589268684387,
      "rewards/margins": 0.5211780071258545,
      "rewards/rejected": -0.9794368743896484,
      "step": 120
    },
    {
      "epoch": 0.2719665271966527,
      "grad_norm": 23.496638764764725,
      "learning_rate": 4.5646165232345103e-07,
      "logits/chosen": -0.5949552655220032,
      "logits/rejected": -0.22445210814476013,
      "logps/chosen": -385.47186279296875,
      "logps/rejected": -434.91864013671875,
      "loss": 0.54,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.0354242324829102,
      "rewards/margins": 0.6292093992233276,
      "rewards/rejected": -1.6646335124969482,
      "step": 130
    },
    {
      "epoch": 0.2928870292887029,
      "grad_norm": 19.8432073115127,
      "learning_rate": 4.456204510851956e-07,
      "logits/chosen": -0.9896577596664429,
      "logits/rejected": -0.6126431226730347,
      "logps/chosen": -403.9255676269531,
      "logps/rejected": -456.3795471191406,
      "loss": 0.5379,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.8796173334121704,
      "rewards/margins": 0.6777242422103882,
      "rewards/rejected": -1.557341456413269,
      "step": 140
    },
    {
      "epoch": 0.3138075313807531,
      "grad_norm": 24.252740229867097,
      "learning_rate": 4.337355301007335e-07,
      "logits/chosen": -1.2636734247207642,
      "logits/rejected": -0.7799659967422485,
      "logps/chosen": -352.56219482421875,
      "logps/rejected": -398.70361328125,
      "loss": 0.5431,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.6931993365287781,
      "rewards/margins": 0.5318464040756226,
      "rewards/rejected": -1.2250458002090454,
      "step": 150
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 23.380475895734964,
      "learning_rate": 4.2087030056579986e-07,
      "logits/chosen": -0.7647526860237122,
      "logits/rejected": -0.008624720387160778,
      "logps/chosen": -355.8943786621094,
      "logps/rejected": -413.7810974121094,
      "loss": 0.5422,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.8285518884658813,
      "rewards/margins": 0.7353727221488953,
      "rewards/rejected": -1.5639246702194214,
      "step": 160
    },
    {
      "epoch": 0.35564853556485354,
      "grad_norm": 25.431140167493865,
      "learning_rate": 4.070934040463998e-07,
      "logits/chosen": -0.6836012601852417,
      "logits/rejected": -0.216752290725708,
      "logps/chosen": -330.0359191894531,
      "logps/rejected": -370.3531799316406,
      "loss": 0.5398,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.8072524070739746,
      "rewards/margins": 0.5846077799797058,
      "rewards/rejected": -1.3918602466583252,
      "step": 170
    },
    {
      "epoch": 0.37656903765690375,
      "grad_norm": 20.150510606814603,
      "learning_rate": 3.9247834624635404e-07,
      "logits/chosen": -0.19145147502422333,
      "logits/rejected": 0.5919758081436157,
      "logps/chosen": -332.99493408203125,
      "logps/rejected": -376.44049072265625,
      "loss": 0.5038,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.7668272256851196,
      "rewards/margins": 0.7220631837844849,
      "rewards/rejected": -1.4888904094696045,
      "step": 180
    },
    {
      "epoch": 0.39748953974895396,
      "grad_norm": 26.333247990716465,
      "learning_rate": 3.7710310482256523e-07,
      "logits/chosen": -0.44246259331703186,
      "logits/rejected": 0.3344479501247406,
      "logps/chosen": -364.98065185546875,
      "logps/rejected": -421.58978271484375,
      "loss": 0.5202,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.9371756315231323,
      "rewards/margins": 0.6810097098350525,
      "rewards/rejected": -1.6181854009628296,
      "step": 190
    },
    {
      "epoch": 0.41841004184100417,
      "grad_norm": 61.74429639749924,
      "learning_rate": 3.610497133404795e-07,
      "logits/chosen": -0.9733116030693054,
      "logits/rejected": -0.424935907125473,
      "logps/chosen": -343.17181396484375,
      "logps/rejected": -409.66015625,
      "loss": 0.5482,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.8325435519218445,
      "rewards/margins": 0.7432821393013,
      "rewards/rejected": -1.5758254528045654,
      "step": 200
    },
    {
      "epoch": 0.41841004184100417,
      "eval_logits/chosen": -1.6512479782104492,
      "eval_logits/rejected": -1.1108059883117676,
      "eval_logps/chosen": -320.461181640625,
      "eval_logps/rejected": -395.9195556640625,
      "eval_loss": 0.5285090208053589,
      "eval_rewards/accuracies": 0.78125,
      "eval_rewards/chosen": -0.5788613557815552,
      "eval_rewards/margins": 0.7534924745559692,
      "eval_rewards/rejected": -1.3323538303375244,
      "eval_runtime": 39.8686,
      "eval_samples_per_second": 50.165,
      "eval_steps_per_second": 0.803,
      "step": 200
    },
    {
      "epoch": 0.4393305439330544,
      "grad_norm": 23.039933494228325,
      "learning_rate": 3.4440382358952115e-07,
      "logits/chosen": -1.410441279411316,
      "logits/rejected": -0.5446120500564575,
      "logps/chosen": -362.73443603515625,
      "logps/rejected": -392.86572265625,
      "loss": 0.5374,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.7306278944015503,
      "rewards/margins": 0.6853674650192261,
      "rewards/rejected": -1.4159953594207764,
      "step": 210
    },
    {
      "epoch": 0.4602510460251046,
      "grad_norm": 28.84599390547246,
      "learning_rate": 3.272542485937368e-07,
      "logits/chosen": 0.19500534236431122,
      "logits/rejected": 1.4215036630630493,
      "logps/chosen": -366.9354248046875,
      "logps/rejected": -427.8094787597656,
      "loss": 0.521,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -1.015799641609192,
      "rewards/margins": 0.8056750297546387,
      "rewards/rejected": -1.8214746713638306,
      "step": 220
    },
    {
      "epoch": 0.4811715481171548,
      "grad_norm": 23.1895401529341,
      "learning_rate": 3.096924887558854e-07,
      "logits/chosen": 0.23373627662658691,
      "logits/rejected": 1.2636606693267822,
      "logps/chosen": -344.4990234375,
      "logps/rejected": -433.19549560546875,
      "loss": 0.5423,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.9555310010910034,
      "rewards/margins": 0.8986861109733582,
      "rewards/rejected": -1.8542171716690063,
      "step": 230
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 25.383792279845476,
      "learning_rate": 2.9181224366319943e-07,
      "logits/chosen": -0.8247690200805664,
      "logits/rejected": -0.12202179431915283,
      "logps/chosen": -338.62860107421875,
      "logps/rejected": -393.34130859375,
      "loss": 0.5049,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.7489091753959656,
      "rewards/margins": 0.6940961480140686,
      "rewards/rejected": -1.4430053234100342,
      "step": 240
    },
    {
      "epoch": 0.5230125523012552,
      "grad_norm": 23.764621178377002,
      "learning_rate": 2.7370891215954565e-07,
      "logits/chosen": -0.5904697179794312,
      "logits/rejected": 0.471386581659317,
      "logps/chosen": -372.9373474121094,
      "logps/rejected": -412.6297302246094,
      "loss": 0.5051,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.7474657297134399,
      "rewards/margins": 0.7835052013397217,
      "rewards/rejected": -1.5309709310531616,
      "step": 250
    },
    {
      "epoch": 0.5439330543933054,
      "grad_norm": 31.749178242054935,
      "learning_rate": 2.55479083351317e-07,
      "logits/chosen": -0.029442792758345604,
      "logits/rejected": 1.2092030048370361,
      "logps/chosen": -399.5458068847656,
      "logps/rejected": -442.3656311035156,
      "loss": 0.5089,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.0523649454116821,
      "rewards/margins": 0.8320234417915344,
      "rewards/rejected": -1.8843883275985718,
      "step": 260
    },
    {
      "epoch": 0.5648535564853556,
      "grad_norm": 32.580466587949495,
      "learning_rate": 2.3722002126275822e-07,
      "logits/chosen": -0.23208408057689667,
      "logits/rejected": 0.542866051197052,
      "logps/chosen": -380.35986328125,
      "logps/rejected": -433.56298828125,
      "loss": 0.5176,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -1.0335900783538818,
      "rewards/margins": 0.7255193591117859,
      "rewards/rejected": -1.7591092586517334,
      "step": 270
    },
    {
      "epoch": 0.5857740585774058,
      "grad_norm": 26.02223175106448,
      "learning_rate": 2.19029145890313e-07,
      "logits/chosen": -0.5966639518737793,
      "logits/rejected": 0.5926642417907715,
      "logps/chosen": -369.0196228027344,
      "logps/rejected": -428.75811767578125,
      "loss": 0.5287,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -1.0736175775527954,
      "rewards/margins": 0.7921027541160583,
      "rewards/rejected": -1.8657201528549194,
      "step": 280
    },
    {
      "epoch": 0.606694560669456,
      "grad_norm": 25.62251347650328,
      "learning_rate": 2.0100351342479216e-07,
      "logits/chosen": -0.8950376510620117,
      "logits/rejected": 0.23548343777656555,
      "logps/chosen": -361.40338134765625,
      "logps/rejected": -425.9239196777344,
      "loss": 0.515,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.103497862815857,
      "rewards/margins": 0.7359486222267151,
      "rewards/rejected": -1.8394466638565063,
      "step": 290
    },
    {
      "epoch": 0.6276150627615062,
      "grad_norm": 27.37599869158437,
      "learning_rate": 1.8323929841460178e-07,
      "logits/chosen": -0.7678520083427429,
      "logits/rejected": 0.4499804973602295,
      "logps/chosen": -407.6468811035156,
      "logps/rejected": -444.9945373535156,
      "loss": 0.4952,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -1.0661542415618896,
      "rewards/margins": 0.7832947969436646,
      "rewards/rejected": -1.8494491577148438,
      "step": 300
    },
    {
      "epoch": 0.6276150627615062,
      "eval_logits/chosen": -0.3939850628376007,
      "eval_logits/rejected": 0.5574444532394409,
      "eval_logps/chosen": -364.551513671875,
      "eval_logps/rejected": -457.5015869140625,
      "eval_loss": 0.506661593914032,
      "eval_rewards/accuracies": 0.7734375,
      "eval_rewards/chosen": -1.0197644233703613,
      "eval_rewards/margins": 0.9284095168113708,
      "eval_rewards/rejected": -1.948173999786377,
      "eval_runtime": 39.937,
      "eval_samples_per_second": 50.079,
      "eval_steps_per_second": 0.801,
      "step": 300
    },
    {
      "epoch": 0.6485355648535565,
      "grad_norm": 28.6039895111547,
      "learning_rate": 1.6583128063291573e-07,
      "logits/chosen": -0.16188159584999084,
      "logits/rejected": 0.7324084043502808,
      "logps/chosen": -413.48211669921875,
      "logps/rejected": -454.82049560546875,
      "loss": 0.4907,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -1.1815006732940674,
      "rewards/margins": 0.7815917134284973,
      "rewards/rejected": -1.963092565536499,
      "step": 310
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 31.106644428753697,
      "learning_rate": 1.488723393865766e-07,
      "logits/chosen": 0.08699577301740646,
      "logits/rejected": 1.0748140811920166,
      "logps/chosen": -411.9208984375,
      "logps/rejected": -441.51885986328125,
      "loss": 0.4821,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.1822444200515747,
      "rewards/margins": 0.7845796346664429,
      "rewards/rejected": -1.966823935508728,
      "step": 320
    },
    {
      "epoch": 0.6903765690376569,
      "grad_norm": 30.058877077850866,
      "learning_rate": 1.3245295796480788e-07,
      "logits/chosen": -0.6640967130661011,
      "logits/rejected": 0.42964258790016174,
      "logps/chosen": -359.71240234375,
      "logps/rejected": -434.0989685058594,
      "loss": 0.4928,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.9748628735542297,
      "rewards/margins": 0.7710379362106323,
      "rewards/rejected": -1.7459007501602173,
      "step": 330
    },
    {
      "epoch": 0.7112970711297071,
      "grad_norm": 41.27703909057966,
      "learning_rate": 1.1666074087171627e-07,
      "logits/chosen": -0.4426918923854828,
      "logits/rejected": 0.6907607316970825,
      "logps/chosen": -376.0263671875,
      "logps/rejected": -474.44952392578125,
      "loss": 0.5043,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -0.8598421216011047,
      "rewards/margins": 1.083594560623169,
      "rewards/rejected": -1.943436622619629,
      "step": 340
    },
    {
      "epoch": 0.7322175732217573,
      "grad_norm": 25.85661223238129,
      "learning_rate": 1.0157994641835734e-07,
      "logits/chosen": -0.19413986802101135,
      "logits/rejected": 0.754654586315155,
      "logps/chosen": -361.0034484863281,
      "logps/rejected": -430.91253662109375,
      "loss": 0.4784,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -1.0284228324890137,
      "rewards/margins": 0.8736444711685181,
      "rewards/rejected": -1.9020671844482422,
      "step": 350
    },
    {
      "epoch": 0.7531380753138075,
      "grad_norm": 24.132100088486087,
      "learning_rate": 8.729103716819111e-08,
      "logits/chosen": -0.49086451530456543,
      "logits/rejected": 0.8730924725532532,
      "logps/chosen": -414.8470764160156,
      "logps/rejected": -465.10357666015625,
      "loss": 0.5147,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -1.1219046115875244,
      "rewards/margins": 0.9166750907897949,
      "rewards/rejected": -2.0385801792144775,
      "step": 360
    },
    {
      "epoch": 0.7740585774058577,
      "grad_norm": 26.79975843848195,
      "learning_rate": 7.387025063449081e-08,
      "logits/chosen": -0.14118070900440216,
      "logits/rejected": 0.7372316718101501,
      "logps/chosen": -402.12664794921875,
      "logps/rejected": -436.84375,
      "loss": 0.509,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -1.2679729461669922,
      "rewards/margins": 0.7179969549179077,
      "rewards/rejected": -1.9859699010849,
      "step": 370
    },
    {
      "epoch": 0.7949790794979079,
      "grad_norm": 25.72376549261972,
      "learning_rate": 6.138919252022435e-08,
      "logits/chosen": 0.09815589338541031,
      "logits/rejected": 1.0596524477005005,
      "logps/chosen": -381.30810546875,
      "logps/rejected": -490.9652404785156,
      "loss": 0.5048,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.4183294773101807,
      "rewards/margins": 0.9044706225395203,
      "rewards/rejected": -2.3228001594543457,
      "step": 380
    },
    {
      "epoch": 0.8158995815899581,
      "grad_norm": 24.193050103935505,
      "learning_rate": 4.991445467064689e-08,
      "logits/chosen": -0.3292013108730316,
      "logits/rejected": 0.42220011353492737,
      "logps/chosen": -420.472900390625,
      "logps/rejected": -483.74005126953125,
      "loss": 0.4897,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.1483708620071411,
      "rewards/margins": 0.8494104146957397,
      "rewards/rejected": -1.99778151512146,
      "step": 390
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 25.887805811543046,
      "learning_rate": 3.9507259776993954e-08,
      "logits/chosen": -0.24365536868572235,
      "logits/rejected": 1.0439943075180054,
      "logps/chosen": -386.0181579589844,
      "logps/rejected": -473.2196350097656,
      "loss": 0.5037,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -1.1546484231948853,
      "rewards/margins": 0.9076915979385376,
      "rewards/rejected": -2.062340259552002,
      "step": 400
    },
    {
      "epoch": 0.8368200836820083,
      "eval_logits/chosen": -0.3904561996459961,
      "eval_logits/rejected": 0.6357870101928711,
      "eval_logps/chosen": -366.52386474609375,
      "eval_logps/rejected": -463.7657775878906,
      "eval_loss": 0.5006148815155029,
      "eval_rewards/accuracies": 0.78515625,
      "eval_rewards/chosen": -1.0394879579544067,
      "eval_rewards/margins": 0.9713287949562073,
      "eval_rewards/rejected": -2.010816812515259,
      "eval_runtime": 39.7974,
      "eval_samples_per_second": 50.254,
      "eval_steps_per_second": 0.804,
      "step": 400
    },
    {
      "epoch": 0.8577405857740585,
      "grad_norm": 28.490429904013755,
      "learning_rate": 3.022313472693447e-08,
      "logits/chosen": -0.38060712814331055,
      "logits/rejected": 0.9389992952346802,
      "logps/chosen": -409.4950256347656,
      "logps/rejected": -468.64715576171875,
      "loss": 0.5073,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -1.1054986715316772,
      "rewards/margins": 0.8928801417350769,
      "rewards/rejected": -1.9983787536621094,
      "step": 410
    },
    {
      "epoch": 0.8786610878661087,
      "grad_norm": 24.562220908212364,
      "learning_rate": 2.2111614344599684e-08,
      "logits/chosen": -0.4677561819553375,
      "logits/rejected": 0.5116729736328125,
      "logps/chosen": -406.060546875,
      "logps/rejected": -465.40130615234375,
      "loss": 0.4974,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -1.0901445150375366,
      "rewards/margins": 0.8191754221916199,
      "rewards/rejected": -1.9093201160430908,
      "step": 420
    },
    {
      "epoch": 0.899581589958159,
      "grad_norm": 25.456098375916802,
      "learning_rate": 1.521597710086439e-08,
      "logits/chosen": -0.10303229093551636,
      "logits/rejected": 1.026650309562683,
      "logps/chosen": -397.377197265625,
      "logps/rejected": -464.6932067871094,
      "loss": 0.4766,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -1.182983636856079,
      "rewards/margins": 0.8865542411804199,
      "rewards/rejected": -2.06953763961792,
      "step": 430
    },
    {
      "epoch": 0.9205020920502092,
      "grad_norm": 28.11934736057114,
      "learning_rate": 9.57301420397924e-09,
      "logits/chosen": -0.32768553495407104,
      "logits/rejected": 0.8762621879577637,
      "logps/chosen": -392.1067810058594,
      "logps/rejected": -460.0609436035156,
      "loss": 0.4971,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.0779250860214233,
      "rewards/margins": 0.8317632675170898,
      "rewards/rejected": -1.9096883535385132,
      "step": 440
    },
    {
      "epoch": 0.9414225941422594,
      "grad_norm": 24.86592939122457,
      "learning_rate": 5.212833302556258e-09,
      "logits/chosen": -0.24481463432312012,
      "logits/rejected": 0.7605234384536743,
      "logps/chosen": -409.94793701171875,
      "logps/rejected": -506.735595703125,
      "loss": 0.4959,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -1.1622434854507446,
      "rewards/margins": 0.8344427347183228,
      "rewards/rejected": -1.9966862201690674,
      "step": 450
    },
    {
      "epoch": 0.9623430962343096,
      "grad_norm": 27.408370428716005,
      "learning_rate": 2.158697848236607e-09,
      "logits/chosen": -0.22167810797691345,
      "logits/rejected": 0.9164765477180481,
      "logps/chosen": -382.83819580078125,
      "logps/rejected": -436.36114501953125,
      "loss": 0.498,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -1.1093404293060303,
      "rewards/margins": 0.827775776386261,
      "rewards/rejected": -1.937116265296936,
      "step": 460
    },
    {
      "epoch": 0.9832635983263598,
      "grad_norm": 25.655126921818987,
      "learning_rate": 4.269029751107489e-10,
      "logits/chosen": -0.3841325640678406,
      "logits/rejected": 0.87626713514328,
      "logps/chosen": -385.66424560546875,
      "logps/rejected": -474.7689514160156,
      "loss": 0.492,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -1.0700037479400635,
      "rewards/margins": 0.8540438413619995,
      "rewards/rejected": -1.9240477085113525,
      "step": 470
    },
    {
      "epoch": 1.0,
      "step": 478,
      "total_flos": 0.0,
      "train_loss": 0.5400827195355085,
      "train_runtime": 3932.0785,
      "train_samples_per_second": 15.548,
      "train_steps_per_second": 0.122
    }
  ],
  "logging_steps": 10,
  "max_steps": 478,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}