{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9984917043740573,
  "eval_steps": 100,
  "global_step": 331,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0030165912518853697,
      "grad_norm": 153.23599243164062,
      "learning_rate": 1.4705882352941178e-07,
      "logits/chosen": -2.375755786895752,
      "logits/rejected": -2.402980327606201,
      "logps/chosen": -1.5958898067474365,
      "logps/rejected": -1.7740895748138428,
      "loss": 2500.0,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.030165912518853696,
      "grad_norm": 118.78604125976562,
      "learning_rate": 1.4705882352941177e-06,
      "logits/chosen": -2.3309760093688965,
      "logits/rejected": -2.383087635040283,
      "logps/chosen": -1.5815058946609497,
      "logps/rejected": -1.6628098487854004,
      "loss": 2499.6899,
      "rewards/accuracies": 0.4930555522441864,
      "rewards/chosen": 0.00026574183721095324,
      "rewards/margins": 1.8858834664570168e-05,
      "rewards/rejected": 0.0002468830207362771,
      "step": 10
    },
    {
      "epoch": 0.06033182503770739,
      "grad_norm": 107.57190704345703,
      "learning_rate": 2.9411764705882355e-06,
      "logits/chosen": -2.3686156272888184,
      "logits/rejected": -2.4214446544647217,
      "logps/chosen": -1.5373353958129883,
      "logps/rejected": -1.5770070552825928,
      "loss": 2498.7738,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.001089997822418809,
      "rewards/margins": 0.00014883658150210977,
      "rewards/rejected": 0.0009411612409166992,
      "step": 20
    },
    {
      "epoch": 0.09049773755656108,
      "grad_norm": 116.10955810546875,
      "learning_rate": 4.411764705882353e-06,
      "logits/chosen": -2.4276905059814453,
      "logits/rejected": -2.4831461906433105,
      "logps/chosen": -1.5260236263275146,
      "logps/rejected": -1.5350881814956665,
      "loss": 2497.6447,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": 0.0010487588588148355,
      "rewards/margins": 0.00025444405036978424,
      "rewards/rejected": 0.0007943148957565427,
      "step": 30
    },
    {
      "epoch": 0.12066365007541478,
      "grad_norm": 207.2774200439453,
      "learning_rate": 4.994966691179712e-06,
      "logits/chosen": -2.456132411956787,
      "logits/rejected": -2.4862520694732666,
      "logps/chosen": -1.5139974355697632,
      "logps/rejected": -1.6103894710540771,
      "loss": 2494.1223,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": 0.00042313727317377925,
      "rewards/margins": 0.0003380453563295305,
      "rewards/rejected": 8.509196049999446e-05,
      "step": 40
    },
    {
      "epoch": 0.15082956259426847,
      "grad_norm": 297.2610778808594,
      "learning_rate": 4.964280947263677e-06,
      "logits/chosen": -2.456686019897461,
      "logits/rejected": -2.4907171726226807,
      "logps/chosen": -1.7029392719268799,
      "logps/rejected": -1.866715669631958,
      "loss": 2488.6879,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.0010579732479527593,
      "rewards/margins": 0.0010943589732050896,
      "rewards/rejected": -0.0021523318719118834,
      "step": 50
    },
    {
      "epoch": 0.18099547511312217,
      "grad_norm": 418.49908447265625,
      "learning_rate": 4.906048344162677e-06,
      "logits/chosen": -2.526425838470459,
      "logits/rejected": -2.6283812522888184,
      "logps/chosen": -2.2732770442962646,
      "logps/rejected": -2.463239908218384,
      "loss": 2478.1562,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.006247864570468664,
      "rewards/margins": 0.0015761550748720765,
      "rewards/rejected": -0.007824019528925419,
      "step": 60
    },
    {
      "epoch": 0.21116138763197587,
      "grad_norm": 884.2985229492188,
      "learning_rate": 4.8209198325401815e-06,
      "logits/chosen": -2.4687628746032715,
      "logits/rejected": -2.5590322017669678,
      "logps/chosen": -3.124673366546631,
      "logps/rejected": -3.5420584678649902,
      "loss": 2466.9961,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -0.01539889257401228,
      "rewards/margins": 0.003336571855470538,
      "rewards/rejected": -0.018735462799668312,
      "step": 70
    },
    {
      "epoch": 0.24132730015082957,
      "grad_norm": 2458.998291015625,
      "learning_rate": 4.709847017822876e-06,
      "logits/chosen": -2.2200820446014404,
      "logits/rejected": -2.332976818084717,
      "logps/chosen": -5.963655471801758,
      "logps/rejected": -6.942639350891113,
      "loss": 2410.8664,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.04354003071784973,
      "rewards/margins": 0.009163432754576206,
      "rewards/rejected": -0.05270346254110336,
      "step": 80
    },
    {
      "epoch": 0.27149321266968324,
      "grad_norm": 3877.559326171875,
      "learning_rate": 4.5740715227200904e-06,
      "logits/chosen": -2.4160284996032715,
      "logits/rejected": -2.449448585510254,
      "logps/chosen": -12.50175666809082,
      "logps/rejected": -15.007980346679688,
      "loss": 2301.9066,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -0.10840582847595215,
      "rewards/margins": 0.02461237832903862,
      "rewards/rejected": -0.13301819562911987,
      "step": 90
    },
    {
      "epoch": 0.30165912518853694,
      "grad_norm": 5709.41943359375,
      "learning_rate": 4.415111107797445e-06,
      "logits/chosen": -3.1761019229888916,
      "logits/rejected": -3.347121000289917,
      "logps/chosen": -13.674809455871582,
      "logps/rejected": -16.90283203125,
      "loss": 2178.8832,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.12066298723220825,
      "rewards/margins": 0.03184697404503822,
      "rewards/rejected": -0.15250995755195618,
      "step": 100
    },
    {
      "epoch": 0.30165912518853694,
      "eval_logits/chosen": -3.1136586666107178,
      "eval_logits/rejected": -3.319279909133911,
      "eval_logps/chosen": -11.729300498962402,
      "eval_logps/rejected": -16.831649780273438,
      "eval_loss": 2139.634033203125,
      "eval_rewards/accuracies": 0.6780821681022644,
      "eval_rewards/chosen": -0.10137370228767395,
      "eval_rewards/margins": 0.050448909401893616,
      "eval_rewards/rejected": -0.15182259678840637,
      "eval_runtime": 118.6764,
      "eval_samples_per_second": 39.233,
      "eval_steps_per_second": 0.615,
      "step": 100
    },
    {
      "epoch": 0.33182503770739064,
      "grad_norm": 6360.60302734375,
      "learning_rate": 4.2347427052552725e-06,
      "logits/chosen": -3.4674181938171387,
      "logits/rejected": -3.7290732860565186,
      "logps/chosen": -14.259218215942383,
      "logps/rejected": -19.902536392211914,
      "loss": 2163.7926,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.12664386630058289,
      "rewards/margins": 0.05585067346692085,
      "rewards/rejected": -0.18249455094337463,
      "step": 110
    },
    {
      "epoch": 0.36199095022624433,
      "grad_norm": 5877.59765625,
      "learning_rate": 4.034982555568005e-06,
      "logits/chosen": -3.6185920238494873,
      "logits/rejected": -3.8847007751464844,
      "logps/chosen": -9.406723976135254,
      "logps/rejected": -15.053186416625977,
      "loss": 2148.0309,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.07808898389339447,
      "rewards/margins": 0.056064266711473465,
      "rewards/rejected": -0.13415324687957764,
      "step": 120
    },
    {
      "epoch": 0.39215686274509803,
      "grad_norm": 5015.537109375,
      "learning_rate": 3.8180636690262565e-06,
      "logits/chosen": -4.268899440765381,
      "logits/rejected": -4.4745073318481445,
      "logps/chosen": -15.916064262390137,
      "logps/rejected": -19.643577575683594,
      "loss": 2108.7164,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": -0.1432579904794693,
      "rewards/margins": 0.0371844656765461,
      "rewards/rejected": -0.1804424524307251,
      "step": 130
    },
    {
      "epoch": 0.42232277526395173,
      "grad_norm": 6310.57421875,
      "learning_rate": 3.5864108641267815e-06,
      "logits/chosen": -4.173009395599365,
      "logits/rejected": -4.646049976348877,
      "logps/chosen": -11.417980194091797,
      "logps/rejected": -18.31503677368164,
      "loss": 2076.9801,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.09848929196596146,
      "rewards/margins": 0.06831111013889313,
      "rewards/rejected": -0.1668003797531128,
      "step": 140
    },
    {
      "epoch": 0.45248868778280543,
      "grad_norm": 6660.42333984375,
      "learning_rate": 3.3426136618426045e-06,
      "logits/chosen": -4.818399906158447,
      "logits/rejected": -5.288527488708496,
      "logps/chosen": -12.7257080078125,
      "logps/rejected": -20.176372528076172,
      "loss": 2057.0469,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.11181428283452988,
      "rewards/margins": 0.07331486791372299,
      "rewards/rejected": -0.18512913584709167,
      "step": 150
    },
    {
      "epoch": 0.48265460030165913,
      "grad_norm": 6026.16943359375,
      "learning_rate": 3.089397338773569e-06,
      "logits/chosen": -5.171442985534668,
      "logits/rejected": -5.675396919250488,
      "logps/chosen": -11.334589958190918,
      "logps/rejected": -18.081167221069336,
      "loss": 2062.909,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.09671419113874435,
      "rewards/margins": 0.06745417416095734,
      "rewards/rejected": -0.1641683727502823,
      "step": 160
    },
    {
      "epoch": 0.5128205128205128,
      "grad_norm": 8983.7353515625,
      "learning_rate": 2.829592462758401e-06,
      "logits/chosen": -6.229965686798096,
      "logits/rejected": -6.906668663024902,
      "logps/chosen": -21.240734100341797,
      "logps/rejected": -27.307592391967773,
      "loss": 2008.2311,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.19601497054100037,
      "rewards/margins": 0.06066788360476494,
      "rewards/rejected": -0.2566828429698944,
      "step": 170
    },
    {
      "epoch": 0.5429864253393665,
      "grad_norm": 6153.8115234375,
      "learning_rate": 2.566103251493184e-06,
      "logits/chosen": -5.295260429382324,
      "logits/rejected": -5.839069366455078,
      "logps/chosen": -13.433932304382324,
      "logps/rejected": -21.858623504638672,
      "loss": 2007.6971,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.11825932562351227,
      "rewards/margins": 0.0836234837770462,
      "rewards/rejected": -0.20188280940055847,
      "step": 180
    },
    {
      "epoch": 0.5731523378582202,
      "grad_norm": 9230.3251953125,
      "learning_rate": 2.3018751078580287e-06,
      "logits/chosen": -5.80704927444458,
      "logits/rejected": -6.411242485046387,
      "logps/chosen": -17.966712951660156,
      "logps/rejected": -25.16571044921875,
      "loss": 2067.9564,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -0.1631396859884262,
      "rewards/margins": 0.071785569190979,
      "rewards/rejected": -0.2349252700805664,
      "step": 190
    },
    {
      "epoch": 0.6033182503770739,
      "grad_norm": 7809.404296875,
      "learning_rate": 2.0398616948569495e-06,
      "logits/chosen": -6.302067756652832,
      "logits/rejected": -7.327548027038574,
      "logps/chosen": -22.0832576751709,
      "logps/rejected": -31.016321182250977,
      "loss": 1986.1408,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.20454540848731995,
      "rewards/margins": 0.08862989395856857,
      "rewards/rejected": -0.2931753098964691,
      "step": 200
    },
    {
      "epoch": 0.6033182503770739,
      "eval_logits/chosen": -5.291009902954102,
      "eval_logits/rejected": -6.192085266113281,
      "eval_logps/chosen": -15.668704986572266,
      "eval_logps/rejected": -24.450298309326172,
      "eval_loss": 1989.8780517578125,
      "eval_rewards/accuracies": 0.7054794430732727,
      "eval_rewards/chosen": -0.14076770842075348,
      "eval_rewards/margins": 0.08724134415388107,
      "eval_rewards/rejected": -0.22800907492637634,
      "eval_runtime": 118.4713,
      "eval_samples_per_second": 39.301,
      "eval_steps_per_second": 0.616,
      "step": 200
    },
    {
      "epoch": 0.6334841628959276,
      "grad_norm": 7728.20361328125,
      "learning_rate": 1.7829919182222752e-06,
      "logits/chosen": -5.712789058685303,
      "logits/rejected": -6.6895551681518555,
      "logps/chosen": -16.13177490234375,
      "logps/rejected": -24.802227020263672,
      "loss": 2035.7111,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.14506369829177856,
      "rewards/margins": 0.08624090254306793,
      "rewards/rejected": -0.2313046157360077,
      "step": 210
    },
    {
      "epoch": 0.6636500754147813,
      "grad_norm": 6645.58544921875,
      "learning_rate": 1.5341371857671782e-06,
      "logits/chosen": -6.007805824279785,
      "logits/rejected": -6.709867000579834,
      "logps/chosen": -17.56117057800293,
      "logps/rejected": -25.152942657470703,
      "loss": 1994.7727,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.15931251645088196,
      "rewards/margins": 0.07542156428098679,
      "rewards/rejected": -0.23473408818244934,
      "step": 220
    },
    {
      "epoch": 0.693815987933635,
      "grad_norm": 9661.85546875,
      "learning_rate": 1.2960793094762347e-06,
      "logits/chosen": -5.852481842041016,
      "logits/rejected": -6.616735935211182,
      "logps/chosen": -17.260818481445312,
      "logps/rejected": -24.995773315429688,
      "loss": 2002.0502,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.1562873125076294,
      "rewards/margins": 0.0772886723279953,
      "rewards/rejected": -0.2335759848356247,
      "step": 230
    },
    {
      "epoch": 0.7239819004524887,
      "grad_norm": 6644.81982421875,
      "learning_rate": 1.0714794091391074e-06,
      "logits/chosen": -5.791241645812988,
      "logits/rejected": -6.980679512023926,
      "logps/chosen": -17.29695701599121,
      "logps/rejected": -27.567981719970703,
      "loss": 2000.973,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.1567213237285614,
      "rewards/margins": 0.1022886261343956,
      "rewards/rejected": -0.2590099275112152,
      "step": 240
    },
    {
      "epoch": 0.7541478129713424,
      "grad_norm": 7918.15234375,
      "learning_rate": 8.628481651367876e-07,
      "logits/chosen": -5.269460201263428,
      "logits/rejected": -6.024519920349121,
      "logps/chosen": -14.571146965026855,
      "logps/rejected": -20.592914581298828,
      "loss": 2004.9424,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -0.12985333800315857,
      "rewards/margins": 0.059926945716142654,
      "rewards/rejected": -0.18978026509284973,
      "step": 250
    },
    {
      "epoch": 0.7843137254901961,
      "grad_norm": 11115.212890625,
      "learning_rate": 6.72517752908321e-07,
      "logits/chosen": -5.319769859313965,
      "logits/rejected": -6.440445899963379,
      "logps/chosen": -16.488807678222656,
      "logps/rejected": -25.459197998046875,
      "loss": 1987.9658,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.14929898083209991,
      "rewards/margins": 0.08880755305290222,
      "rewards/rejected": -0.23810651898384094,
      "step": 260
    },
    {
      "epoch": 0.8144796380090498,
      "grad_norm": 8613.94921875,
      "learning_rate": 5.026157728273967e-07,
      "logits/chosen": -5.407899379730225,
      "logits/rejected": -6.104758262634277,
      "logps/chosen": -14.600807189941406,
      "logps/rejected": -21.75990867614746,
      "loss": 1966.2213,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.13016536831855774,
      "rewards/margins": 0.07122168689966202,
      "rewards/rejected": -0.20138704776763916,
      "step": 270
    },
    {
      "epoch": 0.8446455505279035,
      "grad_norm": 8552.6865234375,
      "learning_rate": 3.5504146691255736e-07,
      "logits/chosen": -5.882079124450684,
      "logits/rejected": -6.893612861633301,
      "logps/chosen": -19.17007064819336,
      "logps/rejected": -29.472118377685547,
      "loss": 1977.0953,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.17585688829421997,
      "rewards/margins": 0.10231776535511017,
      "rewards/rejected": -0.27817463874816895,
      "step": 280
    },
    {
      "epoch": 0.8748114630467572,
      "grad_norm": 13345.8681640625,
      "learning_rate": 2.3144448823151394e-07,
      "logits/chosen": -6.029318332672119,
      "logits/rejected": -6.946678161621094,
      "logps/chosen": -20.550439834594727,
      "logps/rejected": -28.8807373046875,
      "loss": 2005.8379,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.18934504687786102,
      "rewards/margins": 0.08309401571750641,
      "rewards/rejected": -0.27243906259536743,
      "step": 290
    },
    {
      "epoch": 0.9049773755656109,
      "grad_norm": 7972.18017578125,
      "learning_rate": 1.3320646032487394e-07,
      "logits/chosen": -5.894993305206299,
      "logits/rejected": -6.6036481857299805,
      "logps/chosen": -18.44249153137207,
      "logps/rejected": -27.600574493408203,
      "loss": 2000.1098,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.16788385808467865,
      "rewards/margins": 0.0909862369298935,
      "rewards/rejected": -0.25887009501457214,
      "step": 300
    },
    {
      "epoch": 0.9049773755656109,
      "eval_logits/chosen": -5.0283427238464355,
      "eval_logits/rejected": -6.058168411254883,
      "eval_logps/chosen": -15.66148567199707,
      "eval_logps/rejected": -25.235349655151367,
      "eval_loss": 1964.476806640625,
      "eval_rewards/accuracies": 0.7243150472640991,
      "eval_rewards/chosen": -0.14069554209709167,
      "eval_rewards/margins": 0.09516405314207077,
      "eval_rewards/rejected": -0.23585957288742065,
      "eval_runtime": 118.2884,
      "eval_samples_per_second": 39.361,
      "eval_steps_per_second": 0.617,
      "step": 300
    },
    {
      "epoch": 0.9351432880844646,
      "grad_norm": 10022.904296875,
      "learning_rate": 6.142553278648239e-08,
      "logits/chosen": -5.300992012023926,
      "logits/rejected": -6.482455253601074,
      "logps/chosen": -14.489970207214355,
      "logps/rejected": -24.03298568725586,
      "loss": 1974.7594,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.1287548840045929,
      "rewards/margins": 0.09496350586414337,
      "rewards/rejected": -0.22371840476989746,
      "step": 310
    },
    {
      "epoch": 0.9653092006033183,
      "grad_norm": 10649.0498046875,
      "learning_rate": 1.6904105645142443e-08,
      "logits/chosen": -5.454372406005859,
      "logits/rejected": -6.349032402038574,
      "logps/chosen": -14.140310287475586,
      "logps/rejected": -23.745637893676758,
      "loss": 2000.5619,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.12575171887874603,
      "rewards/margins": 0.09515827149152756,
      "rewards/rejected": -0.220909982919693,
      "step": 320
    },
    {
      "epoch": 0.995475113122172,
      "grad_norm": 9284.1181640625,
      "learning_rate": 1.398597702123583e-10,
      "logits/chosen": -5.438736438751221,
      "logits/rejected": -6.345780849456787,
      "logps/chosen": -16.6071720123291,
      "logps/rejected": -23.371334075927734,
      "loss": 2015.8324,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.15046890079975128,
      "rewards/margins": 0.0670023113489151,
      "rewards/rejected": -0.21747121214866638,
      "step": 330
    },
    {
      "epoch": 0.9984917043740573,
      "step": 331,
      "total_flos": 0.0,
      "train_loss": 2150.5997450906343,
      "train_runtime": 2663.9905,
      "train_samples_per_second": 15.924,
      "train_steps_per_second": 0.124
    }
  ],
  "logging_steps": 10,
  "max_steps": 331,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}