{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.996510067114094,
"eval_steps": 400,
"global_step": 116,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008590604026845637,
"grad_norm": 26.041106753964655,
"learning_rate": 4.166666666666666e-08,
"logits/chosen": -1.5759165287017822,
"logits/rejected": -1.7932627201080322,
"logps/chosen": -402.52313232421875,
"logps/rejected": -1099.9710693359375,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.042953020134228186,
"grad_norm": 31.18860734724652,
"learning_rate": 2.0833333333333333e-07,
"logits/chosen": -1.7433890104293823,
"logits/rejected": -2.201554775238037,
"logps/chosen": -531.8023681640625,
"logps/rejected": -2555.797607421875,
"loss": 0.6926,
"rewards/accuracies": 0.3515625,
"rewards/chosen": 0.0006599800544790924,
"rewards/margins": 0.000775136286392808,
"rewards/rejected": -0.00011515617370605469,
"step": 5
},
{
"epoch": 0.08590604026845637,
"grad_norm": 27.29698099739781,
"learning_rate": 4.1666666666666667e-07,
"logits/chosen": -1.6542924642562866,
"logits/rejected": -2.1482906341552734,
"logps/chosen": -526.591064453125,
"logps/rejected": -2324.423828125,
"loss": 0.6773,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -0.0068870047107338905,
"rewards/margins": 0.027588676661252975,
"rewards/rejected": -0.03447568044066429,
"step": 10
},
{
"epoch": 0.12885906040268458,
"grad_norm": 19.203276482338538,
"learning_rate": 4.989741394042727e-07,
"logits/chosen": -1.768988847732544,
"logits/rejected": -2.3421971797943115,
"logps/chosen": -543.0718994140625,
"logps/rejected": -2687.59716796875,
"loss": 0.6056,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -0.043904103338718414,
"rewards/margins": 0.24259641766548157,
"rewards/rejected": -0.2865005135536194,
"step": 15
},
{
"epoch": 0.17181208053691274,
"grad_norm": 10.652934421675868,
"learning_rate": 4.92735454356513e-07,
"logits/chosen": -1.692357063293457,
"logits/rejected": -2.1176271438598633,
"logps/chosen": -575.0624389648438,
"logps/rejected": -2352.69873046875,
"loss": 0.4794,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -0.19454456865787506,
"rewards/margins": 0.9079583287239075,
"rewards/rejected": -1.1025029420852661,
"step": 20
},
{
"epoch": 0.21476510067114093,
"grad_norm": 6.620549530322006,
"learning_rate": 4.809698831278217e-07,
"logits/chosen": -1.7178752422332764,
"logits/rejected": -2.392866611480713,
"logps/chosen": -683.6615600585938,
"logps/rejected": -2998.378662109375,
"loss": 0.4252,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -0.37876173853874207,
"rewards/margins": 2.0470335483551025,
"rewards/rejected": -2.425795078277588,
"step": 25
},
{
"epoch": 0.25771812080536916,
"grad_norm": 13.984907018293939,
"learning_rate": 4.639453180753619e-07,
"logits/chosen": -1.6685655117034912,
"logits/rejected": -2.3044064044952393,
"logps/chosen": -616.4549560546875,
"logps/rejected": -2758.712646484375,
"loss": 0.3458,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -0.731316864490509,
"rewards/margins": 3.5061488151550293,
"rewards/rejected": -4.237465858459473,
"step": 30
},
{
"epoch": 0.3006711409395973,
"grad_norm": 12.563360315413236,
"learning_rate": 4.420493945100701e-07,
"logits/chosen": -1.7690975666046143,
"logits/rejected": -2.2737460136413574,
"logps/chosen": -740.88134765625,
"logps/rejected": -3318.674560546875,
"loss": 0.3563,
"rewards/accuracies": 0.875,
"rewards/chosen": -1.2466011047363281,
"rewards/margins": 4.410061359405518,
"rewards/rejected": -5.656662940979004,
"step": 35
},
{
"epoch": 0.3436241610738255,
"grad_norm": 58.638493321053595,
"learning_rate": 4.157806645601988e-07,
"logits/chosen": -1.5280271768569946,
"logits/rejected": -1.8888028860092163,
"logps/chosen": -631.5662231445312,
"logps/rejected": -3006.968017578125,
"loss": 0.2634,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -1.5436829328536987,
"rewards/margins": 4.522477626800537,
"rewards/rejected": -6.066160678863525,
"step": 40
},
{
"epoch": 0.3865771812080537,
"grad_norm": 34.148235602710315,
"learning_rate": 3.857372455503697e-07,
"logits/chosen": -1.418347716331482,
"logits/rejected": -1.708924651145935,
"logps/chosen": -827.2987060546875,
"logps/rejected": -3175.490966796875,
"loss": 0.2708,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -2.246793270111084,
"rewards/margins": 5.506977081298828,
"rewards/rejected": -7.753770351409912,
"step": 45
},
{
"epoch": 0.42953020134228187,
"grad_norm": 75.04768803288361,
"learning_rate": 3.5260320136318924e-07,
"logits/chosen": -1.4919449090957642,
"logits/rejected": -1.7019599676132202,
"logps/chosen": -761.5653686523438,
"logps/rejected": -3045.13427734375,
"loss": 0.2368,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -2.335999011993408,
"rewards/margins": 5.300050258636475,
"rewards/rejected": -7.636049747467041,
"step": 50
},
{
"epoch": 0.47248322147651006,
"grad_norm": 30.998467510893132,
"learning_rate": 3.171329668685942e-07,
"logits/chosen": -1.581739068031311,
"logits/rejected": -1.926107406616211,
"logps/chosen": -772.8366088867188,
"logps/rejected": -3056.0234375,
"loss": 0.2615,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -1.8509727716445923,
"rewards/margins": 4.722422122955322,
"rewards/rejected": -6.573394775390625,
"step": 55
},
{
"epoch": 0.5154362416107383,
"grad_norm": 18.56633622085772,
"learning_rate": 2.801341700638307e-07,
"logits/chosen": -1.4350240230560303,
"logits/rejected": -1.619866967201233,
"logps/chosen": -880.1119995117188,
"logps/rejected": -3234.364013671875,
"loss": 0.194,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -2.119612216949463,
"rewards/margins": 6.008899688720703,
"rewards/rejected": -8.128512382507324,
"step": 60
},
{
"epoch": 0.5583892617449664,
"grad_norm": 39.169508412837246,
"learning_rate": 2.424492430497778e-07,
"logits/chosen": -1.7028038501739502,
"logits/rejected": -1.7408069372177124,
"logps/chosen": -853.0597534179688,
"logps/rejected": -3378.33642578125,
"loss": 0.1911,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": -2.2176003456115723,
"rewards/margins": 6.097519874572754,
"rewards/rejected": -8.315119743347168,
"step": 65
},
{
"epoch": 0.6013422818791946,
"grad_norm": 45.21213119496434,
"learning_rate": 2.0493624054652355e-07,
"logits/chosen": -1.540661096572876,
"logits/rejected": -1.7918895483016968,
"logps/chosen": -935.87939453125,
"logps/rejected": -3163.64208984375,
"loss": 0.2123,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -2.6157093048095703,
"rewards/margins": 5.561184883117676,
"rewards/rejected": -8.176894187927246,
"step": 70
},
{
"epoch": 0.6442953020134228,
"grad_norm": 62.20110512263006,
"learning_rate": 1.6844930269478273e-07,
"logits/chosen": -1.660971999168396,
"logits/rejected": -1.7829008102416992,
"logps/chosen": -786.4994506835938,
"logps/rejected": -3442.186279296875,
"loss": 0.1726,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -1.9392259120941162,
"rewards/margins": 7.3069915771484375,
"rewards/rejected": -9.246217727661133,
"step": 75
},
{
"epoch": 0.687248322147651,
"grad_norm": 12.849059907790142,
"learning_rate": 1.3381920698905784e-07,
"logits/chosen": -1.7256042957305908,
"logits/rejected": -1.6946690082550049,
"logps/chosen": -784.163330078125,
"logps/rejected": -3483.552734375,
"loss": 0.228,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -2.541985273361206,
"rewards/margins": 7.170411109924316,
"rewards/rejected": -9.712396621704102,
"step": 80
},
{
"epoch": 0.7302013422818792,
"grad_norm": 26.23757128264686,
"learning_rate": 1.0183445215899584e-07,
"logits/chosen": -1.7951923608779907,
"logits/rejected": -1.84537672996521,
"logps/chosen": -798.8233642578125,
"logps/rejected": -3609.947265625,
"loss": 0.1788,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -2.160109043121338,
"rewards/margins": 5.754651069641113,
"rewards/rejected": -7.914759635925293,
"step": 85
},
{
"epoch": 0.7731543624161074,
"grad_norm": 10.192170734753343,
"learning_rate": 7.322330470336313e-08,
"logits/chosen": -1.6331850290298462,
"logits/rejected": -1.5111037492752075,
"logps/chosen": -793.5105590820312,
"logps/rejected": -2994.8486328125,
"loss": 0.1363,
"rewards/accuracies": 0.981249988079071,
"rewards/chosen": -2.2809009552001953,
"rewards/margins": 6.116978168487549,
"rewards/rejected": -8.397878646850586,
"step": 90
},
{
"epoch": 0.8161073825503355,
"grad_norm": 25.55626868432015,
"learning_rate": 4.863721686226349e-08,
"logits/chosen": -1.4511371850967407,
"logits/rejected": -1.4803770780563354,
"logps/chosen": -715.542236328125,
"logps/rejected": -3007.235107421875,
"loss": 0.2881,
"rewards/accuracies": 0.9375,
"rewards/chosen": -2.2814974784851074,
"rewards/margins": 7.453909397125244,
"rewards/rejected": -9.735406875610352,
"step": 95
},
{
"epoch": 0.8590604026845637,
"grad_norm": 50.01533007273521,
"learning_rate": 2.863599358669755e-08,
"logits/chosen": -1.7344436645507812,
"logits/rejected": -1.474273681640625,
"logps/chosen": -685.488525390625,
"logps/rejected": -3361.528564453125,
"loss": 0.2129,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -2.257279872894287,
"rewards/margins": 8.946730613708496,
"rewards/rejected": -11.204010963439941,
"step": 100
},
{
"epoch": 0.9020134228187919,
"grad_norm": 31.116313265685257,
"learning_rate": 1.3675046241339916e-08,
"logits/chosen": -1.6095584630966187,
"logits/rejected": -1.4253101348876953,
"logps/chosen": -858.84130859375,
"logps/rejected": -3048.67578125,
"loss": 0.2266,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -2.7480015754699707,
"rewards/margins": 6.334467887878418,
"rewards/rejected": -9.082470893859863,
"step": 105
},
{
"epoch": 0.9449664429530201,
"grad_norm": 24.56992314384152,
"learning_rate": 4.0950232632141205e-09,
"logits/chosen": -1.5636495351791382,
"logits/rejected": -1.5311402082443237,
"logps/chosen": -720.26708984375,
"logps/rejected": -3027.19873046875,
"loss": 0.2279,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -2.2057013511657715,
"rewards/margins": 7.12615442276001,
"rewards/rejected": -9.331854820251465,
"step": 110
},
{
"epoch": 0.9879194630872483,
"grad_norm": 24.93170608381977,
"learning_rate": 1.1405387761664887e-10,
"logits/chosen": -1.6431703567504883,
"logits/rejected": -1.5806033611297607,
"logps/chosen": -681.1459350585938,
"logps/rejected": -3343.632080078125,
"loss": 0.1556,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": -1.9711568355560303,
"rewards/margins": 8.062986373901367,
"rewards/rejected": -10.034143447875977,
"step": 115
},
{
"epoch": 0.996510067114094,
"step": 116,
"total_flos": 0.0,
"train_loss": 0.3046778245218869,
"train_runtime": 5634.2561,
"train_samples_per_second": 2.644,
"train_steps_per_second": 0.021
}
],
"logging_steps": 5,
"max_steps": 116,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}