{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.549186676994578,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.061967467079783116,
      "grad_norm": 0.31712573766708374,
      "learning_rate": 0.0001,
      "logits/chosen": -2.0377116203308105,
      "logits/rejected": -1.4957941770553589,
      "logps/chosen": -366.8505859375,
      "logps/rejected": -216.56210327148438,
      "loss": 0.7399,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": 0.4758438169956207,
      "rewards/margins": 0.7039788961410522,
      "rewards/rejected": -0.2281351387500763,
      "step": 20
    },
    {
      "epoch": 0.12393493415956623,
      "grad_norm": 0.6672604084014893,
      "learning_rate": 0.0002,
      "logits/chosen": -2.0871264934539795,
      "logits/rejected": -1.5913448333740234,
      "logps/chosen": -343.7679443359375,
      "logps/rejected": -225.5177764892578,
      "loss": 0.1009,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": 1.8198397159576416,
      "rewards/margins": 4.43993616104126,
      "rewards/rejected": -2.620096206665039,
      "step": 40
    },
    {
      "epoch": 0.18590240123934934,
      "grad_norm": 0.05973530188202858,
      "learning_rate": 0.0003,
      "logits/chosen": -2.024085521697998,
      "logits/rejected": -1.5500152111053467,
      "logps/chosen": -337.0369873046875,
      "logps/rejected": -244.14907836914062,
      "loss": 0.0377,
      "rewards/accuracies": 0.9906250238418579,
      "rewards/chosen": 1.434114694595337,
      "rewards/margins": 6.376841068267822,
      "rewards/rejected": -4.9427266120910645,
      "step": 60
    },
    {
      "epoch": 0.24786986831913246,
      "grad_norm": 0.2159392535686493,
      "learning_rate": 0.0004,
      "logits/chosen": -1.8774925470352173,
      "logits/rejected": -1.367897868156433,
      "logps/chosen": -346.57025146484375,
      "logps/rejected": -268.8457946777344,
      "loss": 0.0185,
      "rewards/accuracies": 0.996874988079071,
      "rewards/chosen": 0.6674507260322571,
      "rewards/margins": 8.490525245666504,
      "rewards/rejected": -7.823073387145996,
      "step": 80
    },
    {
      "epoch": 0.30983733539891556,
      "grad_norm": 0.1853674203157425,
      "learning_rate": 0.0005,
      "logits/chosen": -1.7931022644042969,
      "logits/rejected": -1.3545161485671997,
      "logps/chosen": -341.8374938964844,
      "logps/rejected": -290.44158935546875,
      "loss": 0.0162,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -0.9960247278213501,
      "rewards/margins": 8.982015609741211,
      "rewards/rejected": -9.97804069519043,
      "step": 100
    },
    {
      "epoch": 0.3718048024786987,
      "grad_norm": 0.07802274823188782,
      "learning_rate": 0.0004969220851487844,
      "logits/chosen": -2.0918784141540527,
      "logits/rejected": -1.7004894018173218,
      "logps/chosen": -374.20416259765625,
      "logps/rejected": -319.31304931640625,
      "loss": 0.0243,
      "rewards/accuracies": 0.9906250238418579,
      "rewards/chosen": -1.1373934745788574,
      "rewards/margins": 9.278127670288086,
      "rewards/rejected": -10.415521621704102,
      "step": 120
    },
    {
      "epoch": 0.4337722695584818,
      "grad_norm": 0.014924481511116028,
      "learning_rate": 0.0004877641290737884,
      "logits/chosen": -2.199770450592041,
      "logits/rejected": -1.7312860488891602,
      "logps/chosen": -357.83184814453125,
      "logps/rejected": -289.49066162109375,
      "loss": 0.0138,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.0910751074552536,
      "rewards/margins": 8.973724365234375,
      "rewards/rejected": -8.882649421691895,
      "step": 140
    },
    {
      "epoch": 0.4957397366382649,
      "grad_norm": 0.08808302134275436,
      "learning_rate": 0.00047275163104709196,
      "logits/chosen": -2.0726726055145264,
      "logits/rejected": -1.5627789497375488,
      "logps/chosen": -360.4253845214844,
      "logps/rejected": -310.8641357421875,
      "loss": 0.0104,
      "rewards/accuracies": 0.996874988079071,
      "rewards/chosen": -0.6570650339126587,
      "rewards/margins": 10.160908699035645,
      "rewards/rejected": -10.817973136901855,
      "step": 160
    },
    {
      "epoch": 0.557707203718048,
      "grad_norm": 0.15174007415771484,
      "learning_rate": 0.0004522542485937369,
      "logits/chosen": -1.987160086631775,
      "logits/rejected": -1.4582111835479736,
      "logps/chosen": -353.457275390625,
      "logps/rejected": -307.9656677246094,
      "loss": 0.014,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -0.5743726491928101,
      "rewards/margins": 10.448052406311035,
      "rewards/rejected": -11.022424697875977,
      "step": 180
    },
    {
      "epoch": 0.6196746707978311,
      "grad_norm": 0.671403169631958,
      "learning_rate": 0.00042677669529663686,
      "logits/chosen": -1.911975622177124,
      "logits/rejected": -1.3982415199279785,
      "logps/chosen": -375.1275634765625,
      "logps/rejected": -337.18798828125,
      "loss": 0.0157,
      "rewards/accuracies": 0.9906250238418579,
      "rewards/chosen": -1.584790825843811,
      "rewards/margins": 10.48969841003418,
      "rewards/rejected": -12.074487686157227,
      "step": 200
    },
    {
      "epoch": 0.6816421378776143,
      "grad_norm": 0.004173076245933771,
      "learning_rate": 0.0003969463130731183,
      "logits/chosen": -1.8694168329238892,
      "logits/rejected": -1.2515356540679932,
      "logps/chosen": -371.6986389160156,
      "logps/rejected": -321.53704833984375,
      "loss": 0.0113,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -1.3206509351730347,
      "rewards/margins": 10.872579574584961,
      "rewards/rejected": -12.193229675292969,
      "step": 220
    },
    {
      "epoch": 0.7436096049573974,
      "grad_norm": 0.003299139440059662,
      "learning_rate": 0.00036349762493488667,
      "logits/chosen": -1.8045600652694702,
      "logits/rejected": -1.0531251430511475,
      "logps/chosen": -363.6612243652344,
      "logps/rejected": -331.32403564453125,
      "loss": 0.0051,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.075836181640625,
      "rewards/margins": 11.123804092407227,
      "rewards/rejected": -13.199640274047852,
      "step": 240
    },
    {
      "epoch": 0.8055770720371804,
      "grad_norm": 0.009364907629787922,
      "learning_rate": 0.00032725424859373687,
      "logits/chosen": -1.8670756816864014,
      "logits/rejected": -1.1788039207458496,
      "logps/chosen": -372.3548889160156,
      "logps/rejected": -366.14385986328125,
      "loss": 0.0093,
      "rewards/accuracies": 0.9906250238418579,
      "rewards/chosen": -2.1455836296081543,
      "rewards/margins": 12.034472465515137,
      "rewards/rejected": -14.180055618286133,
      "step": 260
    },
    {
      "epoch": 0.8675445391169636,
      "grad_norm": 0.0019821953028440475,
      "learning_rate": 0.00028910861626005774,
      "logits/chosen": -1.9058361053466797,
      "logits/rejected": -1.2149780988693237,
      "logps/chosen": -367.235107421875,
      "logps/rejected": -340.05230712890625,
      "loss": 0.006,
      "rewards/accuracies": 0.996874988079071,
      "rewards/chosen": -2.0556015968322754,
      "rewards/margins": 11.500048637390137,
      "rewards/rejected": -13.55565071105957,
      "step": 280
    },
    {
      "epoch": 0.9295120061967467,
      "grad_norm": 0.006883636582642794,
      "learning_rate": 0.00025,
      "logits/chosen": -1.791246771812439,
      "logits/rejected": -1.0243983268737793,
      "logps/chosen": -392.791015625,
      "logps/rejected": -365.2568359375,
      "loss": 0.0058,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.8660073280334473,
      "rewards/margins": 13.002245903015137,
      "rewards/rejected": -16.86825180053711,
      "step": 300
    },
    {
      "epoch": 0.9914794732765299,
      "grad_norm": 0.0022304155863821507,
      "learning_rate": 0.00021089138373994224,
      "logits/chosen": -1.8826805353164673,
      "logits/rejected": -1.1826245784759521,
      "logps/chosen": -379.79266357421875,
      "logps/rejected": -342.39312744140625,
      "loss": 0.0053,
      "rewards/accuracies": 0.996874988079071,
      "rewards/chosen": -2.450031280517578,
      "rewards/margins": 12.384546279907227,
      "rewards/rejected": -14.834577560424805,
      "step": 320
    },
    {
      "epoch": 1.053446940356313,
      "grad_norm": 0.021027542650699615,
      "learning_rate": 0.00017274575140626317,
      "logits/chosen": -1.9100282192230225,
      "logits/rejected": -1.1825989484786987,
      "logps/chosen": -393.48309326171875,
      "logps/rejected": -363.25396728515625,
      "loss": 0.0078,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.6979461908340454,
      "rewards/margins": 12.675543785095215,
      "rewards/rejected": -14.373491287231445,
      "step": 340
    },
    {
      "epoch": 1.115414407436096,
      "grad_norm": 0.0013823028421029449,
      "learning_rate": 0.00013650237506511331,
      "logits/chosen": -1.9101250171661377,
      "logits/rejected": -1.1503610610961914,
      "logps/chosen": -370.9356689453125,
      "logps/rejected": -333.2190856933594,
      "loss": 0.0031,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -1.6975269317626953,
      "rewards/margins": 12.453495025634766,
      "rewards/rejected": -14.151021957397461,
      "step": 360
    },
    {
      "epoch": 1.1773818745158793,
      "grad_norm": 0.0010998768266290426,
      "learning_rate": 0.00010305368692688174,
      "logits/chosen": -1.9123481512069702,
      "logits/rejected": -1.2217817306518555,
      "logps/chosen": -382.7933654785156,
      "logps/rejected": -355.1229248046875,
      "loss": 0.0032,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.9995158910751343,
      "rewards/margins": 12.895052909851074,
      "rewards/rejected": -14.894567489624023,
      "step": 380
    },
    {
      "epoch": 1.2393493415956622,
      "grad_norm": 0.00032321063918061554,
      "learning_rate": 7.322330470336314e-05,
      "logits/chosen": -1.904104471206665,
      "logits/rejected": -1.1937545537948608,
      "logps/chosen": -379.06524658203125,
      "logps/rejected": -346.93865966796875,
      "loss": 0.0038,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -1.5791075229644775,
      "rewards/margins": 13.05359935760498,
      "rewards/rejected": -14.632705688476562,
      "step": 400
    },
    {
      "epoch": 1.3013168086754454,
      "grad_norm": 0.04171500727534294,
      "learning_rate": 4.7745751406263163e-05,
      "logits/chosen": -1.8675615787506104,
      "logits/rejected": -1.1352770328521729,
      "logps/chosen": -372.1733093261719,
      "logps/rejected": -350.90399169921875,
      "loss": 0.0032,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.7773971557617188,
      "rewards/margins": 13.206622123718262,
      "rewards/rejected": -14.984021186828613,
      "step": 420
    },
    {
      "epoch": 1.3632842757552286,
      "grad_norm": 0.010930106975138187,
      "learning_rate": 2.7248368952908055e-05,
      "logits/chosen": -1.8420419692993164,
      "logits/rejected": -1.1195908784866333,
      "logps/chosen": -380.8678894042969,
      "logps/rejected": -368.0999755859375,
      "loss": 0.0007,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.5592141151428223,
      "rewards/margins": 13.297220230102539,
      "rewards/rejected": -15.85643196105957,
      "step": 440
    },
    {
      "epoch": 1.4252517428350115,
      "grad_norm": 0.004741205833852291,
      "learning_rate": 1.2235870926211617e-05,
      "logits/chosen": -1.8401530981063843,
      "logits/rejected": -1.1240848302841187,
      "logps/chosen": -372.3128967285156,
      "logps/rejected": -352.5906982421875,
      "loss": 0.0017,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.5608876943588257,
      "rewards/margins": 13.396890640258789,
      "rewards/rejected": -14.957778930664062,
      "step": 460
    },
    {
      "epoch": 1.4872192099147947,
      "grad_norm": 0.010726602748036385,
      "learning_rate": 3.077914851215585e-06,
      "logits/chosen": -1.8262317180633545,
      "logits/rejected": -1.0366793870925903,
      "logps/chosen": -390.6006774902344,
      "logps/rejected": -352.1526794433594,
      "loss": 0.0053,
      "rewards/accuracies": 0.9906250238418579,
      "rewards/chosen": -2.158936023712158,
      "rewards/margins": 13.084844589233398,
      "rewards/rejected": -15.243780136108398,
      "step": 480
    },
    {
      "epoch": 1.549186676994578,
      "grad_norm": 0.00336117553524673,
      "learning_rate": 0.0,
      "logits/chosen": -1.8495571613311768,
      "logits/rejected": -1.1169774532318115,
      "logps/chosen": -371.83416748046875,
      "logps/rejected": -368.6435546875,
      "loss": 0.0048,
      "rewards/accuracies": 0.9906250238418579,
      "rewards/chosen": -2.3796725273132324,
      "rewards/margins": 13.638076782226562,
      "rewards/rejected": -16.017749786376953,
      "step": 500
    }
  ],
  "logging_steps": 20,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}