{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9984301412872841,
  "eval_steps": 100,
  "global_step": 477,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0020931449502878076,
      "grad_norm": 2.741797685623169,
      "learning_rate": 1.0416666666666666e-08,
      "logits/chosen": -3.1097278594970703,
      "logits/rejected": -3.080122470855713,
      "logps/chosen": -389.3681640625,
      "logps/rejected": -352.00482177734375,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.020931449502878074,
      "grad_norm": 2.746737480163574,
      "learning_rate": 1.0416666666666667e-07,
      "logits/chosen": -2.9152116775512695,
      "logits/rejected": -2.905562400817871,
      "logps/chosen": -323.6693420410156,
      "logps/rejected": -297.4331359863281,
      "loss": 0.6932,
      "rewards/accuracies": 0.4201388955116272,
      "rewards/chosen": -0.00030070680077187717,
      "rewards/margins": -0.00017917003424372524,
      "rewards/rejected": -0.00012153676652815193,
      "step": 10
    },
    {
      "epoch": 0.04186289900575615,
      "grad_norm": 2.543320655822754,
      "learning_rate": 2.0833333333333333e-07,
      "logits/chosen": -2.9843814373016357,
      "logits/rejected": -2.9781064987182617,
      "logps/chosen": -335.30413818359375,
      "logps/rejected": -316.4299621582031,
      "loss": 0.6931,
      "rewards/accuracies": 0.5218750238418579,
      "rewards/chosen": 0.0004041799402330071,
      "rewards/margins": 0.0005227966466918588,
      "rewards/rejected": -0.0001186166555271484,
      "step": 20
    },
    {
      "epoch": 0.06279434850863422,
      "grad_norm": 2.5893585681915283,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.997720241546631,
      "logits/rejected": -2.9677913188934326,
      "logps/chosen": -345.15240478515625,
      "logps/rejected": -295.96966552734375,
      "loss": 0.6928,
      "rewards/accuracies": 0.528124988079071,
      "rewards/chosen": -0.0004641309496946633,
      "rewards/margins": 0.000602933403570205,
      "rewards/rejected": -0.0010670643532648683,
      "step": 30
    },
    {
      "epoch": 0.0837257980115123,
      "grad_norm": 2.6134979724884033,
      "learning_rate": 4.1666666666666667e-07,
      "logits/chosen": -2.9335551261901855,
      "logits/rejected": -2.9302399158477783,
      "logps/chosen": -321.5934143066406,
      "logps/rejected": -289.6072998046875,
      "loss": 0.6919,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.0015825815498828888,
      "rewards/margins": 0.0028794072568416595,
      "rewards/rejected": -0.004461988341063261,
      "step": 40
    },
    {
      "epoch": 0.10465724751439037,
      "grad_norm": 2.6024513244628906,
      "learning_rate": 4.999731868769026e-07,
      "logits/chosen": -2.9198384284973145,
      "logits/rejected": -2.8993093967437744,
      "logps/chosen": -328.54644775390625,
      "logps/rejected": -309.58087158203125,
      "loss": 0.6901,
      "rewards/accuracies": 0.5562499761581421,
      "rewards/chosen": -0.005653353873640299,
      "rewards/margins": 0.004262409172952175,
      "rewards/rejected": -0.009915763512253761,
      "step": 50
    },
    {
      "epoch": 0.12558869701726844,
      "grad_norm": 2.767859697341919,
      "learning_rate": 4.990353313429303e-07,
      "logits/chosen": -2.982860565185547,
      "logits/rejected": -2.969365119934082,
      "logps/chosen": -307.7032470703125,
      "logps/rejected": -304.3933410644531,
      "loss": 0.6874,
      "rewards/accuracies": 0.578125,
      "rewards/chosen": -0.015346085652709007,
      "rewards/margins": 0.010624411515891552,
      "rewards/rejected": -0.025970498099923134,
      "step": 60
    },
    {
      "epoch": 0.14652014652014653,
      "grad_norm": 2.7651147842407227,
      "learning_rate": 4.967625656594781e-07,
      "logits/chosen": -2.9431705474853516,
      "logits/rejected": -2.953648090362549,
      "logps/chosen": -346.7774658203125,
      "logps/rejected": -312.93865966796875,
      "loss": 0.6826,
      "rewards/accuracies": 0.653124988079071,
      "rewards/chosen": -0.04352644830942154,
      "rewards/margins": 0.024388128891587257,
      "rewards/rejected": -0.06791457533836365,
      "step": 70
    },
    {
      "epoch": 0.1674515960230246,
      "grad_norm": 2.939892053604126,
      "learning_rate": 4.93167072587771e-07,
      "logits/chosen": -2.973877429962158,
      "logits/rejected": -2.9495885372161865,
      "logps/chosen": -362.0614929199219,
      "logps/rejected": -301.11322021484375,
      "loss": 0.6771,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -0.08665237575769424,
      "rewards/margins": 0.0347968153655529,
      "rewards/rejected": -0.12144919484853745,
      "step": 80
    },
    {
      "epoch": 0.18838304552590268,
      "grad_norm": 2.9581210613250732,
      "learning_rate": 4.882681251368548e-07,
      "logits/chosen": -2.9634270668029785,
      "logits/rejected": -2.9433810710906982,
      "logps/chosen": -323.24359130859375,
      "logps/rejected": -298.01763916015625,
      "loss": 0.6728,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.13328225910663605,
      "rewards/margins": 0.03973008692264557,
      "rewards/rejected": -0.17301234602928162,
      "step": 90
    },
    {
      "epoch": 0.20931449502878074,
      "grad_norm": 3.1116480827331543,
      "learning_rate": 4.820919832540181e-07,
      "logits/chosen": -2.9381461143493652,
      "logits/rejected": -2.9422552585601807,
      "logps/chosen": -342.9717102050781,
      "logps/rejected": -310.58758544921875,
      "loss": 0.6685,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.14851507544517517,
      "rewards/margins": 0.057429663836956024,
      "rewards/rejected": -0.2059447318315506,
      "step": 100
    },
    {
      "epoch": 0.20931449502878074,
      "eval_logits/chosen": -2.950716972351074,
      "eval_logits/rejected": -2.9254934787750244,
      "eval_logps/chosen": -355.60272216796875,
      "eval_logps/rejected": -317.9681701660156,
      "eval_loss": 0.6694281101226807,
      "eval_rewards/accuracies": 0.6527777910232544,
      "eval_rewards/chosen": -0.1298944652080536,
      "eval_rewards/margins": 0.06442055851221085,
      "eval_rewards/rejected": -0.19431501626968384,
      "eval_runtime": 86.0849,
      "eval_samples_per_second": 23.233,
      "eval_steps_per_second": 0.732,
      "step": 100
    },
    {
      "epoch": 0.2302459445316588,
      "grad_norm": 3.7738163471221924,
      "learning_rate": 4.7467175306295647e-07,
      "logits/chosen": -2.926741123199463,
      "logits/rejected": -2.913757562637329,
      "logps/chosen": -356.9674377441406,
      "logps/rejected": -310.5869140625,
      "loss": 0.6697,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": -0.14377950131893158,
      "rewards/margins": 0.06109999865293503,
      "rewards/rejected": -0.204879492521286,
      "step": 110
    },
    {
      "epoch": 0.25117739403453687,
      "grad_norm": 3.45920467376709,
      "learning_rate": 4.6604720940421207e-07,
      "logits/chosen": -2.854685068130493,
      "logits/rejected": -2.851743459701538,
      "logps/chosen": -349.94189453125,
      "logps/rejected": -327.46820068359375,
      "loss": 0.6557,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.174159973859787,
      "rewards/margins": 0.08476099371910095,
      "rewards/rejected": -0.25892096757888794,
      "step": 120
    },
    {
      "epoch": 0.272108843537415,
      "grad_norm": 4.213115692138672,
      "learning_rate": 4.5626458262912735e-07,
      "logits/chosen": -2.8723671436309814,
      "logits/rejected": -2.8682103157043457,
      "logps/chosen": -364.5023498535156,
      "logps/rejected": -344.17413330078125,
      "loss": 0.65,
      "rewards/accuracies": 0.640625,
      "rewards/chosen": -0.26356348395347595,
      "rewards/margins": 0.08755739033222198,
      "rewards/rejected": -0.35112088918685913,
      "step": 130
    },
    {
      "epoch": 0.29304029304029305,
      "grad_norm": 4.351049900054932,
      "learning_rate": 4.453763107901675e-07,
      "logits/chosen": -2.8395862579345703,
      "logits/rejected": -2.8383350372314453,
      "logps/chosen": -386.54754638671875,
      "logps/rejected": -353.8211975097656,
      "loss": 0.6576,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -0.30667954683303833,
      "rewards/margins": 0.11848233640193939,
      "rewards/rejected": -0.4251618981361389,
      "step": 140
    },
    {
      "epoch": 0.3139717425431711,
      "grad_norm": 5.293145656585693,
      "learning_rate": 4.3344075855595097e-07,
      "logits/chosen": -2.837963581085205,
      "logits/rejected": -2.8277411460876465,
      "logps/chosen": -363.89532470703125,
      "logps/rejected": -334.4164733886719,
      "loss": 0.6562,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.3559018671512604,
      "rewards/margins": 0.0798763707280159,
      "rewards/rejected": -0.43577829003334045,
      "step": 150
    },
    {
      "epoch": 0.3349031920460492,
      "grad_norm": 5.583098411560059,
      "learning_rate": 4.2052190435769554e-07,
      "logits/chosen": -2.8631508350372314,
      "logits/rejected": -2.8266890048980713,
      "logps/chosen": -359.99200439453125,
      "logps/rejected": -320.49151611328125,
      "loss": 0.6453,
      "rewards/accuracies": 0.628125011920929,
      "rewards/chosen": -0.34305575489997864,
      "rewards/margins": 0.13357409834861755,
      "rewards/rejected": -0.4766298234462738,
      "step": 160
    },
    {
      "epoch": 0.35583464154892724,
      "grad_norm": 5.565986633300781,
      "learning_rate": 4.0668899744407567e-07,
      "logits/chosen": -2.800814628601074,
      "logits/rejected": -2.7905514240264893,
      "logps/chosen": -349.87725830078125,
      "logps/rejected": -323.04150390625,
      "loss": 0.6516,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": -0.4009243845939636,
      "rewards/margins": 0.12320031225681305,
      "rewards/rejected": -0.5241247415542603,
      "step": 170
    },
    {
      "epoch": 0.37676609105180536,
      "grad_norm": 6.890281677246094,
      "learning_rate": 3.920161866827889e-07,
      "logits/chosen": -2.8282012939453125,
      "logits/rejected": -2.8248977661132812,
      "logps/chosen": -382.04364013671875,
      "logps/rejected": -350.888427734375,
      "loss": 0.64,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -0.4164988100528717,
      "rewards/margins": 0.15513385832309723,
      "rewards/rejected": -0.5716326832771301,
      "step": 180
    },
    {
      "epoch": 0.3976975405546834,
      "grad_norm": 5.489590167999268,
      "learning_rate": 3.765821230985757e-07,
      "logits/chosen": -2.8130276203155518,
      "logits/rejected": -2.7892467975616455,
      "logps/chosen": -350.63873291015625,
      "logps/rejected": -333.55718994140625,
      "loss": 0.6408,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.3725319802761078,
      "rewards/margins": 0.13300345838069916,
      "rewards/rejected": -0.5055354833602905,
      "step": 190
    },
    {
      "epoch": 0.4186289900575615,
      "grad_norm": 6.967275142669678,
      "learning_rate": 3.604695382782159e-07,
      "logits/chosen": -2.787351608276367,
      "logits/rejected": -2.789077043533325,
      "logps/chosen": -370.17864990234375,
      "logps/rejected": -361.43988037109375,
      "loss": 0.642,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.38552746176719666,
      "rewards/margins": 0.16969361901283264,
      "rewards/rejected": -0.5552210807800293,
      "step": 200
    },
    {
      "epoch": 0.4186289900575615,
      "eval_logits/chosen": -2.8207905292510986,
      "eval_logits/rejected": -2.7925562858581543,
      "eval_logps/chosen": -385.3469543457031,
      "eval_logps/rejected": -360.2872619628906,
      "eval_loss": 0.6406751871109009,
      "eval_rewards/accuracies": 0.6726190447807312,
      "eval_rewards/chosen": -0.4273369312286377,
      "eval_rewards/margins": 0.19016937911510468,
      "eval_rewards/rejected": -0.6175063252449036,
      "eval_runtime": 86.2297,
      "eval_samples_per_second": 23.194,
      "eval_steps_per_second": 0.731,
      "step": 200
    },
    {
      "epoch": 0.43956043956043955,
      "grad_norm": 8.497602462768555,
      "learning_rate": 3.4376480090239047e-07,
      "logits/chosen": -2.7831473350524902,
      "logits/rejected": -2.7467095851898193,
      "logps/chosen": -387.8429870605469,
      "logps/rejected": -351.0397644042969,
      "loss": 0.6471,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.5111196041107178,
      "rewards/margins": 0.1388254463672638,
      "rewards/rejected": -0.649945080280304,
      "step": 210
    },
    {
      "epoch": 0.4604918890633176,
      "grad_norm": 6.599562168121338,
      "learning_rate": 3.265574537815398e-07,
      "logits/chosen": -2.7829642295837402,
      "logits/rejected": -2.7764458656311035,
      "logps/chosen": -379.44464111328125,
      "logps/rejected": -354.76092529296875,
      "loss": 0.6423,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.49129724502563477,
      "rewards/margins": 0.16036386787891388,
      "rewards/rejected": -0.6516611576080322,
      "step": 220
    },
    {
      "epoch": 0.48142333856619574,
      "grad_norm": 6.966579914093018,
      "learning_rate": 3.0893973387735683e-07,
      "logits/chosen": -2.7712459564208984,
      "logits/rejected": -2.760551929473877,
      "logps/chosen": -370.9303283691406,
      "logps/rejected": -359.6304626464844,
      "loss": 0.6429,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.436328649520874,
      "rewards/margins": 0.17412248253822327,
      "rewards/rejected": -0.6104511022567749,
      "step": 230
    },
    {
      "epoch": 0.5023547880690737,
      "grad_norm": 6.429652690887451,
      "learning_rate": 2.910060778827554e-07,
      "logits/chosen": -2.7925572395324707,
      "logits/rejected": -2.7672150135040283,
      "logps/chosen": -370.3599548339844,
      "logps/rejected": -349.8984680175781,
      "loss": 0.6292,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": -0.4170507490634918,
      "rewards/margins": 0.2068098783493042,
      "rewards/rejected": -0.6238606572151184,
      "step": 240
    },
    {
      "epoch": 0.5232862375719518,
      "grad_norm": 6.210602283477783,
      "learning_rate": 2.7285261601056697e-07,
      "logits/chosen": -2.791151285171509,
      "logits/rejected": -2.761967897415161,
      "logps/chosen": -386.1891784667969,
      "logps/rejected": -342.8139343261719,
      "loss": 0.6327,
      "rewards/accuracies": 0.6781250238418579,
      "rewards/chosen": -0.42713984847068787,
      "rewards/margins": 0.22475413978099823,
      "rewards/rejected": -0.6518939137458801,
      "step": 250
    },
    {
      "epoch": 0.54421768707483,
      "grad_norm": 6.485888481140137,
      "learning_rate": 2.5457665670441937e-07,
      "logits/chosen": -2.8333914279937744,
      "logits/rejected": -2.8468918800354004,
      "logps/chosen": -381.37078857421875,
      "logps/rejected": -365.30963134765625,
      "loss": 0.6332,
      "rewards/accuracies": 0.684374988079071,
      "rewards/chosen": -0.45365315675735474,
      "rewards/margins": 0.20745711028575897,
      "rewards/rejected": -0.6611102223396301,
      "step": 260
    },
    {
      "epoch": 0.565149136577708,
      "grad_norm": 7.725765705108643,
      "learning_rate": 2.3627616503391812e-07,
      "logits/chosen": -2.8207736015319824,
      "logits/rejected": -2.8081822395324707,
      "logps/chosen": -386.81976318359375,
      "logps/rejected": -361.7131652832031,
      "loss": 0.6314,
      "rewards/accuracies": 0.659375011920929,
      "rewards/chosen": -0.44246116280555725,
      "rewards/margins": 0.18473473191261292,
      "rewards/rejected": -0.6271958351135254,
      "step": 270
    },
    {
      "epoch": 0.5860805860805861,
      "grad_norm": 7.315251350402832,
      "learning_rate": 2.1804923757009882e-07,
      "logits/chosen": -2.7474558353424072,
      "logits/rejected": -2.7452392578125,
      "logps/chosen": -373.4273681640625,
      "logps/rejected": -346.6417541503906,
      "loss": 0.6341,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": -0.4729071259498596,
      "rewards/margins": 0.2166450470685959,
      "rewards/rejected": -0.6895521879196167,
      "step": 280
    },
    {
      "epoch": 0.6070120355834642,
      "grad_norm": 7.481057643890381,
      "learning_rate": 1.9999357655598891e-07,
      "logits/chosen": -2.8017992973327637,
      "logits/rejected": -2.7807507514953613,
      "logps/chosen": -372.48126220703125,
      "logps/rejected": -341.9537048339844,
      "loss": 0.6264,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.5083416700363159,
      "rewards/margins": 0.176277756690979,
      "rewards/rejected": -0.6846194863319397,
      "step": 290
    },
    {
      "epoch": 0.6279434850863422,
      "grad_norm": 8.147472381591797,
      "learning_rate": 1.8220596619089573e-07,
      "logits/chosen": -2.7440390586853027,
      "logits/rejected": -2.717979907989502,
      "logps/chosen": -375.93035888671875,
      "logps/rejected": -361.93121337890625,
      "loss": 0.6285,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.4719668924808502,
      "rewards/margins": 0.17040875554084778,
      "rewards/rejected": -0.642375648021698,
      "step": 300
    },
    {
      "epoch": 0.6279434850863422,
      "eval_logits/chosen": -2.801197052001953,
      "eval_logits/rejected": -2.773117780685425,
      "eval_logps/chosen": -389.8437805175781,
      "eval_logps/rejected": -368.0482177734375,
      "eval_loss": 0.6331161856651306,
      "eval_rewards/accuracies": 0.6646825671195984,
      "eval_rewards/chosen": -0.4723050594329834,
      "eval_rewards/margins": 0.22281017899513245,
      "eval_rewards/rejected": -0.6951152086257935,
      "eval_runtime": 85.9334,
      "eval_samples_per_second": 23.274,
      "eval_steps_per_second": 0.733,
      "step": 300
    },
    {
      "epoch": 0.6488749345892203,
      "grad_norm": 11.20168399810791,
      "learning_rate": 1.647817538357072e-07,
      "logits/chosen": -2.736398935317993,
      "logits/rejected": -2.738858461380005,
      "logps/chosen": -391.0700378417969,
      "logps/rejected": -347.84356689453125,
      "loss": 0.622,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.4913768768310547,
      "rewards/margins": 0.23215806484222412,
      "rewards/rejected": -0.723534882068634,
      "step": 310
    },
    {
      "epoch": 0.6698063840920984,
      "grad_norm": 7.335338592529297,
      "learning_rate": 1.478143389201113e-07,
      "logits/chosen": -2.7694268226623535,
      "logits/rejected": -2.74495267868042,
      "logps/chosen": -382.05419921875,
      "logps/rejected": -351.9586486816406,
      "loss": 0.6179,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.5782755017280579,
      "rewards/margins": 0.24550530314445496,
      "rewards/rejected": -0.8237808346748352,
      "step": 320
    },
    {
      "epoch": 0.6907378335949764,
      "grad_norm": 9.174397468566895,
      "learning_rate": 1.3139467229135998e-07,
      "logits/chosen": -2.7542152404785156,
      "logits/rejected": -2.74013352394104,
      "logps/chosen": -391.40618896484375,
      "logps/rejected": -386.9635314941406,
      "loss": 0.63,
      "rewards/accuracies": 0.609375,
      "rewards/chosen": -0.6302947998046875,
      "rewards/margins": 0.17986619472503662,
      "rewards/rejected": -0.8101609945297241,
      "step": 330
    },
    {
      "epoch": 0.7116692830978545,
      "grad_norm": 8.58338451385498,
      "learning_rate": 1.1561076868822755e-07,
      "logits/chosen": -2.75496768951416,
      "logits/rejected": -2.7243494987487793,
      "logps/chosen": -427.29315185546875,
      "logps/rejected": -422.3783264160156,
      "loss": 0.6206,
      "rewards/accuracies": 0.659375011920929,
      "rewards/chosen": -0.6146808862686157,
      "rewards/margins": 0.28181296586990356,
      "rewards/rejected": -0.8964937925338745,
      "step": 340
    },
    {
      "epoch": 0.7326007326007326,
      "grad_norm": 7.745279788970947,
      "learning_rate": 1.0054723495346482e-07,
      "logits/chosen": -2.7472786903381348,
      "logits/rejected": -2.727072238922119,
      "logps/chosen": -380.8338623046875,
      "logps/rejected": -356.40875244140625,
      "loss": 0.6116,
      "rewards/accuracies": 0.6656249761581421,
      "rewards/chosen": -0.6483052968978882,
      "rewards/margins": 0.22691123187541962,
      "rewards/rejected": -0.8752166628837585,
      "step": 350
    },
    {
      "epoch": 0.7535321821036107,
      "grad_norm": 8.188222885131836,
      "learning_rate": 8.628481651367875e-08,
      "logits/chosen": -2.7281575202941895,
      "logits/rejected": -2.714717388153076,
      "logps/chosen": -399.79217529296875,
      "logps/rejected": -374.2989807128906,
      "loss": 0.6313,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.7062140107154846,
      "rewards/margins": 0.22601504623889923,
      "rewards/rejected": -0.9322290420532227,
      "step": 360
    },
    {
      "epoch": 0.7744636316064888,
      "grad_norm": 7.2593889236450195,
      "learning_rate": 7.289996455765748e-08,
      "logits/chosen": -2.753688097000122,
      "logits/rejected": -2.7259750366210938,
      "logps/chosen": -383.2185974121094,
      "logps/rejected": -367.9438781738281,
      "loss": 0.6152,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.7225135564804077,
      "rewards/margins": 0.21898405253887177,
      "rewards/rejected": -0.9414976239204407,
      "step": 370
    },
    {
      "epoch": 0.7953950811093669,
      "grad_norm": 7.146636009216309,
      "learning_rate": 6.046442623320145e-08,
      "logits/chosen": -2.6859023571014404,
      "logits/rejected": -2.695509433746338,
      "logps/chosen": -363.49493408203125,
      "logps/rejected": -377.5242004394531,
      "loss": 0.6159,
      "rewards/accuracies": 0.6656249761581421,
      "rewards/chosen": -0.7377739548683167,
      "rewards/margins": 0.25747013092041016,
      "rewards/rejected": -0.995244026184082,
      "step": 380
    },
    {
      "epoch": 0.8163265306122449,
      "grad_norm": 7.638937473297119,
      "learning_rate": 4.904486005914027e-08,
      "logits/chosen": -2.6865615844726562,
      "logits/rejected": -2.6732540130615234,
      "logps/chosen": -433.0126037597656,
      "logps/rejected": -432.28448486328125,
      "loss": 0.6203,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.6989966034889221,
      "rewards/margins": 0.23467639088630676,
      "rewards/rejected": -0.9336729049682617,
      "step": 390
    },
    {
      "epoch": 0.837257980115123,
      "grad_norm": 8.645991325378418,
      "learning_rate": 3.8702478614051345e-08,
      "logits/chosen": -2.6967921257019043,
      "logits/rejected": -2.6760878562927246,
      "logps/chosen": -396.04595947265625,
      "logps/rejected": -385.61810302734375,
      "loss": 0.6222,
      "rewards/accuracies": 0.6781250238418579,
      "rewards/chosen": -0.674105167388916,
      "rewards/margins": 0.25409311056137085,
      "rewards/rejected": -0.9281982183456421,
      "step": 400
    },
    {
      "epoch": 0.837257980115123,
      "eval_logits/chosen": -2.7399401664733887,
      "eval_logits/rejected": -2.7107229232788086,
      "eval_logps/chosen": -412.686767578125,
      "eval_logps/rejected": -395.38079833984375,
      "eval_loss": 0.6240472197532654,
      "eval_rewards/accuracies": 0.682539701461792,
      "eval_rewards/chosen": -0.7007347345352173,
      "eval_rewards/margins": 0.2677067816257477,
      "eval_rewards/rejected": -0.9684414863586426,
      "eval_runtime": 86.0349,
      "eval_samples_per_second": 23.246,
      "eval_steps_per_second": 0.732,
      "step": 400
    },
    {
      "epoch": 0.858189429618001,
      "grad_norm": 8.63017749786377,
      "learning_rate": 2.9492720416985e-08,
      "logits/chosen": -2.72432279586792,
      "logits/rejected": -2.709479808807373,
      "logps/chosen": -423.2232971191406,
      "logps/rejected": -398.17779541015625,
      "loss": 0.6347,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.6851434111595154,
      "rewards/margins": 0.2306232750415802,
      "rewards/rejected": -0.915766716003418,
      "step": 410
    },
    {
      "epoch": 0.8791208791208791,
      "grad_norm": 8.239731788635254,
      "learning_rate": 2.1464952759020856e-08,
      "logits/chosen": -2.6693649291992188,
      "logits/rejected": -2.672886371612549,
      "logps/chosen": -392.8204040527344,
      "logps/rejected": -393.4344177246094,
      "loss": 0.6118,
      "rewards/accuracies": 0.640625,
      "rewards/chosen": -0.7332924008369446,
      "rewards/margins": 0.24412448704242706,
      "rewards/rejected": -0.9774168133735657,
      "step": 420
    },
    {
      "epoch": 0.9000523286237572,
      "grad_norm": 8.310417175292969,
      "learning_rate": 1.4662207078575684e-08,
      "logits/chosen": -2.6678881645202637,
      "logits/rejected": -2.6358609199523926,
      "logps/chosen": -409.0691833496094,
      "logps/rejected": -372.57183837890625,
      "loss": 0.6136,
      "rewards/accuracies": 0.653124988079071,
      "rewards/chosen": -0.7174139618873596,
      "rewards/margins": 0.21988801658153534,
      "rewards/rejected": -0.9373019933700562,
      "step": 430
    },
    {
      "epoch": 0.9209837781266352,
      "grad_norm": 13.682318687438965,
      "learning_rate": 9.12094829893642e-09,
      "logits/chosen": -2.718200445175171,
      "logits/rejected": -2.689492702484131,
      "logps/chosen": -413.35528564453125,
      "logps/rejected": -406.2598876953125,
      "loss": 0.6068,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.7059027552604675,
      "rewards/margins": 0.23966805636882782,
      "rewards/rejected": -0.9455708265304565,
      "step": 440
    },
    {
      "epoch": 0.9419152276295133,
      "grad_norm": 8.156304359436035,
      "learning_rate": 4.8708793644441086e-09,
      "logits/chosen": -2.63850736618042,
      "logits/rejected": -2.6505093574523926,
      "logps/chosen": -413.0054626464844,
      "logps/rejected": -420.00018310546875,
      "loss": 0.6209,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": -0.7106130719184875,
      "rewards/margins": 0.24646687507629395,
      "rewards/rejected": -0.957080066204071,
      "step": 450
    },
    {
      "epoch": 0.9628466771323915,
      "grad_norm": 10.356741905212402,
      "learning_rate": 1.9347820230782295e-09,
      "logits/chosen": -2.698603630065918,
      "logits/rejected": -2.671870231628418,
      "logps/chosen": -374.42828369140625,
      "logps/rejected": -359.28692626953125,
      "loss": 0.6226,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -0.7496173977851868,
      "rewards/margins": 0.20760098099708557,
      "rewards/rejected": -0.95721834897995,
      "step": 460
    },
    {
      "epoch": 0.9837781266352695,
      "grad_norm": 9.971595764160156,
      "learning_rate": 3.2839470889836627e-10,
      "logits/chosen": -2.7027974128723145,
      "logits/rejected": -2.684480905532837,
      "logps/chosen": -412.5672302246094,
      "logps/rejected": -383.63372802734375,
      "loss": 0.6113,
      "rewards/accuracies": 0.640625,
      "rewards/chosen": -0.7107462882995605,
      "rewards/margins": 0.1883649080991745,
      "rewards/rejected": -0.8991111516952515,
      "step": 470
    },
    {
      "epoch": 0.9984301412872841,
      "step": 477,
      "total_flos": 0.0,
      "train_loss": 0.6428392008415558,
      "train_runtime": 6631.0546,
      "train_samples_per_second": 9.219,
      "train_steps_per_second": 0.072
    }
  ],
  "logging_steps": 10,
  "max_steps": 477,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}