{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.2039127163280663,
  "eval_steps": 30,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012039127163280662,
      "grad_norm": 11.953282356262207,
      "learning_rate": 5.555555555555555e-08,
      "logits/chosen": -0.48816660046577454,
      "logits/rejected": -0.42142170667648315,
      "logps/chosen": -117.26611328125,
      "logps/rejected": -125.41987609863281,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06019563581640331,
      "grad_norm": 16.68506622314453,
      "learning_rate": 2.7777777777777776e-07,
      "logits/chosen": -0.46595269441604614,
      "logits/rejected": -0.356529176235199,
      "logps/chosen": -190.95057678222656,
      "logps/rejected": -211.25076293945312,
      "loss": 0.6926,
      "rewards/accuracies": 0.453125,
      "rewards/chosen": 0.0007588082225993276,
      "rewards/margins": 0.0022044419310986996,
      "rewards/rejected": -0.0014456338249146938,
      "step": 5
    },
    {
      "epoch": 0.12039127163280662,
      "grad_norm": 13.722668647766113,
      "learning_rate": 4.999499509357132e-07,
      "logits/chosen": -0.4793759286403656,
      "logits/rejected": -0.37052756547927856,
      "logps/chosen": -155.6678009033203,
      "logps/rejected": -199.44947814941406,
      "loss": 0.6889,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.005186144262552261,
      "rewards/margins": 0.009383995085954666,
      "rewards/rejected": -0.004197851754724979,
      "step": 10
    },
    {
      "epoch": 0.18058690744920994,
      "grad_norm": 12.493337631225586,
      "learning_rate": 4.982003369106287e-07,
      "logits/chosen": -0.49185729026794434,
      "logits/rejected": -0.37670475244522095,
      "logps/chosen": -76.30625915527344,
      "logps/rejected": -177.40817260742188,
      "loss": 0.6691,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.028129320591688156,
      "rewards/margins": 0.048517487943172455,
      "rewards/rejected": -0.020388163626194,
      "step": 15
    },
    {
      "epoch": 0.24078254326561324,
      "grad_norm": 11.79749870300293,
      "learning_rate": 4.939682729058838e-07,
      "logits/chosen": -0.45126277208328247,
      "logits/rejected": -0.3729521930217743,
      "logps/chosen": -166.20733642578125,
      "logps/rejected": -207.5035400390625,
      "loss": 0.6249,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.08626963198184967,
      "rewards/margins": 0.14315639436244965,
      "rewards/rejected": -0.05688678100705147,
      "step": 20
    },
    {
      "epoch": 0.3009781790820166,
      "grad_norm": 10.574383735656738,
      "learning_rate": 4.872960871766826e-07,
      "logits/chosen": -0.4710594713687897,
      "logits/rejected": -0.3694532513618469,
      "logps/chosen": -86.93299865722656,
      "logps/rejected": -186.15248107910156,
      "loss": 0.587,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.12460210174322128,
      "rewards/margins": 0.23137669265270233,
      "rewards/rejected": -0.10677458345890045,
      "step": 25
    },
    {
      "epoch": 0.3611738148984199,
      "grad_norm": 11.093204498291016,
      "learning_rate": 4.782505135862175e-07,
      "logits/chosen": -0.45945605635643005,
      "logits/rejected": -0.33354875445365906,
      "logps/chosen": -71.20188903808594,
      "logps/rejected": -212.95767211914062,
      "loss": 0.5229,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.16831260919570923,
      "rewards/margins": 0.38181251287460327,
      "rewards/rejected": -0.21349990367889404,
      "step": 30
    },
    {
      "epoch": 0.3611738148984199,
      "eval_logits/chosen": -0.4618959426879883,
      "eval_logits/rejected": -0.3433874249458313,
      "eval_logps/chosen": -98.28858947753906,
      "eval_logps/rejected": -212.0100555419922,
      "eval_loss": 0.5059286952018738,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": 0.18195854127407074,
      "eval_rewards/margins": 0.4181654751300812,
      "eval_rewards/rejected": -0.23620688915252686,
      "eval_runtime": 179.4182,
      "eval_samples_per_second": 3.043,
      "eval_steps_per_second": 1.522,
      "step": 30
    },
    {
      "epoch": 0.4213694507148232,
      "grad_norm": 9.376220703125,
      "learning_rate": 4.6692202414695724e-07,
      "logits/chosen": -0.4632042944431305,
      "logits/rejected": -0.35029542446136475,
      "logps/chosen": -84.06396484375,
      "logps/rejected": -213.848388671875,
      "loss": 0.4976,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.2034817487001419,
      "rewards/margins": 0.4632874131202698,
      "rewards/rejected": -0.25980567932128906,
      "step": 35
    },
    {
      "epoch": 0.4815650865312265,
      "grad_norm": 8.679346084594727,
      "learning_rate": 4.534239241377266e-07,
      "logits/chosen": -0.44362330436706543,
      "logits/rejected": -0.2992916703224182,
      "logps/chosen": -105.2283706665039,
      "logps/rejected": -244.84890747070312,
      "loss": 0.4197,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.22778573632240295,
      "rewards/margins": 0.6910415291786194,
      "rewards/rejected": -0.46325573325157166,
      "step": 40
    },
    {
      "epoch": 0.5417607223476298,
      "grad_norm": 7.219143867492676,
      "learning_rate": 4.3789121884703727e-07,
      "logits/chosen": -0.41270333528518677,
      "logits/rejected": -0.27924439311027527,
      "logps/chosen": -70.08865356445312,
      "logps/rejected": -261.56170654296875,
      "loss": 0.3621,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.23598209023475647,
      "rewards/margins": 0.9187321662902832,
      "rewards/rejected": -0.6827500462532043,
      "step": 45
    },
    {
      "epoch": 0.6019563581640331,
      "grad_norm": 6.640863418579102,
      "learning_rate": 4.204792632772754e-07,
      "logits/chosen": -0.4174782633781433,
      "logits/rejected": -0.2659801244735718,
      "logps/chosen": -109.1211166381836,
      "logps/rejected": -280.77813720703125,
      "loss": 0.3123,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.2913265824317932,
      "rewards/margins": 1.1760694980621338,
      "rewards/rejected": -0.8847430348396301,
      "step": 50
    },
    {
      "epoch": 0.6621519939804364,
      "grad_norm": 5.293730735778809,
      "learning_rate": 4.01362208315132e-07,
      "logits/chosen": -0.4078051447868347,
      "logits/rejected": -0.25378990173339844,
      "logps/chosen": -116.1395492553711,
      "logps/rejected": -301.702392578125,
      "loss": 0.2619,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.3083065152168274,
      "rewards/margins": 1.4346027374267578,
      "rewards/rejected": -1.1262962818145752,
      "step": 55
    },
    {
      "epoch": 0.7223476297968398,
      "grad_norm": 4.923187255859375,
      "learning_rate": 3.807312589093701e-07,
      "logits/chosen": -0.4022981524467468,
      "logits/rejected": -0.2537968158721924,
      "logps/chosen": -103.5102310180664,
      "logps/rejected": -326.17486572265625,
      "loss": 0.2411,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.2954918146133423,
      "rewards/margins": 1.6640812158584595,
      "rewards/rejected": -1.3685895204544067,
      "step": 60
    },
    {
      "epoch": 0.7223476297968398,
      "eval_logits/chosen": -0.406698077917099,
      "eval_logits/rejected": -0.23272451758384705,
      "eval_logps/chosen": -88.900146484375,
      "eval_logps/rejected": -330.7860107421875,
      "eval_loss": 0.2134791761636734,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": 0.27584296464920044,
      "eval_rewards/margins": 1.699809193611145,
      "eval_rewards/rejected": -1.4239662885665894,
      "eval_runtime": 183.6706,
      "eval_samples_per_second": 2.973,
      "eval_steps_per_second": 1.486,
      "step": 60
    },
    {
      "epoch": 0.782543265613243,
      "grad_norm": 4.418694496154785,
      "learning_rate": 3.5879276167728337e-07,
      "logits/chosen": -0.4011690616607666,
      "logits/rejected": -0.22693100571632385,
      "logps/chosen": -56.017845153808594,
      "logps/rejected": -332.90380859375,
      "loss": 0.1992,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.2577177882194519,
      "rewards/margins": 1.7556695938110352,
      "rewards/rejected": -1.497951865196228,
      "step": 65
    },
    {
      "epoch": 0.8427389014296464,
      "grad_norm": 3.794067859649658,
      "learning_rate": 3.357661410672247e-07,
      "logits/chosen": -0.33221831917762756,
      "logits/rejected": -0.1342475712299347,
      "logps/chosen": -74.8525619506836,
      "logps/rejected": -393.6372985839844,
      "loss": 0.1573,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.25318774580955505,
      "rewards/margins": 2.2917141914367676,
      "rewards/rejected": -2.0385265350341797,
      "step": 70
    },
    {
      "epoch": 0.9029345372460497,
      "grad_norm": 3.2060582637786865,
      "learning_rate": 3.1188170471929064e-07,
      "logits/chosen": -0.2731170058250427,
      "logits/rejected": -0.10557065159082413,
      "logps/chosen": -161.33474731445312,
      "logps/rejected": -437.1578063964844,
      "loss": 0.1191,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.2284388542175293,
      "rewards/margins": 2.7684743404388428,
      "rewards/rejected": -2.5400352478027344,
      "step": 75
    },
    {
      "epoch": 0.963130173062453,
      "grad_norm": 1.8243048191070557,
      "learning_rate": 2.8737833997450657e-07,
      "logits/chosen": -0.2729615569114685,
      "logits/rejected": -0.0838087797164917,
      "logps/chosen": -80.7784423828125,
      "logps/rejected": -492.26080322265625,
      "loss": 0.0926,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.21951308846473694,
      "rewards/margins": 3.2957847118377686,
      "rewards/rejected": -3.0762715339660645,
      "step": 80
    },
    {
      "epoch": 1.0233258088788564,
      "grad_norm": 1.6663548946380615,
      "learning_rate": 2.6250112457156293e-07,
      "logits/chosen": -0.2614014744758606,
      "logits/rejected": -0.06510574370622635,
      "logps/chosen": -87.82209777832031,
      "logps/rejected": -556.6070556640625,
      "loss": 0.0775,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.1841917783021927,
      "rewards/margins": 3.8404979705810547,
      "rewards/rejected": -3.656306028366089,
      "step": 85
    },
    {
      "epoch": 1.0835214446952597,
      "grad_norm": 1.4261465072631836,
      "learning_rate": 2.3749887542843707e-07,
      "logits/chosen": -0.26909708976745605,
      "logits/rejected": -0.0703195109963417,
      "logps/chosen": -100.4935531616211,
      "logps/rejected": -598.0264892578125,
      "loss": 0.0634,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.16738824546337128,
      "rewards/margins": 4.255741119384766,
      "rewards/rejected": -4.088352680206299,
      "step": 90
    },
    {
      "epoch": 1.0835214446952597,
      "eval_logits/chosen": -0.25795042514801025,
      "eval_logits/rejected": -0.035701148211956024,
      "eval_logps/chosen": -99.51206970214844,
      "eval_logps/rejected": -607.3591918945312,
      "eval_loss": 0.07514728605747223,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": 0.16972379386425018,
      "eval_rewards/margins": 4.359421730041504,
      "eval_rewards/rejected": -4.189698219299316,
      "eval_runtime": 184.8502,
      "eval_samples_per_second": 2.954,
      "eval_steps_per_second": 1.477,
      "step": 90
    },
    {
      "epoch": 1.143717080511663,
      "grad_norm": 1.3145774602890015,
      "learning_rate": 2.126216600254934e-07,
      "logits/chosen": -0.2394520789384842,
      "logits/rejected": -0.023534994572401047,
      "logps/chosen": -150.5535888671875,
      "logps/rejected": -699.5059814453125,
      "loss": 0.0605,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.09343000501394272,
      "rewards/margins": 5.061221122741699,
      "rewards/rejected": -4.967791557312012,
      "step": 95
    },
    {
      "epoch": 1.2039127163280663,
      "grad_norm": 0.628934919834137,
      "learning_rate": 1.8811829528070931e-07,
      "logits/chosen": -0.2859761714935303,
      "logits/rejected": -0.027079975232481956,
      "logps/chosen": -72.18778991699219,
      "logps/rejected": -737.0675048828125,
      "loss": 0.0489,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.1680155098438263,
      "rewards/margins": 5.535449028015137,
      "rewards/rejected": -5.367433547973633,
      "step": 100
    }
  ],
  "logging_steps": 5,
  "max_steps": 166,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}