Jason Lee committed on
Commit
3a97bb8
1 Parent(s): 995493b

Model save

Browse files
README.md ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ tags:
3
+ - trl
4
+ - dpo
5
+ - generated_from_trainer
6
+ model-index:
7
+ - name: tinyllama-chat-mine-dpo
8
+ results: []
9
+ ---
10
+
11
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
12
+ should probably proofread and complete it, then remove this comment. -->
13
+
14
+ # tinyllama-chat-mine-dpo
15
+
16
+ This model was trained from scratch on an unknown dataset.
17
+ It achieves the following results on the evaluation set:
18
+ - Loss: 0.6240
19
+ - Rewards/chosen: -0.7007
20
+ - Rewards/rejected: -0.9684
21
+ - Rewards/accuracies: 0.6825
22
+ - Rewards/margins: 0.2677
23
+ - Logps/rejected: -395.3808
24
+ - Logps/chosen: -412.6868
25
+ - Logits/rejected: -2.7107
26
+ - Logits/chosen: -2.7399
27
+
28
+ ## Model description
29
+
30
+ More information needed
31
+
32
+ ## Intended uses & limitations
33
+
34
+ More information needed
35
+
36
+ ## Training and evaluation data
37
+
38
+ More information needed
39
+
40
+ ## Training procedure
41
+
42
+ ### Training hyperparameters
43
+
44
+ The following hyperparameters were used during training:
45
+ - learning_rate: 5e-07
46
+ - train_batch_size: 8
47
+ - eval_batch_size: 8
48
+ - seed: 42
49
+ - distributed_type: multi-GPU
50
+ - num_devices: 4
51
+ - gradient_accumulation_steps: 4
52
+ - total_train_batch_size: 128
53
+ - total_eval_batch_size: 32
54
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
55
+ - lr_scheduler_type: cosine
56
+ - lr_scheduler_warmup_ratio: 0.1
57
+ - num_epochs: 1
58
+
59
+ ### Training results
60
+
61
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
62
+ |:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
63
+ | 0.6685 | 0.2093 | 100 | 0.6694 | -0.1299 | -0.1943 | 0.6528 | 0.0644 | -317.9682 | -355.6027 | -2.9255 | -2.9507 |
64
+ | 0.642 | 0.4186 | 200 | 0.6407 | -0.4273 | -0.6175 | 0.6726 | 0.1902 | -360.2873 | -385.3470 | -2.7926 | -2.8208 |
65
+ | 0.6285 | 0.6279 | 300 | 0.6331 | -0.4723 | -0.6951 | 0.6647 | 0.2228 | -368.0482 | -389.8438 | -2.7731 | -2.8012 |
66
+ | 0.6222 | 0.8373 | 400 | 0.6240 | -0.7007 | -0.9684 | 0.6825 | 0.2677 | -395.3808 | -412.6868 | -2.7107 | -2.7399 |
67
+
68
+
69
+ ### Framework versions
70
+
71
+ - Transformers 4.43.3
72
+ - Pytorch 2.1.2
73
+ - Datasets 2.20.0
74
+ - Tokenizers 0.19.1
all_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.9984301412872841,
3
+ "total_flos": 0.0,
4
+ "train_loss": 0.6428392008415558,
5
+ "train_runtime": 6631.0546,
6
+ "train_samples": 61134,
7
+ "train_samples_per_second": 9.219,
8
+ "train_steps_per_second": 0.072
9
+ }
generation_config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 1,
3
+ "eos_token_id": 2,
4
+ "max_length": 2048,
5
+ "pad_token_id": 0,
6
+ "transformers_version": "4.43.3"
7
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.9984301412872841,
3
+ "total_flos": 0.0,
4
+ "train_loss": 0.6428392008415558,
5
+ "train_runtime": 6631.0546,
6
+ "train_samples": 61134,
7
+ "train_samples_per_second": 9.219,
8
+ "train_steps_per_second": 0.072
9
+ }
trainer_state.json ADDED
@@ -0,0 +1,826 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.9984301412872841,
5
+ "eval_steps": 100,
6
+ "global_step": 477,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0020931449502878076,
13
+ "grad_norm": 2.741797685623169,
14
+ "learning_rate": 1.0416666666666666e-08,
15
+ "logits/chosen": -3.1097278594970703,
16
+ "logits/rejected": -3.080122470855713,
17
+ "logps/chosen": -389.3681640625,
18
+ "logps/rejected": -352.00482177734375,
19
+ "loss": 0.6931,
20
+ "rewards/accuracies": 0.0,
21
+ "rewards/chosen": 0.0,
22
+ "rewards/margins": 0.0,
23
+ "rewards/rejected": 0.0,
24
+ "step": 1
25
+ },
26
+ {
27
+ "epoch": 0.020931449502878074,
28
+ "grad_norm": 2.746737480163574,
29
+ "learning_rate": 1.0416666666666667e-07,
30
+ "logits/chosen": -2.9152116775512695,
31
+ "logits/rejected": -2.905562400817871,
32
+ "logps/chosen": -323.6693420410156,
33
+ "logps/rejected": -297.4331359863281,
34
+ "loss": 0.6932,
35
+ "rewards/accuracies": 0.4201388955116272,
36
+ "rewards/chosen": -0.00030070680077187717,
37
+ "rewards/margins": -0.00017917003424372524,
38
+ "rewards/rejected": -0.00012153676652815193,
39
+ "step": 10
40
+ },
41
+ {
42
+ "epoch": 0.04186289900575615,
43
+ "grad_norm": 2.543320655822754,
44
+ "learning_rate": 2.0833333333333333e-07,
45
+ "logits/chosen": -2.9843814373016357,
46
+ "logits/rejected": -2.9781064987182617,
47
+ "logps/chosen": -335.30413818359375,
48
+ "logps/rejected": -316.4299621582031,
49
+ "loss": 0.6931,
50
+ "rewards/accuracies": 0.5218750238418579,
51
+ "rewards/chosen": 0.0004041799402330071,
52
+ "rewards/margins": 0.0005227966466918588,
53
+ "rewards/rejected": -0.0001186166555271484,
54
+ "step": 20
55
+ },
56
+ {
57
+ "epoch": 0.06279434850863422,
58
+ "grad_norm": 2.5893585681915283,
59
+ "learning_rate": 3.1249999999999997e-07,
60
+ "logits/chosen": -2.997720241546631,
61
+ "logits/rejected": -2.9677913188934326,
62
+ "logps/chosen": -345.15240478515625,
63
+ "logps/rejected": -295.96966552734375,
64
+ "loss": 0.6928,
65
+ "rewards/accuracies": 0.528124988079071,
66
+ "rewards/chosen": -0.0004641309496946633,
67
+ "rewards/margins": 0.000602933403570205,
68
+ "rewards/rejected": -0.0010670643532648683,
69
+ "step": 30
70
+ },
71
+ {
72
+ "epoch": 0.0837257980115123,
73
+ "grad_norm": 2.6134979724884033,
74
+ "learning_rate": 4.1666666666666667e-07,
75
+ "logits/chosen": -2.9335551261901855,
76
+ "logits/rejected": -2.9302399158477783,
77
+ "logps/chosen": -321.5934143066406,
78
+ "logps/rejected": -289.6072998046875,
79
+ "loss": 0.6919,
80
+ "rewards/accuracies": 0.637499988079071,
81
+ "rewards/chosen": -0.0015825815498828888,
82
+ "rewards/margins": 0.0028794072568416595,
83
+ "rewards/rejected": -0.004461988341063261,
84
+ "step": 40
85
+ },
86
+ {
87
+ "epoch": 0.10465724751439037,
88
+ "grad_norm": 2.6024513244628906,
89
+ "learning_rate": 4.999731868769026e-07,
90
+ "logits/chosen": -2.9198384284973145,
91
+ "logits/rejected": -2.8993093967437744,
92
+ "logps/chosen": -328.54644775390625,
93
+ "logps/rejected": -309.58087158203125,
94
+ "loss": 0.6901,
95
+ "rewards/accuracies": 0.5562499761581421,
96
+ "rewards/chosen": -0.005653353873640299,
97
+ "rewards/margins": 0.004262409172952175,
98
+ "rewards/rejected": -0.009915763512253761,
99
+ "step": 50
100
+ },
101
+ {
102
+ "epoch": 0.12558869701726844,
103
+ "grad_norm": 2.767859697341919,
104
+ "learning_rate": 4.990353313429303e-07,
105
+ "logits/chosen": -2.982860565185547,
106
+ "logits/rejected": -2.969365119934082,
107
+ "logps/chosen": -307.7032470703125,
108
+ "logps/rejected": -304.3933410644531,
109
+ "loss": 0.6874,
110
+ "rewards/accuracies": 0.578125,
111
+ "rewards/chosen": -0.015346085652709007,
112
+ "rewards/margins": 0.010624411515891552,
113
+ "rewards/rejected": -0.025970498099923134,
114
+ "step": 60
115
+ },
116
+ {
117
+ "epoch": 0.14652014652014653,
118
+ "grad_norm": 2.7651147842407227,
119
+ "learning_rate": 4.967625656594781e-07,
120
+ "logits/chosen": -2.9431705474853516,
121
+ "logits/rejected": -2.953648090362549,
122
+ "logps/chosen": -346.7774658203125,
123
+ "logps/rejected": -312.93865966796875,
124
+ "loss": 0.6826,
125
+ "rewards/accuracies": 0.653124988079071,
126
+ "rewards/chosen": -0.04352644830942154,
127
+ "rewards/margins": 0.024388128891587257,
128
+ "rewards/rejected": -0.06791457533836365,
129
+ "step": 70
130
+ },
131
+ {
132
+ "epoch": 0.1674515960230246,
133
+ "grad_norm": 2.939892053604126,
134
+ "learning_rate": 4.93167072587771e-07,
135
+ "logits/chosen": -2.973877429962158,
136
+ "logits/rejected": -2.9495885372161865,
137
+ "logps/chosen": -362.0614929199219,
138
+ "logps/rejected": -301.11322021484375,
139
+ "loss": 0.6771,
140
+ "rewards/accuracies": 0.6312500238418579,
141
+ "rewards/chosen": -0.08665237575769424,
142
+ "rewards/margins": 0.0347968153655529,
143
+ "rewards/rejected": -0.12144919484853745,
144
+ "step": 80
145
+ },
146
+ {
147
+ "epoch": 0.18838304552590268,
148
+ "grad_norm": 2.9581210613250732,
149
+ "learning_rate": 4.882681251368548e-07,
150
+ "logits/chosen": -2.9634270668029785,
151
+ "logits/rejected": -2.9433810710906982,
152
+ "logps/chosen": -323.24359130859375,
153
+ "logps/rejected": -298.01763916015625,
154
+ "loss": 0.6728,
155
+ "rewards/accuracies": 0.625,
156
+ "rewards/chosen": -0.13328225910663605,
157
+ "rewards/margins": 0.03973008692264557,
158
+ "rewards/rejected": -0.17301234602928162,
159
+ "step": 90
160
+ },
161
+ {
162
+ "epoch": 0.20931449502878074,
163
+ "grad_norm": 3.1116480827331543,
164
+ "learning_rate": 4.820919832540181e-07,
165
+ "logits/chosen": -2.9381461143493652,
166
+ "logits/rejected": -2.9422552585601807,
167
+ "logps/chosen": -342.9717102050781,
168
+ "logps/rejected": -310.58758544921875,
169
+ "loss": 0.6685,
170
+ "rewards/accuracies": 0.637499988079071,
171
+ "rewards/chosen": -0.14851507544517517,
172
+ "rewards/margins": 0.057429663836956024,
173
+ "rewards/rejected": -0.2059447318315506,
174
+ "step": 100
175
+ },
176
+ {
177
+ "epoch": 0.20931449502878074,
178
+ "eval_logits/chosen": -2.950716972351074,
179
+ "eval_logits/rejected": -2.9254934787750244,
180
+ "eval_logps/chosen": -355.60272216796875,
181
+ "eval_logps/rejected": -317.9681701660156,
182
+ "eval_loss": 0.6694281101226807,
183
+ "eval_rewards/accuracies": 0.6527777910232544,
184
+ "eval_rewards/chosen": -0.1298944652080536,
185
+ "eval_rewards/margins": 0.06442055851221085,
186
+ "eval_rewards/rejected": -0.19431501626968384,
187
+ "eval_runtime": 86.0849,
188
+ "eval_samples_per_second": 23.233,
189
+ "eval_steps_per_second": 0.732,
190
+ "step": 100
191
+ },
192
+ {
193
+ "epoch": 0.2302459445316588,
194
+ "grad_norm": 3.7738163471221924,
195
+ "learning_rate": 4.7467175306295647e-07,
196
+ "logits/chosen": -2.926741123199463,
197
+ "logits/rejected": -2.913757562637329,
198
+ "logps/chosen": -356.9674377441406,
199
+ "logps/rejected": -310.5869140625,
200
+ "loss": 0.6697,
201
+ "rewards/accuracies": 0.606249988079071,
202
+ "rewards/chosen": -0.14377950131893158,
203
+ "rewards/margins": 0.06109999865293503,
204
+ "rewards/rejected": -0.204879492521286,
205
+ "step": 110
206
+ },
207
+ {
208
+ "epoch": 0.25117739403453687,
209
+ "grad_norm": 3.45920467376709,
210
+ "learning_rate": 4.6604720940421207e-07,
211
+ "logits/chosen": -2.854685068130493,
212
+ "logits/rejected": -2.851743459701538,
213
+ "logps/chosen": -349.94189453125,
214
+ "logps/rejected": -327.46820068359375,
215
+ "loss": 0.6557,
216
+ "rewards/accuracies": 0.6625000238418579,
217
+ "rewards/chosen": -0.174159973859787,
218
+ "rewards/margins": 0.08476099371910095,
219
+ "rewards/rejected": -0.25892096757888794,
220
+ "step": 120
221
+ },
222
+ {
223
+ "epoch": 0.272108843537415,
224
+ "grad_norm": 4.213115692138672,
225
+ "learning_rate": 4.5626458262912735e-07,
226
+ "logits/chosen": -2.8723671436309814,
227
+ "logits/rejected": -2.8682103157043457,
228
+ "logps/chosen": -364.5023498535156,
229
+ "logps/rejected": -344.17413330078125,
230
+ "loss": 0.65,
231
+ "rewards/accuracies": 0.640625,
232
+ "rewards/chosen": -0.26356348395347595,
233
+ "rewards/margins": 0.08755739033222198,
234
+ "rewards/rejected": -0.35112088918685913,
235
+ "step": 130
236
+ },
237
+ {
238
+ "epoch": 0.29304029304029305,
239
+ "grad_norm": 4.351049900054932,
240
+ "learning_rate": 4.453763107901675e-07,
241
+ "logits/chosen": -2.8395862579345703,
242
+ "logits/rejected": -2.8383350372314453,
243
+ "logps/chosen": -386.54754638671875,
244
+ "logps/rejected": -353.8211975097656,
245
+ "loss": 0.6576,
246
+ "rewards/accuracies": 0.643750011920929,
247
+ "rewards/chosen": -0.30667954683303833,
248
+ "rewards/margins": 0.11848233640193939,
249
+ "rewards/rejected": -0.4251618981361389,
250
+ "step": 140
251
+ },
252
+ {
253
+ "epoch": 0.3139717425431711,
254
+ "grad_norm": 5.293145656585693,
255
+ "learning_rate": 4.3344075855595097e-07,
256
+ "logits/chosen": -2.837963581085205,
257
+ "logits/rejected": -2.8277411460876465,
258
+ "logps/chosen": -363.89532470703125,
259
+ "logps/rejected": -334.4164733886719,
260
+ "loss": 0.6562,
261
+ "rewards/accuracies": 0.5874999761581421,
262
+ "rewards/chosen": -0.3559018671512604,
263
+ "rewards/margins": 0.0798763707280159,
264
+ "rewards/rejected": -0.43577829003334045,
265
+ "step": 150
266
+ },
267
+ {
268
+ "epoch": 0.3349031920460492,
269
+ "grad_norm": 5.583098411560059,
270
+ "learning_rate": 4.2052190435769554e-07,
271
+ "logits/chosen": -2.8631508350372314,
272
+ "logits/rejected": -2.8266890048980713,
273
+ "logps/chosen": -359.99200439453125,
274
+ "logps/rejected": -320.49151611328125,
275
+ "loss": 0.6453,
276
+ "rewards/accuracies": 0.628125011920929,
277
+ "rewards/chosen": -0.34305575489997864,
278
+ "rewards/margins": 0.13357409834861755,
279
+ "rewards/rejected": -0.4766298234462738,
280
+ "step": 160
281
+ },
282
+ {
283
+ "epoch": 0.35583464154892724,
284
+ "grad_norm": 5.565986633300781,
285
+ "learning_rate": 4.0668899744407567e-07,
286
+ "logits/chosen": -2.800814628601074,
287
+ "logits/rejected": -2.7905514240264893,
288
+ "logps/chosen": -349.87725830078125,
289
+ "logps/rejected": -323.04150390625,
290
+ "loss": 0.6516,
291
+ "rewards/accuracies": 0.606249988079071,
292
+ "rewards/chosen": -0.4009243845939636,
293
+ "rewards/margins": 0.12320031225681305,
294
+ "rewards/rejected": -0.5241247415542603,
295
+ "step": 170
296
+ },
297
+ {
298
+ "epoch": 0.37676609105180536,
299
+ "grad_norm": 6.890281677246094,
300
+ "learning_rate": 3.920161866827889e-07,
301
+ "logits/chosen": -2.8282012939453125,
302
+ "logits/rejected": -2.8248977661132812,
303
+ "logps/chosen": -382.04364013671875,
304
+ "logps/rejected": -350.888427734375,
305
+ "loss": 0.64,
306
+ "rewards/accuracies": 0.6312500238418579,
307
+ "rewards/chosen": -0.4164988100528717,
308
+ "rewards/margins": 0.15513385832309723,
309
+ "rewards/rejected": -0.5716326832771301,
310
+ "step": 180
311
+ },
312
+ {
313
+ "epoch": 0.3976975405546834,
314
+ "grad_norm": 5.489590167999268,
315
+ "learning_rate": 3.765821230985757e-07,
316
+ "logits/chosen": -2.8130276203155518,
317
+ "logits/rejected": -2.7892467975616455,
318
+ "logps/chosen": -350.63873291015625,
319
+ "logps/rejected": -333.55718994140625,
320
+ "loss": 0.6408,
321
+ "rewards/accuracies": 0.6000000238418579,
322
+ "rewards/chosen": -0.3725319802761078,
323
+ "rewards/margins": 0.13300345838069916,
324
+ "rewards/rejected": -0.5055354833602905,
325
+ "step": 190
326
+ },
327
+ {
328
+ "epoch": 0.4186289900575615,
329
+ "grad_norm": 6.967275142669678,
330
+ "learning_rate": 3.604695382782159e-07,
331
+ "logits/chosen": -2.787351608276367,
332
+ "logits/rejected": -2.789077043533325,
333
+ "logps/chosen": -370.17864990234375,
334
+ "logps/rejected": -361.43988037109375,
335
+ "loss": 0.642,
336
+ "rewards/accuracies": 0.637499988079071,
337
+ "rewards/chosen": -0.38552746176719666,
338
+ "rewards/margins": 0.16969361901283264,
339
+ "rewards/rejected": -0.5552210807800293,
340
+ "step": 200
341
+ },
342
+ {
343
+ "epoch": 0.4186289900575615,
344
+ "eval_logits/chosen": -2.8207905292510986,
345
+ "eval_logits/rejected": -2.7925562858581543,
346
+ "eval_logps/chosen": -385.3469543457031,
347
+ "eval_logps/rejected": -360.2872619628906,
348
+ "eval_loss": 0.6406751871109009,
349
+ "eval_rewards/accuracies": 0.6726190447807312,
350
+ "eval_rewards/chosen": -0.4273369312286377,
351
+ "eval_rewards/margins": 0.19016937911510468,
352
+ "eval_rewards/rejected": -0.6175063252449036,
353
+ "eval_runtime": 86.2297,
354
+ "eval_samples_per_second": 23.194,
355
+ "eval_steps_per_second": 0.731,
356
+ "step": 200
357
+ },
358
+ {
359
+ "epoch": 0.43956043956043955,
360
+ "grad_norm": 8.497602462768555,
361
+ "learning_rate": 3.4376480090239047e-07,
362
+ "logits/chosen": -2.7831473350524902,
363
+ "logits/rejected": -2.7467095851898193,
364
+ "logps/chosen": -387.8429870605469,
365
+ "logps/rejected": -351.0397644042969,
366
+ "loss": 0.6471,
367
+ "rewards/accuracies": 0.637499988079071,
368
+ "rewards/chosen": -0.5111196041107178,
369
+ "rewards/margins": 0.1388254463672638,
370
+ "rewards/rejected": -0.649945080280304,
371
+ "step": 210
372
+ },
373
+ {
374
+ "epoch": 0.4604918890633176,
375
+ "grad_norm": 6.599562168121338,
376
+ "learning_rate": 3.265574537815398e-07,
377
+ "logits/chosen": -2.7829642295837402,
378
+ "logits/rejected": -2.7764458656311035,
379
+ "logps/chosen": -379.44464111328125,
380
+ "logps/rejected": -354.76092529296875,
381
+ "loss": 0.6423,
382
+ "rewards/accuracies": 0.637499988079071,
383
+ "rewards/chosen": -0.49129724502563477,
384
+ "rewards/margins": 0.16036386787891388,
385
+ "rewards/rejected": -0.6516611576080322,
386
+ "step": 220
387
+ },
388
+ {
389
+ "epoch": 0.48142333856619574,
390
+ "grad_norm": 6.966579914093018,
391
+ "learning_rate": 3.0893973387735683e-07,
392
+ "logits/chosen": -2.7712459564208984,
393
+ "logits/rejected": -2.760551929473877,
394
+ "logps/chosen": -370.9303283691406,
395
+ "logps/rejected": -359.6304626464844,
396
+ "loss": 0.6429,
397
+ "rewards/accuracies": 0.637499988079071,
398
+ "rewards/chosen": -0.436328649520874,
399
+ "rewards/margins": 0.17412248253822327,
400
+ "rewards/rejected": -0.6104511022567749,
401
+ "step": 230
402
+ },
403
+ {
404
+ "epoch": 0.5023547880690737,
405
+ "grad_norm": 6.429652690887451,
406
+ "learning_rate": 2.910060778827554e-07,
407
+ "logits/chosen": -2.7925572395324707,
408
+ "logits/rejected": -2.7672150135040283,
409
+ "logps/chosen": -370.3599548339844,
410
+ "logps/rejected": -349.8984680175781,
411
+ "loss": 0.6292,
412
+ "rewards/accuracies": 0.671875,
413
+ "rewards/chosen": -0.4170507490634918,
414
+ "rewards/margins": 0.2068098783493042,
415
+ "rewards/rejected": -0.6238606572151184,
416
+ "step": 240
417
+ },
418
+ {
419
+ "epoch": 0.5232862375719518,
420
+ "grad_norm": 6.210602283477783,
421
+ "learning_rate": 2.7285261601056697e-07,
422
+ "logits/chosen": -2.791151285171509,
423
+ "logits/rejected": -2.761967897415161,
424
+ "logps/chosen": -386.1891784667969,
425
+ "logps/rejected": -342.8139343261719,
426
+ "loss": 0.6327,
427
+ "rewards/accuracies": 0.6781250238418579,
428
+ "rewards/chosen": -0.42713984847068787,
429
+ "rewards/margins": 0.22475413978099823,
430
+ "rewards/rejected": -0.6518939137458801,
431
+ "step": 250
432
+ },
433
+ {
434
+ "epoch": 0.54421768707483,
435
+ "grad_norm": 6.485888481140137,
436
+ "learning_rate": 2.5457665670441937e-07,
437
+ "logits/chosen": -2.8333914279937744,
438
+ "logits/rejected": -2.8468918800354004,
439
+ "logps/chosen": -381.37078857421875,
440
+ "logps/rejected": -365.30963134765625,
441
+ "loss": 0.6332,
442
+ "rewards/accuracies": 0.684374988079071,
443
+ "rewards/chosen": -0.45365315675735474,
444
+ "rewards/margins": 0.20745711028575897,
445
+ "rewards/rejected": -0.6611102223396301,
446
+ "step": 260
447
+ },
448
+ {
449
+ "epoch": 0.565149136577708,
450
+ "grad_norm": 7.725765705108643,
451
+ "learning_rate": 2.3627616503391812e-07,
452
+ "logits/chosen": -2.8207736015319824,
453
+ "logits/rejected": -2.8081822395324707,
454
+ "logps/chosen": -386.81976318359375,
455
+ "logps/rejected": -361.7131652832031,
456
+ "loss": 0.6314,
457
+ "rewards/accuracies": 0.659375011920929,
458
+ "rewards/chosen": -0.44246116280555725,
459
+ "rewards/margins": 0.18473473191261292,
460
+ "rewards/rejected": -0.6271958351135254,
461
+ "step": 270
462
+ },
463
+ {
464
+ "epoch": 0.5860805860805861,
465
+ "grad_norm": 7.315251350402832,
466
+ "learning_rate": 2.1804923757009882e-07,
467
+ "logits/chosen": -2.7474558353424072,
468
+ "logits/rejected": -2.7452392578125,
469
+ "logps/chosen": -373.4273681640625,
470
+ "logps/rejected": -346.6417541503906,
471
+ "loss": 0.6341,
472
+ "rewards/accuracies": 0.671875,
473
+ "rewards/chosen": -0.4729071259498596,
474
+ "rewards/margins": 0.2166450470685959,
475
+ "rewards/rejected": -0.6895521879196167,
476
+ "step": 280
477
+ },
478
+ {
479
+ "epoch": 0.6070120355834642,
480
+ "grad_norm": 7.481057643890381,
481
+ "learning_rate": 1.9999357655598891e-07,
482
+ "logits/chosen": -2.8017992973327637,
483
+ "logits/rejected": -2.7807507514953613,
484
+ "logps/chosen": -372.48126220703125,
485
+ "logps/rejected": -341.9537048339844,
486
+ "loss": 0.6264,
487
+ "rewards/accuracies": 0.675000011920929,
488
+ "rewards/chosen": -0.5083416700363159,
489
+ "rewards/margins": 0.176277756690979,
490
+ "rewards/rejected": -0.6846194863319397,
491
+ "step": 290
492
+ },
493
+ {
494
+ "epoch": 0.6279434850863422,
495
+ "grad_norm": 8.147472381591797,
496
+ "learning_rate": 1.8220596619089573e-07,
497
+ "logits/chosen": -2.7440390586853027,
498
+ "logits/rejected": -2.717979907989502,
499
+ "logps/chosen": -375.93035888671875,
500
+ "logps/rejected": -361.93121337890625,
501
+ "loss": 0.6285,
502
+ "rewards/accuracies": 0.625,
503
+ "rewards/chosen": -0.4719668924808502,
504
+ "rewards/margins": 0.17040875554084778,
505
+ "rewards/rejected": -0.642375648021698,
506
+ "step": 300
507
+ },
508
+ {
509
+ "epoch": 0.6279434850863422,
510
+ "eval_logits/chosen": -2.801197052001953,
511
+ "eval_logits/rejected": -2.773117780685425,
512
+ "eval_logps/chosen": -389.8437805175781,
513
+ "eval_logps/rejected": -368.0482177734375,
514
+ "eval_loss": 0.6331161856651306,
515
+ "eval_rewards/accuracies": 0.6646825671195984,
516
+ "eval_rewards/chosen": -0.4723050594329834,
517
+ "eval_rewards/margins": 0.22281017899513245,
518
+ "eval_rewards/rejected": -0.6951152086257935,
519
+ "eval_runtime": 85.9334,
520
+ "eval_samples_per_second": 23.274,
521
+ "eval_steps_per_second": 0.733,
522
+ "step": 300
523
+ },
524
+ {
525
+ "epoch": 0.6488749345892203,
526
+ "grad_norm": 11.20168399810791,
527
+ "learning_rate": 1.647817538357072e-07,
528
+ "logits/chosen": -2.736398935317993,
529
+ "logits/rejected": -2.738858461380005,
530
+ "logps/chosen": -391.0700378417969,
531
+ "logps/rejected": -347.84356689453125,
532
+ "loss": 0.622,
533
+ "rewards/accuracies": 0.675000011920929,
534
+ "rewards/chosen": -0.4913768768310547,
535
+ "rewards/margins": 0.23215806484222412,
536
+ "rewards/rejected": -0.723534882068634,
537
+ "step": 310
538
+ },
539
+ {
540
+ "epoch": 0.6698063840920984,
541
+ "grad_norm": 7.335338592529297,
542
+ "learning_rate": 1.478143389201113e-07,
543
+ "logits/chosen": -2.7694268226623535,
544
+ "logits/rejected": -2.74495267868042,
545
+ "logps/chosen": -382.05419921875,
546
+ "logps/rejected": -351.9586486816406,
547
+ "loss": 0.6179,
548
+ "rewards/accuracies": 0.637499988079071,
549
+ "rewards/chosen": -0.5782755017280579,
550
+ "rewards/margins": 0.24550530314445496,
551
+ "rewards/rejected": -0.8237808346748352,
552
+ "step": 320
553
+ },
554
+ {
555
+ "epoch": 0.6907378335949764,
556
+ "grad_norm": 9.174397468566895,
557
+ "learning_rate": 1.3139467229135998e-07,
558
+ "logits/chosen": -2.7542152404785156,
559
+ "logits/rejected": -2.74013352394104,
560
+ "logps/chosen": -391.40618896484375,
561
+ "logps/rejected": -386.9635314941406,
562
+ "loss": 0.63,
563
+ "rewards/accuracies": 0.609375,
564
+ "rewards/chosen": -0.6302947998046875,
565
+ "rewards/margins": 0.17986619472503662,
566
+ "rewards/rejected": -0.8101609945297241,
567
+ "step": 330
568
+ },
569
+ {
570
+ "epoch": 0.7116692830978545,
571
+ "grad_norm": 8.58338451385498,
572
+ "learning_rate": 1.1561076868822755e-07,
573
+ "logits/chosen": -2.75496768951416,
574
+ "logits/rejected": -2.7243494987487793,
575
+ "logps/chosen": -427.29315185546875,
576
+ "logps/rejected": -422.3783264160156,
577
+ "loss": 0.6206,
578
+ "rewards/accuracies": 0.659375011920929,
579
+ "rewards/chosen": -0.6146808862686157,
580
+ "rewards/margins": 0.28181296586990356,
581
+ "rewards/rejected": -0.8964937925338745,
582
+ "step": 340
583
+ },
584
+ {
585
+ "epoch": 0.7326007326007326,
586
+ "grad_norm": 7.745279788970947,
587
+ "learning_rate": 1.0054723495346482e-07,
588
+ "logits/chosen": -2.7472786903381348,
589
+ "logits/rejected": -2.727072238922119,
590
+ "logps/chosen": -380.8338623046875,
591
+ "logps/rejected": -356.40875244140625,
592
+ "loss": 0.6116,
593
+ "rewards/accuracies": 0.6656249761581421,
594
+ "rewards/chosen": -0.6483052968978882,
595
+ "rewards/margins": 0.22691123187541962,
596
+ "rewards/rejected": -0.8752166628837585,
597
+ "step": 350
598
+ },
599
+ {
600
+ "epoch": 0.7535321821036107,
601
+ "grad_norm": 8.188222885131836,
602
+ "learning_rate": 8.628481651367875e-08,
603
+ "logits/chosen": -2.7281575202941895,
604
+ "logits/rejected": -2.714717388153076,
605
+ "logps/chosen": -399.79217529296875,
606
+ "logps/rejected": -374.2989807128906,
607
+ "loss": 0.6313,
608
+ "rewards/accuracies": 0.6625000238418579,
609
+ "rewards/chosen": -0.7062140107154846,
610
+ "rewards/margins": 0.22601504623889923,
611
+ "rewards/rejected": -0.9322290420532227,
612
+ "step": 360
613
+ },
614
+ {
615
+ "epoch": 0.7744636316064888,
616
+ "grad_norm": 7.2593889236450195,
617
+ "learning_rate": 7.289996455765748e-08,
618
+ "logits/chosen": -2.753688097000122,
619
+ "logits/rejected": -2.7259750366210938,
620
+ "logps/chosen": -383.2185974121094,
621
+ "logps/rejected": -367.9438781738281,
622
+ "loss": 0.6152,
623
+ "rewards/accuracies": 0.637499988079071,
624
+ "rewards/chosen": -0.7225135564804077,
625
+ "rewards/margins": 0.21898405253887177,
626
+ "rewards/rejected": -0.9414976239204407,
627
+ "step": 370
628
+ },
629
+ {
630
+ "epoch": 0.7953950811093669,
631
+ "grad_norm": 7.146636009216309,
632
+ "learning_rate": 6.046442623320145e-08,
633
+ "logits/chosen": -2.6859023571014404,
634
+ "logits/rejected": -2.695509433746338,
635
+ "logps/chosen": -363.49493408203125,
636
+ "logps/rejected": -377.5242004394531,
637
+ "loss": 0.6159,
638
+ "rewards/accuracies": 0.6656249761581421,
639
+ "rewards/chosen": -0.7377739548683167,
640
+ "rewards/margins": 0.25747013092041016,
641
+ "rewards/rejected": -0.995244026184082,
642
+ "step": 380
643
+ },
644
+ {
645
+ "epoch": 0.8163265306122449,
646
+ "grad_norm": 7.638937473297119,
647
+ "learning_rate": 4.904486005914027e-08,
648
+ "logits/chosen": -2.6865615844726562,
649
+ "logits/rejected": -2.6732540130615234,
650
+ "logps/chosen": -433.0126037597656,
651
+ "logps/rejected": -432.28448486328125,
652
+ "loss": 0.6203,
653
+ "rewards/accuracies": 0.6499999761581421,
654
+ "rewards/chosen": -0.6989966034889221,
655
+ "rewards/margins": 0.23467639088630676,
656
+ "rewards/rejected": -0.9336729049682617,
657
+ "step": 390
658
+ },
659
+ {
660
+ "epoch": 0.837257980115123,
661
+ "grad_norm": 8.645991325378418,
662
+ "learning_rate": 3.8702478614051345e-08,
663
+ "logits/chosen": -2.6967921257019043,
664
+ "logits/rejected": -2.6760878562927246,
665
+ "logps/chosen": -396.04595947265625,
666
+ "logps/rejected": -385.61810302734375,
667
+ "loss": 0.6222,
668
+ "rewards/accuracies": 0.6781250238418579,
669
+ "rewards/chosen": -0.674105167388916,
670
+ "rewards/margins": 0.25409311056137085,
671
+ "rewards/rejected": -0.9281982183456421,
672
+ "step": 400
673
+ },
674
+ {
675
+ "epoch": 0.837257980115123,
676
+ "eval_logits/chosen": -2.7399401664733887,
677
+ "eval_logits/rejected": -2.7107229232788086,
678
+ "eval_logps/chosen": -412.686767578125,
679
+ "eval_logps/rejected": -395.38079833984375,
680
+ "eval_loss": 0.6240472197532654,
681
+ "eval_rewards/accuracies": 0.682539701461792,
682
+ "eval_rewards/chosen": -0.7007347345352173,
683
+ "eval_rewards/margins": 0.2677067816257477,
684
+ "eval_rewards/rejected": -0.9684414863586426,
685
+ "eval_runtime": 86.0349,
686
+ "eval_samples_per_second": 23.246,
687
+ "eval_steps_per_second": 0.732,
688
+ "step": 400
689
+ },
690
+ {
691
+ "epoch": 0.858189429618001,
692
+ "grad_norm": 8.63017749786377,
693
+ "learning_rate": 2.9492720416985e-08,
694
+ "logits/chosen": -2.72432279586792,
695
+ "logits/rejected": -2.709479808807373,
696
+ "logps/chosen": -423.2232971191406,
697
+ "logps/rejected": -398.17779541015625,
698
+ "loss": 0.6347,
699
+ "rewards/accuracies": 0.6499999761581421,
700
+ "rewards/chosen": -0.6851434111595154,
701
+ "rewards/margins": 0.2306232750415802,
702
+ "rewards/rejected": -0.915766716003418,
703
+ "step": 410
704
+ },
705
+ {
706
+ "epoch": 0.8791208791208791,
707
+ "grad_norm": 8.239731788635254,
708
+ "learning_rate": 2.1464952759020856e-08,
709
+ "logits/chosen": -2.6693649291992188,
710
+ "logits/rejected": -2.672886371612549,
711
+ "logps/chosen": -392.8204040527344,
712
+ "logps/rejected": -393.4344177246094,
713
+ "loss": 0.6118,
714
+ "rewards/accuracies": 0.640625,
715
+ "rewards/chosen": -0.7332924008369446,
716
+ "rewards/margins": 0.24412448704242706,
717
+ "rewards/rejected": -0.9774168133735657,
718
+ "step": 420
719
+ },
720
+ {
721
+ "epoch": 0.9000523286237572,
722
+ "grad_norm": 8.310417175292969,
723
+ "learning_rate": 1.4662207078575684e-08,
724
+ "logits/chosen": -2.6678881645202637,
725
+ "logits/rejected": -2.6358609199523926,
726
+ "logps/chosen": -409.0691833496094,
727
+ "logps/rejected": -372.57183837890625,
728
+ "loss": 0.6136,
729
+ "rewards/accuracies": 0.653124988079071,
730
+ "rewards/chosen": -0.7174139618873596,
731
+ "rewards/margins": 0.21988801658153534,
732
+ "rewards/rejected": -0.9373019933700562,
733
+ "step": 430
734
+ },
735
+ {
736
+ "epoch": 0.9209837781266352,
737
+ "grad_norm": 13.682318687438965,
738
+ "learning_rate": 9.12094829893642e-09,
739
+ "logits/chosen": -2.718200445175171,
740
+ "logits/rejected": -2.689492702484131,
741
+ "logps/chosen": -413.35528564453125,
742
+ "logps/rejected": -406.2598876953125,
743
+ "loss": 0.6068,
744
+ "rewards/accuracies": 0.625,
745
+ "rewards/chosen": -0.7059027552604675,
746
+ "rewards/margins": 0.23966805636882782,
747
+ "rewards/rejected": -0.9455708265304565,
748
+ "step": 440
749
+ },
750
+ {
751
+ "epoch": 0.9419152276295133,
752
+ "grad_norm": 8.156304359436035,
753
+ "learning_rate": 4.8708793644441086e-09,
754
+ "logits/chosen": -2.63850736618042,
755
+ "logits/rejected": -2.6505093574523926,
756
+ "logps/chosen": -413.0054626464844,
757
+ "logps/rejected": -420.00018310546875,
758
+ "loss": 0.6209,
759
+ "rewards/accuracies": 0.671875,
760
+ "rewards/chosen": -0.7106130719184875,
761
+ "rewards/margins": 0.24646687507629395,
762
+ "rewards/rejected": -0.957080066204071,
763
+ "step": 450
764
+ },
765
+ {
766
+ "epoch": 0.9628466771323915,
767
+ "grad_norm": 10.356741905212402,
768
+ "learning_rate": 1.9347820230782295e-09,
769
+ "logits/chosen": -2.698603630065918,
770
+ "logits/rejected": -2.671870231628418,
771
+ "logps/chosen": -374.42828369140625,
772
+ "logps/rejected": -359.28692626953125,
773
+ "loss": 0.6226,
774
+ "rewards/accuracies": 0.643750011920929,
775
+ "rewards/chosen": -0.7496173977851868,
776
+ "rewards/margins": 0.20760098099708557,
777
+ "rewards/rejected": -0.95721834897995,
778
+ "step": 460
779
+ },
780
+ {
781
+ "epoch": 0.9837781266352695,
782
+ "grad_norm": 9.971595764160156,
783
+ "learning_rate": 3.2839470889836627e-10,
784
+ "logits/chosen": -2.7027974128723145,
785
+ "logits/rejected": -2.684480905532837,
786
+ "logps/chosen": -412.5672302246094,
787
+ "logps/rejected": -383.63372802734375,
788
+ "loss": 0.6113,
789
+ "rewards/accuracies": 0.640625,
790
+ "rewards/chosen": -0.7107462882995605,
791
+ "rewards/margins": 0.1883649080991745,
792
+ "rewards/rejected": -0.8991111516952515,
793
+ "step": 470
794
+ },
795
+ {
796
+ "epoch": 0.9984301412872841,
797
+ "step": 477,
798
+ "total_flos": 0.0,
799
+ "train_loss": 0.6428392008415558,
800
+ "train_runtime": 6631.0546,
801
+ "train_samples_per_second": 9.219,
802
+ "train_steps_per_second": 0.072
803
+ }
804
+ ],
805
+ "logging_steps": 10,
806
+ "max_steps": 477,
807
+ "num_input_tokens_seen": 0,
808
+ "num_train_epochs": 1,
809
+ "save_steps": 100,
810
+ "stateful_callbacks": {
811
+ "TrainerControl": {
812
+ "args": {
813
+ "should_epoch_stop": false,
814
+ "should_evaluate": false,
815
+ "should_log": false,
816
+ "should_save": true,
817
+ "should_training_stop": true
818
+ },
819
+ "attributes": {}
820
+ }
821
+ },
822
+ "total_flos": 0.0,
823
+ "train_batch_size": 8,
824
+ "trial_name": null,
825
+ "trial_params": null
826
+ }