yiran-wang3 committed
Commit 3251614
1 parent: 5db277f

End of training

README.md ADDED
@@ -0,0 +1,64 @@
+ ---
+ library_name: transformers
+ license: apache-2.0
+ base_model: yiran-wang3/qwen2_chat_reflct_adamw_iter3
+ tags:
+ - alignment-handbook
+ - generated_from_trainer
+ - trl
+ - dpo
+ datasets:
+ - self-generate/qw2_reflct_sppo_hard_new_cn_mining_oj_iter3-binarized-reflection-scored
+ model-index:
+ - name: qwen2_chat_reflct_adamw_iter4
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # qwen2_chat_reflct_adamw_iter4
+
+ This model is a fine-tuned version of [yiran-wang3/qwen2_chat_reflct_adamw_iter3](https://huggingface.co/yiran-wang3/qwen2_chat_reflct_adamw_iter3) on the self-generate/qw2_reflct_sppo_hard_new_cn_mining_oj_iter3-binarized-reflection-scored dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-06
+ - train_batch_size: 8
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: constant
+ - lr_scheduler_warmup_ratio: 0.1
+ - lr_scheduler_warmup_steps: 100
+ - num_epochs: 1.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.45.0
+ - Pytorch 2.4.0+cu121
+ - Datasets 2.14.6
+ - Tokenizers 0.20.2
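
The hyperparameters above describe a TRL-style DPO run, but the training script itself is not part of this commit. Purely as a hedged sketch of how such a run might be reproduced (assuming a `trl` version whose `DPOConfig`/`DPOTrainer` accept these arguments), the snippet below wires the reported settings together; the output directory, `beta`, and the dataset column names are assumptions, and the actual alignment-handbook recipe may differ.

```python
# Hypothetical reproduction sketch -- the actual training script is not included in this commit.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

BASE = "yiran-wang3/qwen2_chat_reflct_adamw_iter3"
tokenizer = AutoTokenizer.from_pretrained(BASE)
model = AutoModelForCausalLM.from_pretrained(BASE)

# Dataset named in the model card; prompt/chosen/rejected column names are assumed.
train_dataset = load_dataset(
    "self-generate/qw2_reflct_sppo_hard_new_cn_mining_oj_iter3-binarized-reflection-scored",
    split="train",
)

# Mirrors the hyperparameters reported above (8 per device x 8 GPUs = 64 effective batch size).
args = DPOConfig(
    output_dir="qwen2_chat_reflct_adamw_iter4",  # assumed
    learning_rate=1e-6,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=4,
    num_train_epochs=1.0,
    lr_scheduler_type="constant",
    warmup_steps=100,
    seed=42,
    bf16=True,        # torch_dtype in config.json is bfloat16
    beta=0.01,        # assumed; the DPO beta is not reported in the card
    logging_steps=1,  # matches trainer_state.json
)

trainer = DPOTrainer(
    model=model,      # ref_model left unset; DPOTrainer falls back to a frozen copy of the policy
    args=args,
    train_dataset=train_dataset,
    tokenizer=tokenizer,
)
trainer.train()
```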
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 1.0,
+ "total_flos": 0.0,
+ "train_loss": 0.489490317743878,
+ "train_runtime": 149.7969,
+ "train_samples": 2744,
+ "train_samples_per_second": 18.318,
+ "train_steps_per_second": 0.287
+ }
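
As a quick consistency check on the numbers above (not part of the original files): 2744 training samples and 43 optimizer steps (the `global_step` in trainer_state.json below) over a runtime of 149.7969 s reproduce the reported throughput.

```python
# Sanity check of the reported throughput; values are copied from
# all_results.json and trainer_state.json in this commit.
train_runtime = 149.7969   # seconds
train_samples = 2744
global_step = 43

print(round(train_samples / train_runtime, 3))  # 18.318 samples per second
print(round(global_step / train_runtime, 3))    # 0.287 steps per second
```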
config.json CHANGED
@@ -23,7 +23,7 @@
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.45.0",
- "use_cache": false,
+ "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 151936
  }
generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "bos_token_id": 151643,
+ "do_sample": true,
+ "eos_token_id": [
+ 151645,
+ 151643
+ ],
+ "pad_token_id": 151643,
+ "repetition_penalty": 1.1,
+ "temperature": 0.7,
+ "top_k": 20,
+ "top_p": 0.8,
+ "transformers_version": "4.45.0"
+ }
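
These are the sampling defaults shipped with the checkpoint, so `generate()` picks them up automatically. A minimal inference sketch, assuming the standard `transformers` text-generation API (the prompt is purely illustrative):

```python
# Minimal inference sketch; generate() reads do_sample, temperature=0.7, top_p=0.8,
# top_k=20 and repetition_penalty=1.1 from the generation_config.json above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "yiran-wang3/qwen2_chat_reflct_adamw_iter4"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

messages = [{"role": "user", "content": "Write a Python function that reverses a string."}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

output = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```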
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 1.0,
+ "total_flos": 0.0,
+ "train_loss": 0.489490317743878,
+ "train_runtime": 149.7969,
+ "train_samples": 2744,
+ "train_samples_per_second": 18.318,
+ "train_steps_per_second": 0.287
+ }
trainer_state.json ADDED
@@ -0,0 +1,945 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.0,
5
+ "eval_steps": 100,
6
+ "global_step": 43,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "debug/policy_chosen_logits": -1.5687581300735474,
13
+ "debug/policy_chosen_logps": -240.2513427734375,
14
+ "debug/policy_rejected_logits": -1.6221139430999756,
15
+ "debug/policy_rejected_logps": -264.4752197265625,
16
+ "debug/reference_chosen_logps": -240.2513427734375,
17
+ "debug/reference_rejected_logps": -264.4752197265625,
18
+ "epoch": 0.023255813953488372,
19
+ "grad_norm": 14.314275545525218,
20
+ "learning_rate": 1e-06,
21
+ "logits/chosen": -1.5687581300735474,
22
+ "logits/rejected": -1.6221139430999756,
23
+ "logps/chosen": -240.2513427734375,
24
+ "logps/rejected": -264.4752197265625,
25
+ "loss": 0.5,
26
+ "rewards/accuracies": 0.0,
27
+ "rewards/chosen": 0.0,
28
+ "rewards/margins": 0.0,
29
+ "rewards/rejected": 0.0,
30
+ "step": 1
31
+ },
32
+ {
33
+ "debug/policy_chosen_logits": -1.4775172472000122,
34
+ "debug/policy_chosen_logps": -235.343994140625,
35
+ "debug/policy_rejected_logits": -1.3492165803909302,
36
+ "debug/policy_rejected_logps": -283.1033935546875,
37
+ "debug/reference_chosen_logps": -234.93467712402344,
38
+ "debug/reference_rejected_logps": -283.2170104980469,
39
+ "epoch": 0.046511627906976744,
40
+ "grad_norm": 18.67922014806989,
41
+ "learning_rate": 1e-06,
42
+ "logits/chosen": -1.4775172472000122,
43
+ "logits/rejected": -1.3492165803909302,
44
+ "logps/chosen": -235.343994140625,
45
+ "logps/rejected": -283.1033935546875,
46
+ "loss": 0.4959,
47
+ "rewards/accuracies": 0.25,
48
+ "rewards/chosen": -0.004093170166015625,
49
+ "rewards/margins": -0.005229205824434757,
50
+ "rewards/rejected": 0.0011360361240804195,
51
+ "step": 2
52
+ },
53
+ {
54
+ "debug/policy_chosen_logits": -1.6865235567092896,
55
+ "debug/policy_chosen_logps": -230.66635131835938,
56
+ "debug/policy_rejected_logits": -1.6258912086486816,
57
+ "debug/policy_rejected_logps": -228.0758514404297,
58
+ "debug/reference_chosen_logps": -225.64306640625,
59
+ "debug/reference_rejected_logps": -223.4805908203125,
60
+ "epoch": 0.06976744186046512,
61
+ "grad_norm": 25.946369831783773,
62
+ "learning_rate": 1e-06,
63
+ "logits/chosen": -1.6865235567092896,
64
+ "logits/rejected": -1.6258912086486816,
65
+ "logps/chosen": -230.66635131835938,
66
+ "logps/rejected": -228.0758514404297,
67
+ "loss": 0.5069,
68
+ "rewards/accuracies": 0.5,
69
+ "rewards/chosen": -0.050232600420713425,
70
+ "rewards/margins": -0.004280166234821081,
71
+ "rewards/rejected": -0.04595243185758591,
72
+ "step": 3
73
+ },
74
+ {
75
+ "debug/policy_chosen_logits": -1.6345511674880981,
76
+ "debug/policy_chosen_logps": -230.35598754882812,
77
+ "debug/policy_rejected_logits": -1.594412088394165,
78
+ "debug/policy_rejected_logps": -235.36544799804688,
79
+ "debug/reference_chosen_logps": -227.8475799560547,
80
+ "debug/reference_rejected_logps": -230.77169799804688,
81
+ "epoch": 0.09302325581395349,
82
+ "grad_norm": 12.562991726069878,
83
+ "learning_rate": 1e-06,
84
+ "logits/chosen": -1.6345511674880981,
85
+ "logits/rejected": -1.594412088394165,
86
+ "logps/chosen": -230.35598754882812,
87
+ "logps/rejected": -235.36544799804688,
88
+ "loss": 0.4907,
89
+ "rewards/accuracies": 0.875,
90
+ "rewards/chosen": -0.025084247812628746,
91
+ "rewards/margins": 0.02085309848189354,
92
+ "rewards/rejected": -0.045937344431877136,
93
+ "step": 4
94
+ },
95
+ {
96
+ "debug/policy_chosen_logits": -1.5187644958496094,
97
+ "debug/policy_chosen_logps": -209.38815307617188,
98
+ "debug/policy_rejected_logits": -1.5565170049667358,
99
+ "debug/policy_rejected_logps": -261.0048522949219,
100
+ "debug/reference_chosen_logps": -204.9683837890625,
101
+ "debug/reference_rejected_logps": -256.2153015136719,
102
+ "epoch": 0.11627906976744186,
103
+ "grad_norm": 42.7709320228073,
104
+ "learning_rate": 1e-06,
105
+ "logits/chosen": -1.5187644958496094,
106
+ "logits/rejected": -1.5565170049667358,
107
+ "logps/chosen": -209.38815307617188,
108
+ "logps/rejected": -261.0048522949219,
109
+ "loss": 0.5197,
110
+ "rewards/accuracies": 0.625,
111
+ "rewards/chosen": -0.04419763758778572,
112
+ "rewards/margins": 0.0036978721618652344,
113
+ "rewards/rejected": -0.04789550602436066,
114
+ "step": 5
115
+ },
116
+ {
117
+ "debug/policy_chosen_logits": -1.654346227645874,
118
+ "debug/policy_chosen_logps": -208.22152709960938,
119
+ "debug/policy_rejected_logits": -1.472536325454712,
120
+ "debug/policy_rejected_logps": -277.9122314453125,
121
+ "debug/reference_chosen_logps": -208.6928253173828,
122
+ "debug/reference_rejected_logps": -277.05023193359375,
123
+ "epoch": 0.13953488372093023,
124
+ "grad_norm": 18.148530267479675,
125
+ "learning_rate": 1e-06,
126
+ "logits/chosen": -1.654346227645874,
127
+ "logits/rejected": -1.472536325454712,
128
+ "logps/chosen": -208.22152709960938,
129
+ "logps/rejected": -277.9122314453125,
130
+ "loss": 0.507,
131
+ "rewards/accuracies": 0.5,
132
+ "rewards/chosen": 0.0047130584716796875,
133
+ "rewards/margins": 0.013332920148968697,
134
+ "rewards/rejected": -0.008619861677289009,
135
+ "step": 6
136
+ },
137
+ {
138
+ "debug/policy_chosen_logits": -1.619295597076416,
139
+ "debug/policy_chosen_logps": -240.17440795898438,
140
+ "debug/policy_rejected_logits": -1.5930582284927368,
141
+ "debug/policy_rejected_logps": -303.7572326660156,
142
+ "debug/reference_chosen_logps": -240.71119689941406,
143
+ "debug/reference_rejected_logps": -304.2488708496094,
144
+ "epoch": 0.16279069767441862,
145
+ "grad_norm": 18.12592422181744,
146
+ "learning_rate": 1e-06,
147
+ "logits/chosen": -1.619295597076416,
148
+ "logits/rejected": -1.5930582284927368,
149
+ "logps/chosen": -240.17440795898438,
150
+ "logps/rejected": -303.7572326660156,
151
+ "loss": 0.5014,
152
+ "rewards/accuracies": 0.625,
153
+ "rewards/chosen": 0.005367736332118511,
154
+ "rewards/margins": 0.0004512788727879524,
155
+ "rewards/rejected": 0.004916457924991846,
156
+ "step": 7
157
+ },
158
+ {
159
+ "debug/policy_chosen_logits": -1.5426918268203735,
160
+ "debug/policy_chosen_logps": -242.49334716796875,
161
+ "debug/policy_rejected_logits": -1.515419363975525,
162
+ "debug/policy_rejected_logps": -246.33676147460938,
163
+ "debug/reference_chosen_logps": -245.80419921875,
164
+ "debug/reference_rejected_logps": -248.84983825683594,
165
+ "epoch": 0.18604651162790697,
166
+ "grad_norm": 37.16270378133235,
167
+ "learning_rate": 1e-06,
168
+ "logits/chosen": -1.5426918268203735,
169
+ "logits/rejected": -1.515419363975525,
170
+ "logps/chosen": -242.49334716796875,
171
+ "logps/rejected": -246.33676147460938,
172
+ "loss": 0.4963,
173
+ "rewards/accuracies": 0.625,
174
+ "rewards/chosen": 0.03310825303196907,
175
+ "rewards/margins": 0.007977409288287163,
176
+ "rewards/rejected": 0.02513084188103676,
177
+ "step": 8
178
+ },
179
+ {
180
+ "debug/policy_chosen_logits": -1.5298963785171509,
181
+ "debug/policy_chosen_logps": -225.92041015625,
182
+ "debug/policy_rejected_logits": -1.4147241115570068,
183
+ "debug/policy_rejected_logps": -270.5355224609375,
184
+ "debug/reference_chosen_logps": -228.79443359375,
185
+ "debug/reference_rejected_logps": -272.68603515625,
186
+ "epoch": 0.20930232558139536,
187
+ "grad_norm": 18.861065558487233,
188
+ "learning_rate": 1e-06,
189
+ "logits/chosen": -1.5298963785171509,
190
+ "logits/rejected": -1.4147241115570068,
191
+ "logps/chosen": -225.92041015625,
192
+ "logps/rejected": -270.5355224609375,
193
+ "loss": 0.5,
194
+ "rewards/accuracies": 0.5,
195
+ "rewards/chosen": 0.0287402905523777,
196
+ "rewards/margins": 0.007235164754092693,
197
+ "rewards/rejected": 0.021505124866962433,
198
+ "step": 9
199
+ },
200
+ {
201
+ "debug/policy_chosen_logits": -1.5066547393798828,
202
+ "debug/policy_chosen_logps": -216.030517578125,
203
+ "debug/policy_rejected_logits": -1.4625401496887207,
204
+ "debug/policy_rejected_logps": -217.58367919921875,
205
+ "debug/reference_chosen_logps": -219.08502197265625,
206
+ "debug/reference_rejected_logps": -219.8885955810547,
207
+ "epoch": 0.23255813953488372,
208
+ "grad_norm": 12.691840821738246,
209
+ "learning_rate": 1e-06,
210
+ "logits/chosen": -1.5066547393798828,
211
+ "logits/rejected": -1.4625401496887207,
212
+ "logps/chosen": -216.030517578125,
213
+ "logps/rejected": -217.58367919921875,
214
+ "loss": 0.4982,
215
+ "rewards/accuracies": 0.75,
216
+ "rewards/chosen": 0.030545100569725037,
217
+ "rewards/margins": 0.007496070582419634,
218
+ "rewards/rejected": 0.02304903045296669,
219
+ "step": 10
220
+ },
221
+ {
222
+ "debug/policy_chosen_logits": -1.6109825372695923,
223
+ "debug/policy_chosen_logps": -194.70681762695312,
224
+ "debug/policy_rejected_logits": -1.5127055644989014,
225
+ "debug/policy_rejected_logps": -261.20880126953125,
226
+ "debug/reference_chosen_logps": -200.0032958984375,
227
+ "debug/reference_rejected_logps": -264.6978454589844,
228
+ "epoch": 0.2558139534883721,
229
+ "grad_norm": 34.88211840288691,
230
+ "learning_rate": 1e-06,
231
+ "logits/chosen": -1.6109825372695923,
232
+ "logits/rejected": -1.5127055644989014,
233
+ "logps/chosen": -194.70681762695312,
234
+ "logps/rejected": -261.20880126953125,
235
+ "loss": 0.5055,
236
+ "rewards/accuracies": 0.625,
237
+ "rewards/chosen": 0.05296493321657181,
238
+ "rewards/margins": 0.018074415624141693,
239
+ "rewards/rejected": 0.034890517592430115,
240
+ "step": 11
241
+ },
242
+ {
243
+ "debug/policy_chosen_logits": -1.644713282585144,
244
+ "debug/policy_chosen_logps": -242.20831298828125,
245
+ "debug/policy_rejected_logits": -1.6428948640823364,
246
+ "debug/policy_rejected_logps": -256.0648498535156,
247
+ "debug/reference_chosen_logps": -245.20326232910156,
248
+ "debug/reference_rejected_logps": -257.87481689453125,
249
+ "epoch": 0.27906976744186046,
250
+ "grad_norm": 24.62141741438646,
251
+ "learning_rate": 1e-06,
252
+ "logits/chosen": -1.644713282585144,
253
+ "logits/rejected": -1.6428948640823364,
254
+ "logps/chosen": -242.20831298828125,
255
+ "logps/rejected": -256.0648498535156,
256
+ "loss": 0.507,
257
+ "rewards/accuracies": 0.5,
258
+ "rewards/chosen": 0.029949625954031944,
259
+ "rewards/margins": 0.0118501465767622,
260
+ "rewards/rejected": 0.018099479377269745,
261
+ "step": 12
262
+ },
263
+ {
264
+ "debug/policy_chosen_logits": -1.6160894632339478,
265
+ "debug/policy_chosen_logps": -227.63302612304688,
266
+ "debug/policy_rejected_logits": -1.6384341716766357,
267
+ "debug/policy_rejected_logps": -264.2388916015625,
268
+ "debug/reference_chosen_logps": -230.62490844726562,
269
+ "debug/reference_rejected_logps": -264.9801025390625,
270
+ "epoch": 0.3023255813953488,
271
+ "grad_norm": 10.776881537717472,
272
+ "learning_rate": 1e-06,
273
+ "logits/chosen": -1.6160894632339478,
274
+ "logits/rejected": -1.6384341716766357,
275
+ "logps/chosen": -227.63302612304688,
276
+ "logps/rejected": -264.2388916015625,
277
+ "loss": 0.4976,
278
+ "rewards/accuracies": 0.75,
279
+ "rewards/chosen": 0.029918955639004707,
280
+ "rewards/margins": 0.022506674751639366,
281
+ "rewards/rejected": 0.007412281818687916,
282
+ "step": 13
283
+ },
284
+ {
285
+ "debug/policy_chosen_logits": -1.6279401779174805,
286
+ "debug/policy_chosen_logps": -220.282958984375,
287
+ "debug/policy_rejected_logits": -1.4893845319747925,
288
+ "debug/policy_rejected_logps": -272.10931396484375,
289
+ "debug/reference_chosen_logps": -222.31028747558594,
290
+ "debug/reference_rejected_logps": -272.71044921875,
291
+ "epoch": 0.32558139534883723,
292
+ "grad_norm": 10.036756062227226,
293
+ "learning_rate": 1e-06,
294
+ "logits/chosen": -1.6279401779174805,
295
+ "logits/rejected": -1.4893845319747925,
296
+ "logps/chosen": -220.282958984375,
297
+ "logps/rejected": -272.10931396484375,
298
+ "loss": 0.4935,
299
+ "rewards/accuracies": 0.625,
300
+ "rewards/chosen": 0.020273476839065552,
301
+ "rewards/margins": 0.014262351207435131,
302
+ "rewards/rejected": 0.006011123303323984,
303
+ "step": 14
304
+ },
305
+ {
306
+ "debug/policy_chosen_logits": -1.5787980556488037,
307
+ "debug/policy_chosen_logps": -261.0044250488281,
308
+ "debug/policy_rejected_logits": -1.3867720365524292,
309
+ "debug/policy_rejected_logps": -301.5718994140625,
310
+ "debug/reference_chosen_logps": -258.84735107421875,
311
+ "debug/reference_rejected_logps": -297.58404541015625,
312
+ "epoch": 0.3488372093023256,
313
+ "grad_norm": 10.65558553278192,
314
+ "learning_rate": 1e-06,
315
+ "logits/chosen": -1.5787980556488037,
316
+ "logits/rejected": -1.3867720365524292,
317
+ "logps/chosen": -261.0044250488281,
318
+ "logps/rejected": -301.5718994140625,
319
+ "loss": 0.4873,
320
+ "rewards/accuracies": 0.875,
321
+ "rewards/chosen": -0.021570798009634018,
322
+ "rewards/margins": 0.018307799473404884,
323
+ "rewards/rejected": -0.03987859562039375,
324
+ "step": 15
325
+ },
326
+ {
327
+ "debug/policy_chosen_logits": -1.5956577062606812,
328
+ "debug/policy_chosen_logps": -222.5416259765625,
329
+ "debug/policy_rejected_logits": -1.4434815645217896,
330
+ "debug/policy_rejected_logps": -279.348388671875,
331
+ "debug/reference_chosen_logps": -221.23260498046875,
332
+ "debug/reference_rejected_logps": -273.97540283203125,
333
+ "epoch": 0.37209302325581395,
334
+ "grad_norm": 36.49853384207237,
335
+ "learning_rate": 1e-06,
336
+ "logits/chosen": -1.5956577062606812,
337
+ "logits/rejected": -1.4434815645217896,
338
+ "logps/chosen": -222.5416259765625,
339
+ "logps/rejected": -279.348388671875,
340
+ "loss": 0.5003,
341
+ "rewards/accuracies": 0.625,
342
+ "rewards/chosen": -0.013090074062347412,
343
+ "rewards/margins": 0.040639691054821014,
344
+ "rewards/rejected": -0.053729765117168427,
345
+ "step": 16
346
+ },
347
+ {
348
+ "debug/policy_chosen_logits": -1.4420965909957886,
349
+ "debug/policy_chosen_logps": -215.4423828125,
350
+ "debug/policy_rejected_logits": -1.5232738256454468,
351
+ "debug/policy_rejected_logps": -288.1341552734375,
352
+ "debug/reference_chosen_logps": -213.68832397460938,
353
+ "debug/reference_rejected_logps": -286.45086669921875,
354
+ "epoch": 0.3953488372093023,
355
+ "grad_norm": 24.203474268576745,
356
+ "learning_rate": 1e-06,
357
+ "logits/chosen": -1.4420965909957886,
358
+ "logits/rejected": -1.5232738256454468,
359
+ "logps/chosen": -215.4423828125,
360
+ "logps/rejected": -288.1341552734375,
361
+ "loss": 0.4918,
362
+ "rewards/accuracies": 0.875,
363
+ "rewards/chosen": -0.01754041761159897,
364
+ "rewards/margins": -0.0007077232003211975,
365
+ "rewards/rejected": -0.01683269441127777,
366
+ "step": 17
367
+ },
368
+ {
369
+ "debug/policy_chosen_logits": -1.4279348850250244,
370
+ "debug/policy_chosen_logps": -242.89749145507812,
371
+ "debug/policy_rejected_logits": -1.3261935710906982,
372
+ "debug/policy_rejected_logps": -230.28863525390625,
373
+ "debug/reference_chosen_logps": -240.6783447265625,
374
+ "debug/reference_rejected_logps": -226.51815795898438,
375
+ "epoch": 0.4186046511627907,
376
+ "grad_norm": 44.37824041962939,
377
+ "learning_rate": 1e-06,
378
+ "logits/chosen": -1.4279348850250244,
379
+ "logits/rejected": -1.3261935710906982,
380
+ "logps/chosen": -242.89749145507812,
381
+ "logps/rejected": -230.28863525390625,
382
+ "loss": 0.5044,
383
+ "rewards/accuracies": 0.5,
384
+ "rewards/chosen": -0.022191638126969337,
385
+ "rewards/margins": 0.01551321055740118,
386
+ "rewards/rejected": -0.03770485147833824,
387
+ "step": 18
388
+ },
389
+ {
390
+ "debug/policy_chosen_logits": -1.58405339717865,
391
+ "debug/policy_chosen_logps": -236.5458984375,
392
+ "debug/policy_rejected_logits": -1.5539088249206543,
393
+ "debug/policy_rejected_logps": -233.43719482421875,
394
+ "debug/reference_chosen_logps": -236.3321533203125,
395
+ "debug/reference_rejected_logps": -231.97726440429688,
396
+ "epoch": 0.4418604651162791,
397
+ "grad_norm": 12.540633658003186,
398
+ "learning_rate": 1e-06,
399
+ "logits/chosen": -1.58405339717865,
400
+ "logits/rejected": -1.5539088249206543,
401
+ "logps/chosen": -236.5458984375,
402
+ "logps/rejected": -233.43719482421875,
403
+ "loss": 0.5059,
404
+ "rewards/accuracies": 0.625,
405
+ "rewards/chosen": -0.002137584611773491,
406
+ "rewards/margins": 0.012461718171834946,
407
+ "rewards/rejected": -0.014599304646253586,
408
+ "step": 19
409
+ },
410
+ {
411
+ "debug/policy_chosen_logits": -1.466562032699585,
412
+ "debug/policy_chosen_logps": -194.52447509765625,
413
+ "debug/policy_rejected_logits": -1.4163392782211304,
414
+ "debug/policy_rejected_logps": -237.53216552734375,
415
+ "debug/reference_chosen_logps": -194.98049926757812,
416
+ "debug/reference_rejected_logps": -237.75314331054688,
417
+ "epoch": 0.46511627906976744,
418
+ "grad_norm": 14.133924214011163,
419
+ "learning_rate": 1e-06,
420
+ "logits/chosen": -1.466562032699585,
421
+ "logits/rejected": -1.4163392782211304,
422
+ "logps/chosen": -194.52447509765625,
423
+ "logps/rejected": -237.53216552734375,
424
+ "loss": 0.4895,
425
+ "rewards/accuracies": 0.625,
426
+ "rewards/chosen": 0.004560394212603569,
427
+ "rewards/margins": 0.00235048308968544,
428
+ "rewards/rejected": 0.0022099113557487726,
429
+ "step": 20
430
+ },
431
+ {
432
+ "debug/policy_chosen_logits": -1.5900827646255493,
433
+ "debug/policy_chosen_logps": -212.4789276123047,
434
+ "debug/policy_rejected_logits": -1.4683177471160889,
435
+ "debug/policy_rejected_logps": -257.5311279296875,
436
+ "debug/reference_chosen_logps": -211.9457550048828,
437
+ "debug/reference_rejected_logps": -254.302490234375,
438
+ "epoch": 0.4883720930232558,
439
+ "grad_norm": 17.882111129846162,
440
+ "learning_rate": 1e-06,
441
+ "logits/chosen": -1.5900827646255493,
442
+ "logits/rejected": -1.4683177471160889,
443
+ "logps/chosen": -212.4789276123047,
444
+ "logps/rejected": -257.5311279296875,
445
+ "loss": 0.4799,
446
+ "rewards/accuracies": 0.625,
447
+ "rewards/chosen": -0.00533168762922287,
448
+ "rewards/margins": 0.026954688131809235,
449
+ "rewards/rejected": -0.032286375761032104,
450
+ "step": 21
451
+ },
452
+ {
453
+ "debug/policy_chosen_logits": -1.4656310081481934,
454
+ "debug/policy_chosen_logps": -263.0152282714844,
455
+ "debug/policy_rejected_logits": -1.3645009994506836,
456
+ "debug/policy_rejected_logps": -283.24566650390625,
457
+ "debug/reference_chosen_logps": -262.25970458984375,
458
+ "debug/reference_rejected_logps": -280.8420715332031,
459
+ "epoch": 0.5116279069767442,
460
+ "grad_norm": 17.255770475731207,
461
+ "learning_rate": 1e-06,
462
+ "logits/chosen": -1.4656310081481934,
463
+ "logits/rejected": -1.3645009994506836,
464
+ "logps/chosen": -263.0152282714844,
465
+ "logps/rejected": -283.24566650390625,
466
+ "loss": 0.479,
467
+ "rewards/accuracies": 0.75,
468
+ "rewards/chosen": -0.007554950192570686,
469
+ "rewards/margins": 0.01648113504052162,
470
+ "rewards/rejected": -0.02403608150780201,
471
+ "step": 22
472
+ },
473
+ {
474
+ "debug/policy_chosen_logits": -1.4521846771240234,
475
+ "debug/policy_chosen_logps": -216.39169311523438,
476
+ "debug/policy_rejected_logits": -1.3643300533294678,
477
+ "debug/policy_rejected_logps": -281.2818603515625,
478
+ "debug/reference_chosen_logps": -223.86587524414062,
479
+ "debug/reference_rejected_logps": -278.6108703613281,
480
+ "epoch": 0.5348837209302325,
481
+ "grad_norm": 22.42065485842657,
482
+ "learning_rate": 1e-06,
483
+ "logits/chosen": -1.4521846771240234,
484
+ "logits/rejected": -1.3643300533294678,
485
+ "logps/chosen": -216.39169311523438,
486
+ "logps/rejected": -281.2818603515625,
487
+ "loss": 0.4761,
488
+ "rewards/accuracies": 0.875,
489
+ "rewards/chosen": 0.07474187761545181,
490
+ "rewards/margins": 0.10145200788974762,
491
+ "rewards/rejected": -0.02671012654900551,
492
+ "step": 23
493
+ },
494
+ {
495
+ "debug/policy_chosen_logits": -1.596596121788025,
496
+ "debug/policy_chosen_logps": -240.6717987060547,
497
+ "debug/policy_rejected_logits": -1.5593181848526,
498
+ "debug/policy_rejected_logps": -339.8119812011719,
499
+ "debug/reference_chosen_logps": -242.0146484375,
500
+ "debug/reference_rejected_logps": -335.9112548828125,
501
+ "epoch": 0.5581395348837209,
502
+ "grad_norm": 15.69048628226096,
503
+ "learning_rate": 1e-06,
504
+ "logits/chosen": -1.596596121788025,
505
+ "logits/rejected": -1.5593181848526,
506
+ "logps/chosen": -240.6717987060547,
507
+ "logps/rejected": -339.8119812011719,
508
+ "loss": 0.4765,
509
+ "rewards/accuracies": 0.875,
510
+ "rewards/chosen": 0.013428498059511185,
511
+ "rewards/margins": 0.05243583396077156,
512
+ "rewards/rejected": -0.039007339626550674,
513
+ "step": 24
514
+ },
515
+ {
516
+ "debug/policy_chosen_logits": -1.5627775192260742,
517
+ "debug/policy_chosen_logps": -243.03045654296875,
518
+ "debug/policy_rejected_logits": -1.5146582126617432,
519
+ "debug/policy_rejected_logps": -339.4427490234375,
520
+ "debug/reference_chosen_logps": -244.926513671875,
521
+ "debug/reference_rejected_logps": -337.1531982421875,
522
+ "epoch": 0.5813953488372093,
523
+ "grad_norm": 23.599376538578916,
524
+ "learning_rate": 1e-06,
525
+ "logits/chosen": -1.5627775192260742,
526
+ "logits/rejected": -1.5146582126617432,
527
+ "logps/chosen": -243.03045654296875,
528
+ "logps/rejected": -339.4427490234375,
529
+ "loss": 0.4976,
530
+ "rewards/accuracies": 0.75,
531
+ "rewards/chosen": 0.018960533663630486,
532
+ "rewards/margins": 0.04185573384165764,
533
+ "rewards/rejected": -0.02289520390331745,
534
+ "step": 25
535
+ },
536
+ {
537
+ "debug/policy_chosen_logits": -1.4482909440994263,
538
+ "debug/policy_chosen_logps": -243.66656494140625,
539
+ "debug/policy_rejected_logits": -1.3864490985870361,
540
+ "debug/policy_rejected_logps": -269.7567138671875,
541
+ "debug/reference_chosen_logps": -243.5550079345703,
542
+ "debug/reference_rejected_logps": -265.46270751953125,
543
+ "epoch": 0.6046511627906976,
544
+ "grad_norm": 15.856946618319062,
545
+ "learning_rate": 1e-06,
546
+ "logits/chosen": -1.4482909440994263,
547
+ "logits/rejected": -1.3864490985870361,
548
+ "logps/chosen": -243.66656494140625,
549
+ "logps/rejected": -269.7567138671875,
550
+ "loss": 0.4718,
551
+ "rewards/accuracies": 0.875,
552
+ "rewards/chosen": -0.001115493942052126,
553
+ "rewards/margins": 0.04182462394237518,
554
+ "rewards/rejected": -0.04294012114405632,
555
+ "step": 26
556
+ },
557
+ {
558
+ "debug/policy_chosen_logits": -1.6767849922180176,
559
+ "debug/policy_chosen_logps": -235.87283325195312,
560
+ "debug/policy_rejected_logits": -1.4402155876159668,
561
+ "debug/policy_rejected_logps": -255.25155639648438,
562
+ "debug/reference_chosen_logps": -234.12271118164062,
563
+ "debug/reference_rejected_logps": -252.92681884765625,
564
+ "epoch": 0.627906976744186,
565
+ "grad_norm": 26.8171661009806,
566
+ "learning_rate": 1e-06,
567
+ "logits/chosen": -1.6767849922180176,
568
+ "logits/rejected": -1.4402155876159668,
569
+ "logps/chosen": -235.87283325195312,
570
+ "logps/rejected": -255.25155639648438,
571
+ "loss": 0.4911,
572
+ "rewards/accuracies": 0.5,
573
+ "rewards/chosen": -0.017501164227724075,
574
+ "rewards/margins": 0.005746154114603996,
575
+ "rewards/rejected": -0.023247316479682922,
576
+ "step": 27
577
+ },
578
+ {
579
+ "debug/policy_chosen_logits": -1.474959135055542,
580
+ "debug/policy_chosen_logps": -250.03805541992188,
581
+ "debug/policy_rejected_logits": -1.33174729347229,
582
+ "debug/policy_rejected_logps": -275.7778015136719,
583
+ "debug/reference_chosen_logps": -246.68450927734375,
584
+ "debug/reference_rejected_logps": -271.7251892089844,
585
+ "epoch": 0.6511627906976745,
586
+ "grad_norm": 23.009106257244806,
587
+ "learning_rate": 1e-06,
588
+ "logits/chosen": -1.474959135055542,
589
+ "logits/rejected": -1.33174729347229,
590
+ "logps/chosen": -250.03805541992188,
591
+ "logps/rejected": -275.7778015136719,
592
+ "loss": 0.4741,
593
+ "rewards/accuracies": 0.5,
594
+ "rewards/chosen": -0.03353559225797653,
595
+ "rewards/margins": 0.006990719586610794,
596
+ "rewards/rejected": -0.040526311844587326,
597
+ "step": 28
598
+ },
599
+ {
600
+ "debug/policy_chosen_logits": -1.5180094242095947,
601
+ "debug/policy_chosen_logps": -239.39962768554688,
602
+ "debug/policy_rejected_logits": -1.3486112356185913,
603
+ "debug/policy_rejected_logps": -280.152587890625,
604
+ "debug/reference_chosen_logps": -235.7870635986328,
605
+ "debug/reference_rejected_logps": -271.69512939453125,
606
+ "epoch": 0.6744186046511628,
607
+ "grad_norm": 29.559799359310976,
608
+ "learning_rate": 1e-06,
609
+ "logits/chosen": -1.5180094242095947,
610
+ "logits/rejected": -1.3486112356185913,
611
+ "logps/chosen": -239.39962768554688,
612
+ "logps/rejected": -280.152587890625,
613
+ "loss": 0.4648,
614
+ "rewards/accuracies": 0.875,
615
+ "rewards/chosen": -0.03612573444843292,
616
+ "rewards/margins": 0.04844905436038971,
617
+ "rewards/rejected": -0.08457479625940323,
618
+ "step": 29
619
+ },
620
+ {
621
+ "debug/policy_chosen_logits": -1.5323574542999268,
622
+ "debug/policy_chosen_logps": -230.34732055664062,
623
+ "debug/policy_rejected_logits": -1.4196269512176514,
624
+ "debug/policy_rejected_logps": -299.549072265625,
625
+ "debug/reference_chosen_logps": -227.33663940429688,
626
+ "debug/reference_rejected_logps": -290.41229248046875,
627
+ "epoch": 0.6976744186046512,
628
+ "grad_norm": 24.104606390659608,
629
+ "learning_rate": 1e-06,
630
+ "logits/chosen": -1.5323574542999268,
631
+ "logits/rejected": -1.4196269512176514,
632
+ "logps/chosen": -230.34732055664062,
633
+ "logps/rejected": -299.549072265625,
634
+ "loss": 0.4845,
635
+ "rewards/accuracies": 0.625,
636
+ "rewards/chosen": -0.03010694310069084,
637
+ "rewards/margins": 0.0612606406211853,
638
+ "rewards/rejected": -0.09136758744716644,
639
+ "step": 30
640
+ },
641
+ {
642
+ "debug/policy_chosen_logits": -1.5471208095550537,
643
+ "debug/policy_chosen_logps": -233.48435974121094,
644
+ "debug/policy_rejected_logits": -1.507702350616455,
645
+ "debug/policy_rejected_logps": -299.49298095703125,
646
+ "debug/reference_chosen_logps": -230.37808227539062,
647
+ "debug/reference_rejected_logps": -290.85491943359375,
648
+ "epoch": 0.7209302325581395,
649
+ "grad_norm": 11.501362477806966,
650
+ "learning_rate": 1e-06,
651
+ "logits/chosen": -1.5471208095550537,
652
+ "logits/rejected": -1.507702350616455,
653
+ "logps/chosen": -233.48435974121094,
654
+ "logps/rejected": -299.49298095703125,
655
+ "loss": 0.4777,
656
+ "rewards/accuracies": 0.75,
657
+ "rewards/chosen": -0.031062887981534004,
658
+ "rewards/margins": 0.05531751364469528,
659
+ "rewards/rejected": -0.08638040721416473,
660
+ "step": 31
661
+ },
662
+ {
663
+ "debug/policy_chosen_logits": -1.3573694229125977,
664
+ "debug/policy_chosen_logps": -276.0484619140625,
665
+ "debug/policy_rejected_logits": -1.4295967817306519,
666
+ "debug/policy_rejected_logps": -251.19253540039062,
667
+ "debug/reference_chosen_logps": -269.07147216796875,
668
+ "debug/reference_rejected_logps": -245.41119384765625,
669
+ "epoch": 0.7441860465116279,
670
+ "grad_norm": 44.933121351184646,
671
+ "learning_rate": 1e-06,
672
+ "logits/chosen": -1.3573694229125977,
673
+ "logits/rejected": -1.4295967817306519,
674
+ "logps/chosen": -276.0484619140625,
675
+ "logps/rejected": -251.19253540039062,
676
+ "loss": 0.4882,
677
+ "rewards/accuracies": 0.375,
678
+ "rewards/chosen": -0.06977000832557678,
679
+ "rewards/margins": -0.011956671252846718,
680
+ "rewards/rejected": -0.057813338935375214,
681
+ "step": 32
682
+ },
683
+ {
684
+ "debug/policy_chosen_logits": -1.532698392868042,
685
+ "debug/policy_chosen_logps": -210.01528930664062,
686
+ "debug/policy_rejected_logits": -1.5289320945739746,
687
+ "debug/policy_rejected_logps": -266.588134765625,
688
+ "debug/reference_chosen_logps": -212.3720245361328,
689
+ "debug/reference_rejected_logps": -261.5830993652344,
690
+ "epoch": 0.7674418604651163,
691
+ "grad_norm": 16.92613442301389,
692
+ "learning_rate": 1e-06,
693
+ "logits/chosen": -1.532698392868042,
694
+ "logits/rejected": -1.5289320945739746,
695
+ "logps/chosen": -210.01528930664062,
696
+ "logps/rejected": -266.588134765625,
697
+ "loss": 0.4701,
698
+ "rewards/accuracies": 0.75,
699
+ "rewards/chosen": 0.023567447438836098,
700
+ "rewards/margins": 0.0736178606748581,
701
+ "rewards/rejected": -0.050050411373376846,
702
+ "step": 33
703
+ },
704
+ {
705
+ "debug/policy_chosen_logits": -1.6075690984725952,
706
+ "debug/policy_chosen_logps": -222.28057861328125,
707
+ "debug/policy_rejected_logits": -1.5301072597503662,
708
+ "debug/policy_rejected_logps": -231.0042724609375,
709
+ "debug/reference_chosen_logps": -221.35081481933594,
710
+ "debug/reference_rejected_logps": -232.4139404296875,
711
+ "epoch": 0.7906976744186046,
712
+ "grad_norm": 30.286832542440518,
713
+ "learning_rate": 1e-06,
714
+ "logits/chosen": -1.6075690984725952,
715
+ "logits/rejected": -1.5301072597503662,
716
+ "logps/chosen": -222.28057861328125,
717
+ "logps/rejected": -231.0042724609375,
718
+ "loss": 0.4957,
719
+ "rewards/accuracies": 0.375,
720
+ "rewards/chosen": -0.009297618642449379,
721
+ "rewards/margins": -0.023394297808408737,
722
+ "rewards/rejected": 0.014096679165959358,
723
+ "step": 34
724
+ },
725
+ {
726
+ "debug/policy_chosen_logits": -1.6365246772766113,
727
+ "debug/policy_chosen_logps": -214.15689086914062,
728
+ "debug/policy_rejected_logits": -1.3568267822265625,
729
+ "debug/policy_rejected_logps": -274.84320068359375,
730
+ "debug/reference_chosen_logps": -219.61041259765625,
731
+ "debug/reference_rejected_logps": -271.06793212890625,
732
+ "epoch": 0.813953488372093,
733
+ "grad_norm": 10.413515346816709,
734
+ "learning_rate": 1e-06,
735
+ "logits/chosen": -1.6365246772766113,
736
+ "logits/rejected": -1.3568267822265625,
737
+ "logps/chosen": -214.15689086914062,
738
+ "logps/rejected": -274.84320068359375,
739
+ "loss": 0.4745,
740
+ "rewards/accuracies": 0.875,
741
+ "rewards/chosen": 0.05453508347272873,
742
+ "rewards/margins": 0.09228822588920593,
743
+ "rewards/rejected": -0.0377531424164772,
744
+ "step": 35
745
+ },
746
+ {
747
+ "debug/policy_chosen_logits": -1.4882985353469849,
748
+ "debug/policy_chosen_logps": -224.943115234375,
749
+ "debug/policy_rejected_logits": -1.5247392654418945,
750
+ "debug/policy_rejected_logps": -291.1658020019531,
751
+ "debug/reference_chosen_logps": -230.88302612304688,
752
+ "debug/reference_rejected_logps": -292.3128967285156,
753
+ "epoch": 0.8372093023255814,
754
+ "grad_norm": 21.658575078370163,
755
+ "learning_rate": 1e-06,
756
+ "logits/chosen": -1.4882985353469849,
757
+ "logits/rejected": -1.5247392654418945,
758
+ "logps/chosen": -224.943115234375,
759
+ "logps/rejected": -291.1658020019531,
760
+ "loss": 0.4603,
761
+ "rewards/accuracies": 0.625,
762
+ "rewards/chosen": 0.059399355202913284,
763
+ "rewards/margins": 0.04792825132608414,
764
+ "rewards/rejected": 0.011471100151538849,
765
+ "step": 36
766
+ },
767
+ {
768
+ "debug/policy_chosen_logits": -1.5648789405822754,
769
+ "debug/policy_chosen_logps": -232.97958374023438,
770
+ "debug/policy_rejected_logits": -1.4898722171783447,
771
+ "debug/policy_rejected_logps": -264.453369140625,
772
+ "debug/reference_chosen_logps": -236.86109924316406,
773
+ "debug/reference_rejected_logps": -266.53448486328125,
774
+ "epoch": 0.8604651162790697,
775
+ "grad_norm": 13.183542989418173,
776
+ "learning_rate": 1e-06,
777
+ "logits/chosen": -1.5648789405822754,
778
+ "logits/rejected": -1.4898722171783447,
779
+ "logps/chosen": -232.97958374023438,
780
+ "logps/rejected": -264.453369140625,
781
+ "loss": 0.4871,
782
+ "rewards/accuracies": 0.625,
783
+ "rewards/chosen": 0.03881513699889183,
784
+ "rewards/margins": 0.018004285171628,
785
+ "rewards/rejected": 0.020810849964618683,
786
+ "step": 37
787
+ },
788
+ {
789
+ "debug/policy_chosen_logits": -1.3791043758392334,
790
+ "debug/policy_chosen_logps": -228.21205139160156,
791
+ "debug/policy_rejected_logits": -1.3903659582138062,
792
+ "debug/policy_rejected_logps": -227.752197265625,
793
+ "debug/reference_chosen_logps": -230.1770477294922,
794
+ "debug/reference_rejected_logps": -230.64239501953125,
795
+ "epoch": 0.8837209302325582,
796
+ "grad_norm": 17.81188100770416,
797
+ "learning_rate": 1e-06,
798
+ "logits/chosen": -1.3791043758392334,
799
+ "logits/rejected": -1.3903659582138062,
800
+ "logps/chosen": -228.21205139160156,
801
+ "logps/rejected": -227.752197265625,
802
+ "loss": 0.4778,
803
+ "rewards/accuracies": 0.5,
804
+ "rewards/chosen": 0.019649982452392578,
805
+ "rewards/margins": -0.009251842275261879,
806
+ "rewards/rejected": 0.028901822865009308,
807
+ "step": 38
808
+ },
809
+ {
810
+ "debug/policy_chosen_logits": -1.4482218027114868,
811
+ "debug/policy_chosen_logps": -247.0265655517578,
812
+ "debug/policy_rejected_logits": -1.3742115497589111,
813
+ "debug/policy_rejected_logps": -305.1549072265625,
814
+ "debug/reference_chosen_logps": -249.18740844726562,
815
+ "debug/reference_rejected_logps": -301.026123046875,
816
+ "epoch": 0.9069767441860465,
817
+ "grad_norm": 18.246091231595155,
818
+ "learning_rate": 1e-06,
819
+ "logits/chosen": -1.4482218027114868,
820
+ "logits/rejected": -1.3742115497589111,
821
+ "logps/chosen": -247.0265655517578,
822
+ "logps/rejected": -305.1549072265625,
823
+ "loss": 0.4569,
824
+ "rewards/accuracies": 0.625,
825
+ "rewards/chosen": 0.021608371287584305,
826
+ "rewards/margins": 0.0628962367773056,
827
+ "rewards/rejected": -0.0412878580391407,
828
+ "step": 39
829
+ },
830
+ {
831
+ "debug/policy_chosen_logits": -1.5543980598449707,
832
+ "debug/policy_chosen_logps": -257.4435729980469,
833
+ "debug/policy_rejected_logits": -1.5073050260543823,
834
+ "debug/policy_rejected_logps": -296.2988586425781,
835
+ "debug/reference_chosen_logps": -256.39544677734375,
836
+ "debug/reference_rejected_logps": -288.50433349609375,
837
+ "epoch": 0.9302325581395349,
838
+ "grad_norm": 14.177163201466067,
839
+ "learning_rate": 1e-06,
840
+ "logits/chosen": -1.5543980598449707,
841
+ "logits/rejected": -1.5073050260543823,
842
+ "logps/chosen": -257.4435729980469,
843
+ "logps/rejected": -296.2988586425781,
844
+ "loss": 0.4851,
845
+ "rewards/accuracies": 0.75,
846
+ "rewards/chosen": -0.010481302626430988,
847
+ "rewards/margins": 0.0674639493227005,
848
+ "rewards/rejected": -0.07794524729251862,
849
+ "step": 40
850
+ },
851
+ {
852
+ "debug/policy_chosen_logits": -1.5510001182556152,
853
+ "debug/policy_chosen_logps": -262.91461181640625,
854
+ "debug/policy_rejected_logits": -1.4993880987167358,
855
+ "debug/policy_rejected_logps": -288.2521667480469,
856
+ "debug/reference_chosen_logps": -253.84947204589844,
857
+ "debug/reference_rejected_logps": -285.00738525390625,
858
+ "epoch": 0.9534883720930233,
859
+ "grad_norm": 64.06356256852192,
860
+ "learning_rate": 1e-06,
861
+ "logits/chosen": -1.5510001182556152,
862
+ "logits/rejected": -1.4993880987167358,
863
+ "logps/chosen": -262.91461181640625,
864
+ "logps/rejected": -288.2521667480469,
865
+ "loss": 0.4995,
866
+ "rewards/accuracies": 0.125,
867
+ "rewards/chosen": -0.09065132588148117,
868
+ "rewards/margins": -0.05820371210575104,
869
+ "rewards/rejected": -0.032447606325149536,
870
+ "step": 41
871
+ },
872
+ {
873
+ "debug/policy_chosen_logits": -1.6003302335739136,
874
+ "debug/policy_chosen_logps": -224.11663818359375,
875
+ "debug/policy_rejected_logits": -1.5286082029342651,
876
+ "debug/policy_rejected_logps": -271.67974853515625,
877
+ "debug/reference_chosen_logps": -221.91209411621094,
878
+ "debug/reference_rejected_logps": -258.791015625,
879
+ "epoch": 0.9767441860465116,
880
+ "grad_norm": 61.90638727730458,
881
+ "learning_rate": 1e-06,
882
+ "logits/chosen": -1.6003302335739136,
883
+ "logits/rejected": -1.5286082029342651,
884
+ "logps/chosen": -224.11663818359375,
885
+ "logps/rejected": -271.67974853515625,
886
+ "loss": 0.4904,
887
+ "rewards/accuracies": 0.625,
888
+ "rewards/chosen": -0.022045554593205452,
889
+ "rewards/margins": 0.10684183239936829,
890
+ "rewards/rejected": -0.1288873851299286,
891
+ "step": 42
892
+ },
893
+ {
894
+ "debug/policy_chosen_logits": -1.5732522010803223,
895
+ "debug/policy_chosen_logps": -237.1086883544922,
896
+ "debug/policy_rejected_logits": -1.405612826347351,
897
+ "debug/policy_rejected_logps": -320.40643310546875,
898
+ "debug/reference_chosen_logps": -234.65167236328125,
899
+ "debug/reference_rejected_logps": -311.38165283203125,
900
+ "epoch": 1.0,
901
+ "grad_norm": 43.37698691621295,
902
+ "learning_rate": 1e-06,
903
+ "logits/chosen": -1.5732522010803223,
904
+ "logits/rejected": -1.405612826347351,
905
+ "logps/chosen": -237.1086883544922,
906
+ "logps/rejected": -320.40643310546875,
907
+ "loss": 0.4906,
908
+ "rewards/accuracies": 0.5,
909
+ "rewards/chosen": -0.024570178240537643,
910
+ "rewards/margins": 0.065677709877491,
911
+ "rewards/rejected": -0.09024789184331894,
912
+ "step": 43
913
+ },
914
+ {
915
+ "epoch": 1.0,
916
+ "step": 43,
917
+ "total_flos": 0.0,
918
+ "train_loss": 0.489490317743878,
919
+ "train_runtime": 149.7969,
920
+ "train_samples_per_second": 18.318,
921
+ "train_steps_per_second": 0.287
922
+ }
923
+ ],
924
+ "logging_steps": 1,
925
+ "max_steps": 43,
926
+ "num_input_tokens_seen": 0,
927
+ "num_train_epochs": 1,
928
+ "save_steps": 500,
929
+ "stateful_callbacks": {
930
+ "TrainerControl": {
931
+ "args": {
932
+ "should_epoch_stop": false,
933
+ "should_evaluate": false,
934
+ "should_log": false,
935
+ "should_save": true,
936
+ "should_training_stop": true
937
+ },
938
+ "attributes": {}
939
+ }
940
+ },
941
+ "total_flos": 0.0,
942
+ "train_batch_size": 8,
943
+ "trial_name": null,
944
+ "trial_params": null
945
+ }