{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.971563981042654,
"eval_steps": 100,
"global_step": 104,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 133.0057671141645,
"learning_rate": 4.545454545454545e-08,
"logits/chosen": 123.11854553222656,
"logits/rejected": 97.00198364257812,
"logps/chosen": -425.18585205078125,
"logps/rejected": -424.1869201660156,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.19,
"grad_norm": 182.25849293095231,
"learning_rate": 4.545454545454545e-07,
"logits/chosen": 117.33037567138672,
"logits/rejected": 136.23989868164062,
"logps/chosen": -442.517333984375,
"logps/rejected": -524.100830078125,
"loss": 0.7193,
"rewards/accuracies": 0.4791666567325592,
"rewards/chosen": 0.04355551674962044,
"rewards/margins": 0.04285159707069397,
"rewards/rejected": 0.0007039175252430141,
"step": 10
},
{
"epoch": 0.38,
"grad_norm": 98.47580983421109,
"learning_rate": 4.885348141000122e-07,
"logits/chosen": 124.34559631347656,
"logits/rejected": 131.87884521484375,
"logps/chosen": -425.8033142089844,
"logps/rejected": -496.58966064453125,
"loss": 0.5981,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.02306445501744747,
"rewards/margins": 0.4770180284976959,
"rewards/rejected": -0.4539535939693451,
"step": 20
},
{
"epoch": 0.57,
"grad_norm": 114.087847259367,
"learning_rate": 4.5025027361734613e-07,
"logits/chosen": 121.997314453125,
"logits/rejected": 125.30668640136719,
"logps/chosen": -473.92950439453125,
"logps/rejected": -546.4301147460938,
"loss": 0.5828,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -0.9404676556587219,
"rewards/margins": 1.0203006267547607,
"rewards/rejected": -1.9607683420181274,
"step": 30
},
{
"epoch": 0.76,
"grad_norm": 102.23106994021863,
"learning_rate": 3.893311157806091e-07,
"logits/chosen": 123.2125473022461,
"logits/rejected": 118.11393737792969,
"logps/chosen": -498.98480224609375,
"logps/rejected": -529.0015869140625,
"loss": 0.5432,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -1.29017174243927,
"rewards/margins": 0.921674907207489,
"rewards/rejected": -2.2118465900421143,
"step": 40
},
{
"epoch": 0.95,
"grad_norm": 116.89709640403532,
"learning_rate": 3.126631330646801e-07,
"logits/chosen": 131.3624267578125,
"logits/rejected": 131.5041046142578,
"logps/chosen": -517.221923828125,
"logps/rejected": -547.076904296875,
"loss": 0.5007,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -1.7199589014053345,
"rewards/margins": 1.0714657306671143,
"rewards/rejected": -2.791424512863159,
"step": 50
},
{
"epoch": 1.14,
"grad_norm": 53.67728401735822,
"learning_rate": 2.2891223348923882e-07,
"logits/chosen": 130.25340270996094,
"logits/rejected": 133.3688507080078,
"logps/chosen": -482.68798828125,
"logps/rejected": -531.1278076171875,
"loss": 0.2897,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": -1.2961251735687256,
"rewards/margins": 2.0452933311462402,
"rewards/rejected": -3.341418504714966,
"step": 60
},
{
"epoch": 1.33,
"grad_norm": 46.49732419269788,
"learning_rate": 1.4754491880085317e-07,
"logits/chosen": 125.44151306152344,
"logits/rejected": 126.0068588256836,
"logps/chosen": -451.8477478027344,
"logps/rejected": -573.8129272460938,
"loss": 0.1884,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -1.1466975212097168,
"rewards/margins": 2.4370884895324707,
"rewards/rejected": -3.5837860107421875,
"step": 70
},
{
"epoch": 1.52,
"grad_norm": 48.86628168194261,
"learning_rate": 7.775827023107834e-08,
"logits/chosen": 129.2606201171875,
"logits/rejected": 130.1272735595703,
"logps/chosen": -510.95819091796875,
"logps/rejected": -606.6770629882812,
"loss": 0.1877,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": -1.0929005146026611,
"rewards/margins": 2.9041507244110107,
"rewards/rejected": -3.9970507621765137,
"step": 80
},
{
"epoch": 1.71,
"grad_norm": 51.734548215896965,
"learning_rate": 2.7440387297912122e-08,
"logits/chosen": 116.0444564819336,
"logits/rejected": 118.35429382324219,
"logps/chosen": -488.314697265625,
"logps/rejected": -571.8872680664062,
"loss": 0.1774,
"rewards/accuracies": 0.96875,
"rewards/chosen": -1.3073575496673584,
"rewards/margins": 3.0684092044830322,
"rewards/rejected": -4.375766277313232,
"step": 90
},
{
"epoch": 1.9,
"grad_norm": 58.09774424636419,
"learning_rate": 2.27878296044029e-09,
"logits/chosen": 129.8248748779297,
"logits/rejected": 120.5040512084961,
"logps/chosen": -505.01513671875,
"logps/rejected": -610.0936279296875,
"loss": 0.193,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -1.331251859664917,
"rewards/margins": 3.034428596496582,
"rewards/rejected": -4.36568021774292,
"step": 100
},
{
"epoch": 1.9,
"eval_logits/chosen": 99.29187774658203,
"eval_logits/rejected": 93.2354736328125,
"eval_logps/chosen": -493.443359375,
"eval_logps/rejected": -520.8292236328125,
"eval_loss": 0.4772518575191498,
"eval_rewards/accuracies": 0.7291666865348816,
"eval_rewards/chosen": -2.0019426345825195,
"eval_rewards/margins": 1.4620394706726074,
"eval_rewards/rejected": -3.463982343673706,
"eval_runtime": 49.3044,
"eval_samples_per_second": 15.212,
"eval_steps_per_second": 0.487,
"step": 100
},
{
"epoch": 1.97,
"step": 104,
"total_flos": 0.0,
"train_loss": 0.3880494168171516,
"train_runtime": 1239.5887,
"train_samples_per_second": 10.891,
"train_steps_per_second": 0.084
}
],
"logging_steps": 10,
"max_steps": 104,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}