{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 260,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.038461538461538464,
      "grad_norm": 2.890625,
      "learning_rate": 7.692307692307694e-06,
      "loss": 3.3093,
      "step": 1
    },
    {
      "epoch": 0.19230769230769232,
      "grad_norm": 4.90625,
      "learning_rate": 3.846153846153846e-05,
      "loss": 3.317,
      "step": 5
    },
    {
      "epoch": 0.38461538461538464,
      "grad_norm": 2.25,
      "learning_rate": 7.692307692307693e-05,
      "loss": 3.1254,
      "step": 10
    },
    {
      "epoch": 0.5769230769230769,
      "grad_norm": 3.328125,
      "learning_rate": 0.00011538461538461538,
      "loss": 2.6178,
      "step": 15
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 1.5390625,
      "learning_rate": 0.00015384615384615385,
      "loss": 2.3151,
      "step": 20
    },
    {
      "epoch": 0.9615384615384616,
      "grad_norm": 0.98828125,
      "learning_rate": 0.00019230769230769233,
      "loss": 2.0459,
      "step": 25
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.863295793533325,
      "eval_runtime": 0.4843,
      "eval_samples_per_second": 43.364,
      "eval_steps_per_second": 2.065,
      "step": 26
    },
    {
      "epoch": 1.1538461538461537,
      "grad_norm": 0.546875,
      "learning_rate": 0.00019985583705641418,
      "loss": 1.6915,
      "step": 30
    },
    {
      "epoch": 1.3461538461538463,
      "grad_norm": 0.400390625,
      "learning_rate": 0.0001992708874098054,
      "loss": 1.4892,
      "step": 35
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 0.515625,
      "learning_rate": 0.00019823877374156647,
      "loss": 1.3906,
      "step": 40
    },
    {
      "epoch": 1.7307692307692308,
      "grad_norm": 0.73046875,
      "learning_rate": 0.00019676414519013781,
      "loss": 1.3301,
      "step": 45
    },
    {
      "epoch": 1.9230769230769231,
      "grad_norm": 0.310546875,
      "learning_rate": 0.00019485364419471454,
      "loss": 1.2724,
      "step": 50
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.706272602081299,
      "eval_runtime": 0.4877,
      "eval_samples_per_second": 43.056,
      "eval_steps_per_second": 2.05,
      "step": 52
    },
    {
      "epoch": 2.1153846153846154,
      "grad_norm": 0.2373046875,
      "learning_rate": 0.00019251587657449236,
      "loss": 1.2358,
      "step": 55
    },
    {
      "epoch": 2.3076923076923075,
      "grad_norm": 0.427734375,
      "learning_rate": 0.0001897613727639014,
      "loss": 1.2118,
      "step": 60
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.412109375,
      "learning_rate": 0.00018660254037844388,
      "loss": 1.1899,
      "step": 65
    },
    {
      "epoch": 2.6923076923076925,
      "grad_norm": 0.7109375,
      "learning_rate": 0.00018305360832480117,
      "loss": 1.1662,
      "step": 70
    },
    {
      "epoch": 2.8846153846153846,
      "grad_norm": 0.29296875,
      "learning_rate": 0.0001791305627069662,
      "loss": 1.1654,
      "step": 75
    },
    {
      "epoch": 3.0,
      "eval_loss": 2.669499397277832,
      "eval_runtime": 0.492,
      "eval_samples_per_second": 42.682,
      "eval_steps_per_second": 2.032,
      "step": 78
    },
    {
      "epoch": 3.076923076923077,
      "grad_norm": 0.322265625,
      "learning_rate": 0.00017485107481711012,
      "loss": 1.1435,
      "step": 80
    },
    {
      "epoch": 3.269230769230769,
      "grad_norm": 0.3359375,
      "learning_rate": 0.00017023442153554777,
      "loss": 1.1188,
      "step": 85
    },
    {
      "epoch": 3.4615384615384617,
      "grad_norm": 0.25,
      "learning_rate": 0.0001653013984983585,
      "loss": 1.124,
      "step": 90
    },
    {
      "epoch": 3.6538461538461537,
      "grad_norm": 0.41015625,
      "learning_rate": 0.0001600742264237979,
      "loss": 1.108,
      "step": 95
    },
    {
      "epoch": 3.8461538461538463,
      "grad_norm": 0.5546875,
      "learning_rate": 0.00015457645101945046,
      "loss": 1.1056,
      "step": 100
    },
    {
      "epoch": 4.0,
      "eval_loss": 2.654463291168213,
      "eval_runtime": 0.4845,
      "eval_samples_per_second": 43.339,
      "eval_steps_per_second": 2.064,
      "step": 104
    },
    {
      "epoch": 4.038461538461538,
      "grad_norm": 0.33984375,
      "learning_rate": 0.00014883283692099112,
      "loss": 1.0918,
      "step": 105
    },
    {
      "epoch": 4.230769230769231,
      "grad_norm": 0.2373046875,
      "learning_rate": 0.00014286925614030542,
      "loss": 1.0814,
      "step": 110
    },
    {
      "epoch": 4.423076923076923,
      "grad_norm": 0.236328125,
      "learning_rate": 0.00013671257152545277,
      "loss": 1.0778,
      "step": 115
    },
    {
      "epoch": 4.615384615384615,
      "grad_norm": 0.2275390625,
      "learning_rate": 0.0001303905157574247,
      "loss": 1.0717,
      "step": 120
    },
    {
      "epoch": 4.8076923076923075,
      "grad_norm": 0.46875,
      "learning_rate": 0.0001239315664287558,
      "loss": 1.0643,
      "step": 125
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.224609375,
      "learning_rate": 0.00011736481776669306,
      "loss": 1.0615,
      "step": 130
    },
    {
      "epoch": 5.0,
      "eval_loss": 2.6669278144836426,
      "eval_runtime": 0.4883,
      "eval_samples_per_second": 43.009,
      "eval_steps_per_second": 2.048,
      "step": 130
    },
    {
      "epoch": 5.1923076923076925,
      "grad_norm": 0.208984375,
      "learning_rate": 0.00011071984957874479,
      "loss": 1.0503,
      "step": 135
    },
    {
      "epoch": 5.384615384615385,
      "grad_norm": 0.2421875,
      "learning_rate": 0.00010402659401094152,
      "loss": 1.05,
      "step": 140
    },
    {
      "epoch": 5.576923076923077,
      "grad_norm": 0.2578125,
      "learning_rate": 9.73152007189939e-05,
      "loss": 1.0449,
      "step": 145
    },
    {
      "epoch": 5.769230769230769,
      "grad_norm": 0.22265625,
      "learning_rate": 9.061590105968208e-05,
      "loss": 1.0451,
      "step": 150
    },
    {
      "epoch": 5.961538461538462,
      "grad_norm": 0.337890625,
      "learning_rate": 8.395887191422397e-05,
      "loss": 1.0347,
      "step": 155
    },
    {
      "epoch": 6.0,
      "eval_loss": 2.668196439743042,
      "eval_runtime": 0.4907,
      "eval_samples_per_second": 42.799,
      "eval_steps_per_second": 2.038,
      "step": 156
    },
    {
      "epoch": 6.153846153846154,
      "grad_norm": 0.42578125,
      "learning_rate": 7.73740997570278e-05,
      "loss": 1.0339,
      "step": 160
    },
    {
      "epoch": 6.346153846153846,
      "grad_norm": 0.28515625,
      "learning_rate": 7.089124558212871e-05,
      "loss": 1.0281,
      "step": 165
    },
    {
      "epoch": 6.538461538461538,
      "grad_norm": 0.41796875,
      "learning_rate": 6.453951129574644e-05,
      "loss": 1.0306,
      "step": 170
    },
    {
      "epoch": 6.730769230769231,
      "grad_norm": 0.279296875,
      "learning_rate": 5.834750817679606e-05,
      "loss": 1.0268,
      "step": 175
    },
    {
      "epoch": 6.923076923076923,
      "grad_norm": 0.2119140625,
      "learning_rate": 5.234312799786921e-05,
      "loss": 1.0216,
      "step": 180
    },
    {
      "epoch": 7.0,
      "eval_loss": 2.674699544906616,
      "eval_runtime": 0.4901,
      "eval_samples_per_second": 42.844,
      "eval_steps_per_second": 2.04,
      "step": 182
    },
    {
      "epoch": 7.115384615384615,
      "grad_norm": 0.205078125,
      "learning_rate": 4.6553417387219886e-05,
      "loss": 1.0217,
      "step": 185
    },
    {
      "epoch": 7.3076923076923075,
      "grad_norm": 0.205078125,
      "learning_rate": 4.100445599768774e-05,
      "loss": 1.0184,
      "step": 190
    },
    {
      "epoch": 7.5,
      "grad_norm": 0.251953125,
      "learning_rate": 3.5721239031346066e-05,
      "loss": 1.0163,
      "step": 195
    },
    {
      "epoch": 7.6923076923076925,
      "grad_norm": 0.2080078125,
      "learning_rate": 3.072756464904006e-05,
      "loss": 1.0133,
      "step": 200
    },
    {
      "epoch": 7.884615384615385,
      "grad_norm": 0.2099609375,
      "learning_rate": 2.6045926771976303e-05,
      "loss": 1.0204,
      "step": 205
    },
    {
      "epoch": 8.0,
      "eval_loss": 2.679577112197876,
      "eval_runtime": 0.4883,
      "eval_samples_per_second": 43.003,
      "eval_steps_per_second": 2.048,
      "step": 208
    },
    {
      "epoch": 8.076923076923077,
      "grad_norm": 0.19921875,
      "learning_rate": 2.1697413758237784e-05,
      "loss": 1.0097,
      "step": 210
    },
    {
      "epoch": 8.26923076923077,
      "grad_norm": 0.208984375,
      "learning_rate": 1.7701613410634365e-05,
      "loss": 1.0168,
      "step": 215
    },
    {
      "epoch": 8.461538461538462,
      "grad_norm": 0.2041015625,
      "learning_rate": 1.4076524743778319e-05,
      "loss": 1.0141,
      "step": 220
    },
    {
      "epoch": 8.653846153846153,
      "grad_norm": 0.1962890625,
      "learning_rate": 1.083847690782972e-05,
      "loss": 1.0125,
      "step": 225
    },
    {
      "epoch": 8.846153846153847,
      "grad_norm": 0.193359375,
      "learning_rate": 8.002055634117578e-06,
      "loss": 1.0071,
      "step": 230
    },
    {
      "epoch": 9.0,
      "eval_loss": 2.6812119483947754,
      "eval_runtime": 0.4891,
      "eval_samples_per_second": 42.934,
      "eval_steps_per_second": 2.044,
      "step": 234
    },
    {
      "epoch": 9.038461538461538,
      "grad_norm": 0.1884765625,
      "learning_rate": 5.580037533961546e-06,
      "loss": 1.0137,
      "step": 235
    },
    {
      "epoch": 9.23076923076923,
      "grad_norm": 0.1953125,
      "learning_rate": 3.5833325466437694e-06,
      "loss": 1.0116,
      "step": 240
    },
    {
      "epoch": 9.423076923076923,
      "grad_norm": 0.1865234375,
      "learning_rate": 2.0209347957732328e-06,
      "loss": 1.0093,
      "step": 245
    },
    {
      "epoch": 9.615384615384615,
      "grad_norm": 0.185546875,
      "learning_rate": 8.998820754091531e-07,
      "loss": 1.0136,
      "step": 250
    },
    {
      "epoch": 9.807692307692308,
      "grad_norm": 0.1923828125,
      "learning_rate": 2.2522414843748618e-07,
      "loss": 1.008,
      "step": 255
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.1875,
      "learning_rate": 0.0,
      "loss": 1.0088,
      "step": 260
    },
    {
      "epoch": 10.0,
      "eval_loss": 2.681701183319092,
      "eval_runtime": 0.4897,
      "eval_samples_per_second": 42.88,
      "eval_steps_per_second": 2.042,
      "step": 260
    },
    {
      "epoch": 10.0,
      "step": 260,
      "total_flos": 4.092505436769485e+17,
      "train_loss": 1.2536941931797907,
      "train_runtime": 1396.0468,
      "train_samples_per_second": 47.677,
      "train_steps_per_second": 0.186
    }
  ],
  "logging_steps": 5,
  "max_steps": 260,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.092505436769485e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}