{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0055793193230425885,
  "eval_steps": 2,
  "global_step": 15,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0003719546215361726,
      "grad_norm": 28.77135467529297,
      "learning_rate": 2e-05,
      "loss": 45.9604,
      "step": 1
    },
    {
      "epoch": 0.0003719546215361726,
      "eval_loss": 11.40648078918457,
      "eval_runtime": 137.0275,
      "eval_samples_per_second": 8.261,
      "eval_steps_per_second": 4.131,
      "step": 1
    },
    {
      "epoch": 0.0007439092430723452,
      "grad_norm": 23.410951614379883,
      "learning_rate": 4e-05,
      "loss": 46.8527,
      "step": 2
    },
    {
      "epoch": 0.0007439092430723452,
      "eval_loss": 11.40257453918457,
      "eval_runtime": 137.8996,
      "eval_samples_per_second": 8.209,
      "eval_steps_per_second": 4.104,
      "step": 2
    },
    {
      "epoch": 0.0011158638646085177,
      "grad_norm": 33.30792999267578,
      "learning_rate": 6e-05,
      "loss": 44.8008,
      "step": 3
    },
    {
      "epoch": 0.0014878184861446904,
      "grad_norm": 27.510290145874023,
      "learning_rate": 8e-05,
      "loss": 48.3234,
      "step": 4
    },
    {
      "epoch": 0.0014878184861446904,
      "eval_loss": 11.253341674804688,
      "eval_runtime": 138.0011,
      "eval_samples_per_second": 8.203,
      "eval_steps_per_second": 4.101,
      "step": 4
    },
    {
      "epoch": 0.001859773107680863,
      "grad_norm": 23.642324447631836,
      "learning_rate": 0.0001,
      "loss": 42.2976,
      "step": 5
    },
    {
      "epoch": 0.0022317277292170355,
      "grad_norm": 37.700042724609375,
      "learning_rate": 9.755282581475769e-05,
      "loss": 42.5346,
      "step": 6
    },
    {
      "epoch": 0.0022317277292170355,
      "eval_loss": 10.256876945495605,
      "eval_runtime": 138.1284,
      "eval_samples_per_second": 8.195,
      "eval_steps_per_second": 4.098,
      "step": 6
    },
    {
      "epoch": 0.002603682350753208,
      "grad_norm": 46.39228057861328,
      "learning_rate": 9.045084971874738e-05,
      "loss": 39.644,
      "step": 7
    },
    {
      "epoch": 0.0029756369722893808,
      "grad_norm": 47.501041412353516,
      "learning_rate": 7.938926261462366e-05,
      "loss": 35.7466,
      "step": 8
    },
    {
      "epoch": 0.0029756369722893808,
      "eval_loss": 7.803372859954834,
      "eval_runtime": 138.0673,
      "eval_samples_per_second": 8.199,
      "eval_steps_per_second": 4.099,
      "step": 8
    },
    {
      "epoch": 0.0033475915938255534,
      "grad_norm": 45.90718460083008,
      "learning_rate": 6.545084971874738e-05,
      "loss": 31.2201,
      "step": 9
    },
    {
      "epoch": 0.003719546215361726,
      "grad_norm": 51.53364181518555,
      "learning_rate": 5e-05,
      "loss": 23.9333,
      "step": 10
    },
    {
      "epoch": 0.003719546215361726,
      "eval_loss": 5.604811191558838,
      "eval_runtime": 138.2369,
      "eval_samples_per_second": 8.189,
      "eval_steps_per_second": 4.094,
      "step": 10
    },
    {
      "epoch": 0.004091500836897899,
      "grad_norm": 53.22500991821289,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 27.342,
      "step": 11
    },
    {
      "epoch": 0.004463455458434071,
      "grad_norm": 59.24814224243164,
      "learning_rate": 2.061073738537635e-05,
      "loss": 18.2533,
      "step": 12
    },
    {
      "epoch": 0.004463455458434071,
      "eval_loss": 4.373373985290527,
      "eval_runtime": 138.3748,
      "eval_samples_per_second": 8.181,
      "eval_steps_per_second": 4.09,
      "step": 12
    },
    {
      "epoch": 0.004835410079970244,
      "grad_norm": 45.4249153137207,
      "learning_rate": 9.549150281252633e-06,
      "loss": 7.9498,
      "step": 13
    },
    {
      "epoch": 0.005207364701506416,
      "grad_norm": 51.06169509887695,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 16.3781,
      "step": 14
    },
    {
      "epoch": 0.005207364701506416,
      "eval_loss": 4.014023780822754,
      "eval_runtime": 138.2266,
      "eval_samples_per_second": 8.189,
      "eval_steps_per_second": 4.095,
      "step": 14
    },
    {
      "epoch": 0.0055793193230425885,
      "grad_norm": 36.74103927612305,
      "learning_rate": 0.0,
      "loss": 14.7442,
      "step": 15
    }
  ],
  "logging_steps": 1,
  "max_steps": 15,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9498612518092800.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}