{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.176056338028169,
  "eval_steps": 9,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007042253521126761,
      "grad_norm": 36.489925384521484,
      "learning_rate": 1e-05,
      "loss": 16.7418,
      "step": 1
    },
    {
      "epoch": 0.007042253521126761,
      "eval_loss": 8.365168571472168,
      "eval_runtime": 5.2379,
      "eval_samples_per_second": 22.91,
      "eval_steps_per_second": 2.864,
      "step": 1
    },
    {
      "epoch": 0.014084507042253521,
      "grad_norm": 54.84891128540039,
      "learning_rate": 2e-05,
      "loss": 15.7412,
      "step": 2
    },
    {
      "epoch": 0.02112676056338028,
      "grad_norm": 64.47398376464844,
      "learning_rate": 3e-05,
      "loss": 16.7694,
      "step": 3
    },
    {
      "epoch": 0.028169014084507043,
      "grad_norm": 16.057498931884766,
      "learning_rate": 4e-05,
      "loss": 16.2653,
      "step": 4
    },
    {
      "epoch": 0.035211267605633804,
      "grad_norm": 38.04544448852539,
      "learning_rate": 5e-05,
      "loss": 16.5994,
      "step": 5
    },
    {
      "epoch": 0.04225352112676056,
      "grad_norm": 32.003196716308594,
      "learning_rate": 6e-05,
      "loss": 16.4649,
      "step": 6
    },
    {
      "epoch": 0.04929577464788732,
      "grad_norm": 43.975486755371094,
      "learning_rate": 7e-05,
      "loss": 15.2708,
      "step": 7
    },
    {
      "epoch": 0.056338028169014086,
      "grad_norm": 23.013490676879883,
      "learning_rate": 8e-05,
      "loss": 15.9144,
      "step": 8
    },
    {
      "epoch": 0.06338028169014084,
      "grad_norm": 21.642526626586914,
      "learning_rate": 9e-05,
      "loss": 13.8646,
      "step": 9
    },
    {
      "epoch": 0.06338028169014084,
      "eval_loss": 5.608087539672852,
      "eval_runtime": 5.2413,
      "eval_samples_per_second": 22.895,
      "eval_steps_per_second": 2.862,
      "step": 9
    },
    {
      "epoch": 0.07042253521126761,
      "grad_norm": 20.63976287841797,
      "learning_rate": 0.0001,
      "loss": 11.0946,
      "step": 10
    },
    {
      "epoch": 0.07746478873239436,
      "grad_norm": 22.017879486083984,
      "learning_rate": 9.99695413509548e-05,
      "loss": 9.8704,
      "step": 11
    },
    {
      "epoch": 0.08450704225352113,
      "grad_norm": 25.062084197998047,
      "learning_rate": 9.987820251299122e-05,
      "loss": 6.6076,
      "step": 12
    },
    {
      "epoch": 0.09154929577464789,
      "grad_norm": 17.164138793945312,
      "learning_rate": 9.972609476841367e-05,
      "loss": 6.0168,
      "step": 13
    },
    {
      "epoch": 0.09859154929577464,
      "grad_norm": 11.223540306091309,
      "learning_rate": 9.951340343707852e-05,
      "loss": 3.8373,
      "step": 14
    },
    {
      "epoch": 0.1056338028169014,
      "grad_norm": 7.377206325531006,
      "learning_rate": 9.924038765061042e-05,
      "loss": 2.6019,
      "step": 15
    },
    {
      "epoch": 0.11267605633802817,
      "grad_norm": 7.338124752044678,
      "learning_rate": 9.890738003669029e-05,
      "loss": 4.2857,
      "step": 16
    },
    {
      "epoch": 0.11971830985915492,
      "grad_norm": 6.844852447509766,
      "learning_rate": 9.851478631379982e-05,
      "loss": 2.2959,
      "step": 17
    },
    {
      "epoch": 0.1267605633802817,
      "grad_norm": 6.174362659454346,
      "learning_rate": 9.806308479691595e-05,
      "loss": 2.6541,
      "step": 18
    },
    {
      "epoch": 0.1267605633802817,
      "eval_loss": 1.0343117713928223,
      "eval_runtime": 5.2408,
      "eval_samples_per_second": 22.897,
      "eval_steps_per_second": 2.862,
      "step": 18
    },
    {
      "epoch": 0.13380281690140844,
      "grad_norm": 11.295928001403809,
      "learning_rate": 9.755282581475769e-05,
      "loss": 2.0969,
      "step": 19
    },
    {
      "epoch": 0.14084507042253522,
      "grad_norm": 5.165931224822998,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.1286,
      "step": 20
    },
    {
      "epoch": 0.14788732394366197,
      "grad_norm": 15.428300857543945,
      "learning_rate": 9.635919272833938e-05,
      "loss": 2.5216,
      "step": 21
    },
    {
      "epoch": 0.15492957746478872,
      "grad_norm": 15.729449272155762,
      "learning_rate": 9.567727288213005e-05,
      "loss": 3.2321,
      "step": 22
    },
    {
      "epoch": 0.1619718309859155,
      "grad_norm": 6.993392467498779,
      "learning_rate": 9.493970231495835e-05,
      "loss": 2.019,
      "step": 23
    },
    {
      "epoch": 0.16901408450704225,
      "grad_norm": 3.678471326828003,
      "learning_rate": 9.414737964294636e-05,
      "loss": 1.4714,
      "step": 24
    },
    {
      "epoch": 0.176056338028169,
      "grad_norm": 4.6879801750183105,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.449,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.63366161088512e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}