{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.01754693805930865,
"eval_steps": 5,
"global_step": 25,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0007018775223723461,
"eval_loss": 10.378314971923828,
"eval_runtime": 5.1789,
"eval_samples_per_second": 463.422,
"eval_steps_per_second": 57.928,
"step": 1
},
{
"epoch": 0.002105632567117038,
"grad_norm": 0.07256367057561874,
"learning_rate": 3e-05,
"loss": 10.3792,
"step": 3
},
{
"epoch": 0.00350938761186173,
"eval_loss": 10.377887725830078,
"eval_runtime": 5.1938,
"eval_samples_per_second": 462.089,
"eval_steps_per_second": 57.761,
"step": 5
},
{
"epoch": 0.004211265134234076,
"grad_norm": 0.06716205924749374,
"learning_rate": 6e-05,
"loss": 10.3798,
"step": 6
},
{
"epoch": 0.0063168977013511145,
"grad_norm": 0.07351335883140564,
"learning_rate": 9e-05,
"loss": 10.3792,
"step": 9
},
{
"epoch": 0.00701877522372346,
"eval_loss": 10.376435279846191,
"eval_runtime": 5.235,
"eval_samples_per_second": 458.451,
"eval_steps_per_second": 57.306,
"step": 10
},
{
"epoch": 0.008422530268468151,
"grad_norm": 0.05700007453560829,
"learning_rate": 9.938441702975689e-05,
"loss": 10.3739,
"step": 12
},
{
"epoch": 0.01052816283558519,
"grad_norm": 0.07175807654857635,
"learning_rate": 9.619397662556435e-05,
"loss": 10.3717,
"step": 15
},
{
"epoch": 0.01052816283558519,
"eval_loss": 10.374283790588379,
"eval_runtime": 5.229,
"eval_samples_per_second": 458.977,
"eval_steps_per_second": 57.372,
"step": 15
},
{
"epoch": 0.012633795402702229,
"grad_norm": 0.09788131713867188,
"learning_rate": 9.045084971874738e-05,
"loss": 10.375,
"step": 18
},
{
"epoch": 0.01403755044744692,
"eval_loss": 10.371757507324219,
"eval_runtime": 5.1826,
"eval_samples_per_second": 463.084,
"eval_steps_per_second": 57.886,
"step": 20
},
{
"epoch": 0.014739427969819266,
"grad_norm": 0.08976900577545166,
"learning_rate": 8.247240241650918e-05,
"loss": 10.3723,
"step": 21
},
{
"epoch": 0.016845060536936303,
"grad_norm": 0.10850024223327637,
"learning_rate": 7.269952498697734e-05,
"loss": 10.3716,
"step": 24
},
{
"epoch": 0.01754693805930865,
"eval_loss": 10.368788719177246,
"eval_runtime": 5.2374,
"eval_samples_per_second": 458.24,
"eval_steps_per_second": 57.28,
"step": 25
}
],
"logging_steps": 3,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 5,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2844541845504.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}