{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0006218905472636816,
"eval_steps": 1000,
"global_step": 25,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 0.0,
"loss": 1.9492,
"step": 1
},
{
"epoch": 0.0,
"learning_rate": 0.0,
"loss": 1.9778,
"step": 2
},
{
"epoch": 0.0,
"learning_rate": 4e-05,
"loss": 2.0908,
"step": 3
},
{
"epoch": 0.0,
"learning_rate": 4e-05,
"loss": 2.0456,
"step": 4
},
{
"epoch": 0.0,
"learning_rate": 8e-05,
"loss": 2.2288,
"step": 5
},
{
"epoch": 0.0,
"learning_rate": 0.00012,
"loss": 1.7521,
"step": 6
},
{
"epoch": 0.0,
"learning_rate": 0.00016,
"loss": 1.746,
"step": 7
},
{
"epoch": 0.0,
"learning_rate": 0.0002,
"loss": 1.6457,
"step": 8
},
{
"epoch": 0.0,
"learning_rate": 0.00019789473684210526,
"loss": 1.6226,
"step": 9
},
{
"epoch": 0.0,
"learning_rate": 0.00019578947368421054,
"loss": 1.643,
"step": 10
},
{
"epoch": 0.0,
"learning_rate": 0.0001936842105263158,
"loss": 1.7807,
"step": 11
},
{
"epoch": 0.0,
"learning_rate": 0.00019157894736842104,
"loss": 1.7257,
"step": 12
},
{
"epoch": 0.0,
"learning_rate": 0.00018947368421052632,
"loss": 1.6412,
"step": 13
},
{
"epoch": 0.0,
"learning_rate": 0.0001873684210526316,
"loss": 1.6792,
"step": 14
},
{
"epoch": 0.0,
"learning_rate": 0.00018526315789473685,
"loss": 1.4892,
"step": 15
},
{
"epoch": 0.0,
"learning_rate": 0.0001831578947368421,
"loss": 1.4418,
"step": 16
},
{
"epoch": 0.0,
"learning_rate": 0.00018105263157894739,
"loss": 1.5064,
"step": 17
},
{
"epoch": 0.0,
"learning_rate": 0.00017894736842105264,
"loss": 1.5943,
"step": 18
},
{
"epoch": 0.0,
"learning_rate": 0.0001768421052631579,
"loss": 1.419,
"step": 19
},
{
"epoch": 0.0,
"learning_rate": 0.00017473684210526317,
"loss": 1.5235,
"step": 20
},
{
"epoch": 0.0,
"learning_rate": 0.00017263157894736842,
"loss": 1.6562,
"step": 21
},
{
"epoch": 0.0,
"learning_rate": 0.0001705263157894737,
"loss": 1.4182,
"step": 22
},
{
"epoch": 0.0,
"learning_rate": 0.00016842105263157895,
"loss": 1.7132,
"step": 23
},
{
"epoch": 0.0,
"learning_rate": 0.00016631578947368423,
"loss": 1.7307,
"step": 24
},
{
"epoch": 0.0,
"learning_rate": 0.00016421052631578948,
"loss": 1.7079,
"step": 25
}
],
"logging_steps": 1,
"max_steps": 100,
"num_train_epochs": 1,
"save_steps": 25,
"total_flos": 1319123636748288.0,
"trial_name": null,
"trial_params": null
}