Training in progress, step 25, checkpoint (commit e6293f2)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0022476961114857273,
"eval_steps": 9,
"global_step": 25,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 8.990784445942908e-05,
"grad_norm": 15.35192584991455,
"learning_rate": 1e-05,
"loss": 12.6326,
"step": 1
},
{
"epoch": 8.990784445942908e-05,
"eval_loss": 12.586104393005371,
"eval_runtime": 276.5266,
"eval_samples_per_second": 33.874,
"eval_steps_per_second": 4.235,
"step": 1
},
{
"epoch": 0.00017981568891885816,
"grad_norm": 16.091136932373047,
"learning_rate": 2e-05,
"loss": 12.5988,
"step": 2
},
{
"epoch": 0.00026972353337828726,
"grad_norm": 14.455317497253418,
"learning_rate": 3e-05,
"loss": 12.5276,
"step": 3
},
{
"epoch": 0.0003596313778377163,
"grad_norm": 15.562151908874512,
"learning_rate": 4e-05,
"loss": 12.5544,
"step": 4
},
{
"epoch": 0.0004495392222971454,
"grad_norm": 13.742938995361328,
"learning_rate": 5e-05,
"loss": 12.5895,
"step": 5
},
{
"epoch": 0.0005394470667565745,
"grad_norm": 14.714332580566406,
"learning_rate": 6e-05,
"loss": 12.6172,
"step": 6
},
{
"epoch": 0.0006293549112160036,
"grad_norm": 11.049238204956055,
"learning_rate": 7e-05,
"loss": 12.4842,
"step": 7
},
{
"epoch": 0.0007192627556754326,
"grad_norm": 13.16402816772461,
"learning_rate": 8e-05,
"loss": 12.302,
"step": 8
},
{
"epoch": 0.0008091706001348617,
"grad_norm": 12.623836517333984,
"learning_rate": 9e-05,
"loss": 12.2529,
"step": 9
},
{
"epoch": 0.0008091706001348617,
"eval_loss": 12.082995414733887,
"eval_runtime": 277.7717,
"eval_samples_per_second": 33.722,
"eval_steps_per_second": 4.216,
"step": 9
},
{
"epoch": 0.0008990784445942908,
"grad_norm": 10.00021743774414,
"learning_rate": 0.0001,
"loss": 12.0075,
"step": 10
},
{
"epoch": 0.0009889862890537198,
"grad_norm": 9.958125114440918,
"learning_rate": 9.99695413509548e-05,
"loss": 11.9523,
"step": 11
},
{
"epoch": 0.001078894133513149,
"grad_norm": 10.316842079162598,
"learning_rate": 9.987820251299122e-05,
"loss": 11.8709,
"step": 12
},
{
"epoch": 0.001168801977972578,
"grad_norm": 10.072561264038086,
"learning_rate": 9.972609476841367e-05,
"loss": 11.7194,
"step": 13
},
{
"epoch": 0.0012587098224320073,
"grad_norm": 9.017184257507324,
"learning_rate": 9.951340343707852e-05,
"loss": 11.5661,
"step": 14
},
{
"epoch": 0.0013486176668914363,
"grad_norm": 8.668551445007324,
"learning_rate": 9.924038765061042e-05,
"loss": 11.4991,
"step": 15
},
{
"epoch": 0.0014385255113508653,
"grad_norm": 8.481941223144531,
"learning_rate": 9.890738003669029e-05,
"loss": 11.3203,
"step": 16
},
{
"epoch": 0.0015284333558102945,
"grad_norm": 7.682249546051025,
"learning_rate": 9.851478631379982e-05,
"loss": 11.2815,
"step": 17
},
{
"epoch": 0.0016183412002697235,
"grad_norm": 7.486232280731201,
"learning_rate": 9.806308479691595e-05,
"loss": 11.1782,
"step": 18
},
{
"epoch": 0.0016183412002697235,
"eval_loss": 11.049551010131836,
"eval_runtime": 277.8127,
"eval_samples_per_second": 33.717,
"eval_steps_per_second": 4.215,
"step": 18
},
{
"epoch": 0.0017082490447291527,
"grad_norm": 7.089673042297363,
"learning_rate": 9.755282581475769e-05,
"loss": 10.9657,
"step": 19
},
{
"epoch": 0.0017981568891885817,
"grad_norm": 6.506858825683594,
"learning_rate": 9.698463103929542e-05,
"loss": 10.9544,
"step": 20
},
{
"epoch": 0.001888064733648011,
"grad_norm": 6.114770412445068,
"learning_rate": 9.635919272833938e-05,
"loss": 10.8534,
"step": 21
},
{
"epoch": 0.0019779725781074397,
"grad_norm": 5.774037837982178,
"learning_rate": 9.567727288213005e-05,
"loss": 10.7668,
"step": 22
},
{
"epoch": 0.002067880422566869,
"grad_norm": 5.377146244049072,
"learning_rate": 9.493970231495835e-05,
"loss": 10.7447,
"step": 23
},
{
"epoch": 0.002157788267026298,
"grad_norm": 4.897111892700195,
"learning_rate": 9.414737964294636e-05,
"loss": 10.5888,
"step": 24
},
{
"epoch": 0.0022476961114857273,
"grad_norm": 4.556328773498535,
"learning_rate": 9.330127018922194e-05,
"loss": 10.5711,
"step": 25
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 13303264837632.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
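The JSON above has the shape of a Hugging Face Transformers trainer state file saved alongside a checkpoint: log_history holds one entry per logged training step (loss, grad_norm, learning_rate) plus periodic eval entries every eval_steps = 9 steps. The logged learning rates (1e-05 rising to 1e-04 by step 10, then easing down to about 9.33e-05 at step 25) are consistent with linear warmup over roughly the first 10 steps followed by cosine decay toward 0 at max_steps = 100, although the schedule itself is not stated in the file. Below is a minimal sketch, assuming that schedule and a hypothetical file path, that parses the log and compares the logged learning rates against it.

# Minimal sketch (not part of the checkpoint): load the trainer state,
# summarize the logged losses, and compare the logged learning rates
# against a cosine schedule with linear warmup. The file path and
# warmup_steps=10 are assumptions inferred from the log entries above.
import json
import math

with open("checkpoint-25/trainer_state.json") as f:  # hypothetical path
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"train loss {train_logs[0]['loss']:.4f} -> {train_logs[-1]['loss']:.4f} "
      f"over {state['global_step']} of {state['max_steps']} steps")
print(f"eval loss {eval_logs[0]['eval_loss']:.4f} -> {eval_logs[-1]['eval_loss']:.4f} "
      f"(evaluated every {state['eval_steps']} steps)")

def cosine_with_warmup(step, peak_lr=1e-4, warmup_steps=10, max_steps=100):
    # Linear warmup from 0 to peak_lr, then cosine decay toward 0 at max_steps.
    if step < warmup_steps:
        return peak_lr * step / warmup_steps
    progress = (step - warmup_steps) / (max_steps - warmup_steps)
    return peak_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

for e in train_logs:
    print(f"step {e['step']:3d}  logged lr {e['learning_rate']:.6e}  "
          f"assumed schedule {cosine_with_warmup(e['step']):.6e}")

Under these assumed values the reconstruction matches the log exactly (for example, step 25 gives 1e-04 * 0.5 * (1 + cos(pi/6)) = 9.330127e-05, the value recorded above), which is what suggests this schedule; treat it as an inference from the logged numbers rather than a confirmed training configuration.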