Training in progress, epoch 1, checkpoint (commit 1c748a5)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0179640718562875,
"eval_steps": 11,
"global_step": 42,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.023952095808383235,
"eval_loss": 0.5102269649505615,
"eval_runtime": 0.9638,
"eval_samples_per_second": 18.675,
"eval_steps_per_second": 9.338,
"step": 1
},
{
"epoch": 0.11976047904191617,
"grad_norm": 4.35016393661499,
"learning_rate": 5e-05,
"loss": 1.6408,
"step": 5
},
{
"epoch": 0.23952095808383234,
"grad_norm": 5.59436559677124,
"learning_rate": 0.0001,
"loss": 1.7789,
"step": 10
},
{
"epoch": 0.2634730538922156,
"eval_loss": 0.0816006287932396,
"eval_runtime": 0.9633,
"eval_samples_per_second": 18.686,
"eval_steps_per_second": 9.343,
"step": 11
},
{
"epoch": 0.3592814371257485,
"grad_norm": 1.298175573348999,
"learning_rate": 9.409606321741775e-05,
"loss": 0.1859,
"step": 15
},
{
"epoch": 0.47904191616766467,
"grad_norm": 0.027312569320201874,
"learning_rate": 7.777851165098012e-05,
"loss": 0.004,
"step": 20
},
{
"epoch": 0.5269461077844312,
"eval_loss": 0.00016009138198569417,
"eval_runtime": 0.9649,
"eval_samples_per_second": 18.654,
"eval_steps_per_second": 9.327,
"step": 22
},
{
"epoch": 0.5988023952095808,
"grad_norm": 0.010692027397453785,
"learning_rate": 5.490085701647805e-05,
"loss": 0.001,
"step": 25
},
{
"epoch": 0.718562874251497,
"grad_norm": 0.04211507365107536,
"learning_rate": 3.086582838174551e-05,
"loss": 0.0005,
"step": 30
},
{
"epoch": 0.7904191616766467,
"eval_loss": 8.166131738107651e-05,
"eval_runtime": 0.9645,
"eval_samples_per_second": 18.663,
"eval_steps_per_second": 9.332,
"step": 33
},
{
"epoch": 0.8383233532934131,
"grad_norm": 0.00778238708153367,
"learning_rate": 1.134947733186315e-05,
"loss": 0.0006,
"step": 35
},
{
"epoch": 0.9580838323353293,
"grad_norm": 0.010310592129826546,
"learning_rate": 9.607359798384785e-07,
"loss": 0.0002,
"step": 40
}
],
"logging_steps": 5,
"max_steps": 42,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7710808372936704.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
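
The JSON above is the Trainer state saved alongside this checkpoint: its "log_history" interleaves training records (loss, grad_norm, learning_rate) with evaluation records (eval_loss and throughput). A minimal sketch of inspecting it offline is shown below, assuming the file sits at trainer_state.json inside the checkpoint folder; the "checkpoint-42" path is a placeholder, not a path confirmed by this repo.

import json

# Load the trainer state saved with the checkpoint
# (the Hugging Face Trainer writes it as trainer_state.json; the folder name below is assumed).
with open("checkpoint-42/trainer_state.json") as f:
    state = json.load(f)

# Split log_history into training and evaluation entries:
# training entries carry "loss", evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

for e in train_log:
    print(f"step {e['step']:>3}  loss {e['loss']:.4f}  lr {e['learning_rate']:.2e}")

for e in eval_log:
    print(f"step {e['step']:>3}  eval_loss {e['eval_loss']:.6f}")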