{
  "epoch": 0.03262034577566522,
  "global_step": 350,
  "max_steps": 3000,
  "logging_steps": 50,
  "eval_steps": 50,
  "save_steps": 50,
  "train_batch_size": 8,
  "num_train_epochs": 1,
  "num_input_tokens_seen": 0,
  "total_flos": 1.6004947053699072e+17,
  "log_history": [
    {
      "loss": 11.5092,
      "grad_norm": 27.41529655456543,
      "learning_rate": 4.666666666666667e-05,
      "epoch": 0.004660049396523603,
      "step": 50
    },
    {
      "eval_loss": 6.5413079261779785,
      "eval_runtime": 484.9209,
      "eval_samples_per_second": 1.862,
      "eval_steps_per_second": 0.466,
      "epoch": 0.004660049396523603,
      "step": 50
    },
    {
      "loss": 0.907,
      "grad_norm": 0.3267304003238678,
      "learning_rate": 9.999988344964554e-05,
      "epoch": 0.009320098793047207,
      "step": 100
    },
    {
      "eval_loss": 0.2963584065437317,
      "eval_runtime": 482.3744,
      "eval_samples_per_second": 1.872,
      "eval_steps_per_second": 0.469,
      "epoch": 0.009320098793047207,
      "step": 100
    },
    {
      "loss": 0.2887,
      "grad_norm": 0.23999030888080597,
      "learning_rate": 9.992123261946325e-05,
      "epoch": 0.01398014818957081,
      "step": 150
    },
    {
      "eval_loss": 0.26408717036247253,
      "eval_runtime": 483.0929,
      "eval_samples_per_second": 1.869,
      "eval_steps_per_second": 0.468,
      "epoch": 0.01398014818957081,
      "step": 150
    },
    {
      "loss": 0.273,
      "grad_norm": 0.22597914934158325,
      "learning_rate": 9.96971586146684e-05,
      "epoch": 0.018640197586094413,
      "step": 200
    },
    {
      "eval_loss": 0.2530948221683502,
      "eval_runtime": 483.695,
      "eval_samples_per_second": 1.867,
      "eval_steps_per_second": 0.467,
      "epoch": 0.018640197586094413,
      "step": 200
    },
    {
      "loss": 0.2655,
      "grad_norm": 0.2108173966407776,
      "learning_rate": 9.932831417461484e-05,
      "epoch": 0.023300246982618015,
      "step": 250
    },
    {
      "eval_loss": 0.2521112263202667,
      "eval_runtime": 484.1571,
      "eval_samples_per_second": 1.865,
      "eval_steps_per_second": 0.467,
      "epoch": 0.023300246982618015,
      "step": 250
    },
    {
      "loss": 0.2635,
      "grad_norm": 0.16866298019886017,
      "learning_rate": 9.881577376254393e-05,
      "epoch": 0.02796029637914162,
      "step": 300
    },
    {
      "eval_loss": 0.24954503774642944,
      "eval_runtime": 482.9845,
      "eval_samples_per_second": 1.87,
      "eval_steps_per_second": 0.468,
      "epoch": 0.02796029637914162,
      "step": 300
    },
    {
      "loss": 0.2622,
      "grad_norm": 0.19963641464710236,
      "learning_rate": 9.816103043561648e-05,
      "epoch": 0.03262034577566522,
      "step": 350
    },
    {
      "eval_loss": 0.24790766835212708,
      "eval_runtime": 482.7586,
      "eval_samples_per_second": 1.87,
      "eval_steps_per_second": 0.468,
      "epoch": 0.03262034577566522,
      "step": 350
    }
  ],
  "best_metric": 0.24790766835212708,
  "best_model_checkpoint": "./multimodal-phi3_5-mini-instruct-llava_adapter/checkpoint-350",
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "is_hyper_param_search": false,
  "trial_name": null,
  "trial_params": null,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.01
      },
      "attributes": {
        "early_stopping_patience_counter": 3
      }
    },
    "TrainerControl": {
      "args": {
        "should_training_stop": true,
        "should_epoch_stop": false,
        "should_save": true,
        "should_evaluate": false,
        "should_log": false
      },
      "attributes": {}
    }
  }
}