gemma7b-lora-alpaca-11-v1 / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 140,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007142857142857143,
      "grad_norm": 104.33383178710938,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 26.8049,
      "step": 1
    },
    {
      "epoch": 0.03571428571428571,
      "grad_norm": 53.856834411621094,
      "learning_rate": 7.142857142857143e-05,
      "loss": 26.2527,
      "step": 5
    },
    {
      "epoch": 0.07142857142857142,
      "grad_norm": 16.46755599975586,
      "learning_rate": 0.00014285714285714287,
      "loss": 20.5587,
      "step": 10
    },
    {
      "epoch": 0.10714285714285714,
      "grad_norm": 9.823058128356934,
      "learning_rate": 0.00019996891820008164,
      "loss": 16.1016,
      "step": 15
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 3.4154016971588135,
      "learning_rate": 0.00019888308262251285,
      "loss": 13.4739,
      "step": 20
    },
    {
      "epoch": 0.17857142857142858,
      "grad_norm": 3.293179988861084,
      "learning_rate": 0.0001962624246950012,
      "loss": 12.6248,
      "step": 25
    },
    {
      "epoch": 0.21428571428571427,
      "grad_norm": 5.406515121459961,
      "learning_rate": 0.00019214762118704076,
      "loss": 11.9858,
      "step": 30
    },
    {
      "epoch": 0.25,
      "grad_norm": 10.7940034866333,
      "learning_rate": 0.00018660254037844388,
      "loss": 11.0004,
      "step": 35
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 14.75281047821045,
      "learning_rate": 0.00017971325072229226,
      "loss": 9.2523,
      "step": 40
    },
    {
      "epoch": 0.32142857142857145,
      "grad_norm": 22.644073486328125,
      "learning_rate": 0.00017158668492597186,
      "loss": 7.0092,
      "step": 45
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 14.965047836303711,
      "learning_rate": 0.00016234898018587337,
      "loss": 4.4455,
      "step": 50
    },
    {
      "epoch": 0.39285714285714285,
      "grad_norm": 5.274498462677002,
      "learning_rate": 0.0001521435203379498,
      "loss": 2.7643,
      "step": 55
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 4.607834339141846,
      "learning_rate": 0.00014112871031306119,
      "loss": 2.3451,
      "step": 60
    },
    {
      "epoch": 0.4642857142857143,
      "grad_norm": 3.502431631088257,
      "learning_rate": 0.00012947551744109043,
      "loss": 2.1217,
      "step": 65
    },
    {
      "epoch": 0.5,
      "grad_norm": 1.5408211946487427,
      "learning_rate": 0.00011736481776669306,
      "loss": 1.979,
      "step": 70
    },
    {
      "epoch": 0.5357142857142857,
      "grad_norm": 1.2710477113723755,
      "learning_rate": 0.00010498458856606972,
      "loss": 1.8913,
      "step": 75
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 1.2541048526763916,
      "learning_rate": 9.252699064135758e-05,
      "loss": 1.7956,
      "step": 80
    },
    {
      "epoch": 0.6071428571428571,
      "grad_norm": 1.3496887683868408,
      "learning_rate": 8.018538568006027e-05,
      "loss": 1.7699,
      "step": 85
    },
    {
      "epoch": 0.6428571428571429,
      "grad_norm": 1.127426028251648,
      "learning_rate": 6.815133497483157e-05,
      "loss": 1.7305,
      "step": 90
    },
    {
      "epoch": 0.6785714285714286,
      "grad_norm": 1.5040632486343384,
      "learning_rate": 5.6611626088244194e-05,
      "loss": 1.728,
      "step": 95
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 1.3170948028564453,
      "learning_rate": 4.574537361342407e-05,
      "loss": 1.6858,
      "step": 100
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.6884583830833435,
      "learning_rate": 3.5721239031346066e-05,
      "loss": 1.6739,
      "step": 105
    },
    {
      "epoch": 0.7857142857142857,
      "grad_norm": 0.6821992993354797,
      "learning_rate": 2.669481281701739e-05,
      "loss": 1.6896,
      "step": 110
    },
    {
      "epoch": 0.8214285714285714,
      "grad_norm": 0.8719048500061035,
      "learning_rate": 1.880619942841435e-05,
      "loss": 1.6684,
      "step": 115
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 1.8157048225402832,
      "learning_rate": 1.2177842662977135e-05,
      "loss": 1.6667,
      "step": 120
    },
    {
      "epoch": 0.8928571428571429,
      "grad_norm": 1.1482408046722412,
      "learning_rate": 6.9126251355795864e-06,
      "loss": 1.6691,
      "step": 125
    },
    {
      "epoch": 0.9285714285714286,
      "grad_norm": 0.786839485168457,
      "learning_rate": 3.092271377092215e-06,
      "loss": 1.6592,
      "step": 130
    },
    {
      "epoch": 0.9642857142857143,
      "grad_norm": 0.6941242814064026,
      "learning_rate": 7.760793399827937e-07,
      "loss": 1.6378,
      "step": 135
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.7584343552589417,
      "learning_rate": 0.0,
      "loss": 1.6467,
      "step": 140
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.6531758308410645,
      "eval_runtime": 1.3594,
      "eval_samples_per_second": 130.936,
      "eval_steps_per_second": 2.207,
      "step": 140
    },
    {
      "epoch": 1.0,
      "step": 140,
      "total_flos": 4.268849030789857e+17,
      "train_loss": 5.926357351030622,
      "train_runtime": 363.5109,
      "train_samples_per_second": 49.253,
      "train_steps_per_second": 0.385
    }
  ],
  "logging_steps": 5,
  "max_steps": 140,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.268849030789857e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}