gemma7b-lora-alpaca-11-v1 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 140,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.007142857142857143,
"grad_norm": 106.10701751708984,
"learning_rate": 1.4285714285714285e-05,
"loss": 27.4831,
"step": 1
},
{
"epoch": 0.03571428571428571,
"grad_norm": 46.149723052978516,
"learning_rate": 7.142857142857143e-05,
"loss": 26.2992,
"step": 5
},
{
"epoch": 0.07142857142857142,
"grad_norm": 16.458189010620117,
"learning_rate": 0.00014285714285714287,
"loss": 20.4929,
"step": 10
},
{
"epoch": 0.10714285714285714,
"grad_norm": 9.764805793762207,
"learning_rate": 0.00019996891820008164,
"loss": 16.0513,
"step": 15
},
{
"epoch": 0.14285714285714285,
"grad_norm": 3.3296561241149902,
"learning_rate": 0.00019888308262251285,
"loss": 13.4526,
"step": 20
},
{
"epoch": 0.17857142857142858,
"grad_norm": 3.235478162765503,
"learning_rate": 0.0001962624246950012,
"loss": 12.6172,
"step": 25
},
{
"epoch": 0.21428571428571427,
"grad_norm": 5.432589530944824,
"learning_rate": 0.00019214762118704076,
"loss": 11.9808,
"step": 30
},
{
"epoch": 0.25,
"grad_norm": 10.644349098205566,
"learning_rate": 0.00018660254037844388,
"loss": 11.0065,
"step": 35
},
{
"epoch": 0.2857142857142857,
"grad_norm": 14.282907485961914,
"learning_rate": 0.00017971325072229226,
"loss": 9.3131,
"step": 40
},
{
"epoch": 0.32142857142857145,
"grad_norm": 21.60712432861328,
"learning_rate": 0.00017158668492597186,
"loss": 7.1723,
"step": 45
},
{
"epoch": 0.35714285714285715,
"grad_norm": 15.418853759765625,
"learning_rate": 0.00016234898018587337,
"loss": 4.6599,
"step": 50
},
{
"epoch": 0.39285714285714285,
"grad_norm": 5.335650444030762,
"learning_rate": 0.0001521435203379498,
"loss": 2.855,
"step": 55
},
{
"epoch": 0.42857142857142855,
"grad_norm": 4.913125038146973,
"learning_rate": 0.00014112871031306119,
"loss": 2.3938,
"step": 60
},
{
"epoch": 0.4642857142857143,
"grad_norm": 3.951244592666626,
"learning_rate": 0.00012947551744109043,
"loss": 2.1646,
"step": 65
},
{
"epoch": 0.5,
"grad_norm": 1.6933783292770386,
"learning_rate": 0.00011736481776669306,
"loss": 2.0061,
"step": 70
},
{
"epoch": 0.5357142857142857,
"grad_norm": 0.9344208836555481,
"learning_rate": 0.00010498458856606972,
"loss": 1.9092,
"step": 75
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.9437263011932373,
"learning_rate": 9.252699064135758e-05,
"loss": 1.8086,
"step": 80
},
{
"epoch": 0.6071428571428571,
"grad_norm": 1.5205684900283813,
"learning_rate": 8.018538568006027e-05,
"loss": 1.7838,
"step": 85
},
{
"epoch": 0.6428571428571429,
"grad_norm": 0.8009554743766785,
"learning_rate": 6.815133497483157e-05,
"loss": 1.7433,
"step": 90
},
{
"epoch": 0.6785714285714286,
"grad_norm": 0.9393540620803833,
"learning_rate": 5.6611626088244194e-05,
"loss": 1.736,
"step": 95
},
{
"epoch": 0.7142857142857143,
"grad_norm": 1.6479978561401367,
"learning_rate": 4.574537361342407e-05,
"loss": 1.6976,
"step": 100
},
{
"epoch": 0.75,
"grad_norm": 0.7829992771148682,
"learning_rate": 3.5721239031346066e-05,
"loss": 1.6793,
"step": 105
},
{
"epoch": 0.7857142857142857,
"grad_norm": 0.7236832976341248,
"learning_rate": 2.669481281701739e-05,
"loss": 1.7,
"step": 110
},
{
"epoch": 0.8214285714285714,
"grad_norm": 0.7881686687469482,
"learning_rate": 1.880619942841435e-05,
"loss": 1.6778,
"step": 115
},
{
"epoch": 0.8571428571428571,
"grad_norm": 1.7755261659622192,
"learning_rate": 1.2177842662977135e-05,
"loss": 1.6757,
"step": 120
},
{
"epoch": 0.8928571428571429,
"grad_norm": 1.0512515306472778,
"learning_rate": 6.9126251355795864e-06,
"loss": 1.6766,
"step": 125
},
{
"epoch": 0.9285714285714286,
"grad_norm": 0.7070327997207642,
"learning_rate": 3.092271377092215e-06,
"loss": 1.67,
"step": 130
},
{
"epoch": 0.9642857142857143,
"grad_norm": 0.7091158628463745,
"learning_rate": 7.760793399827937e-07,
"loss": 1.6459,
"step": 135
},
{
"epoch": 1.0,
"grad_norm": 0.7374704480171204,
"learning_rate": 0.0,
"loss": 1.6561,
"step": 140
},
{
"epoch": 1.0,
"eval_loss": 1.6581236124038696,
"eval_runtime": 3.8242,
"eval_samples_per_second": 46.545,
"eval_steps_per_second": 0.784,
"step": 140
},
{
"epoch": 1.0,
"step": 140,
"total_flos": 4.268849030789857e+17,
"train_loss": 5.955788305827549,
"train_runtime": 1733.7968,
"train_samples_per_second": 10.326,
"train_steps_per_second": 0.081
}
],
"logging_steps": 5,
"max_steps": 140,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.268849030789857e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
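
For reference, the log_history entries above can be read back programmatically. A minimal sketch, assuming the file has been downloaded locally as trainer_state.json (the path is illustrative), that extracts the per-step training loss:

import json

# Load the trainer state written by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the periodic training logs (entries carrying a "loss" key);
# the final entries hold eval metrics and the run summary instead.
train_logs = [e for e in state["log_history"] if "loss" in e]

steps = [e["step"] for e in train_logs]
losses = [e["loss"] for e in train_logs]

print(f"{len(train_logs)} logged steps, final training loss {losses[-1]:.4f}")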