{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 140,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.007142857142857143,
"grad_norm": 106.10796356201172,
"learning_rate": 1.4285714285714285e-05,
"loss": 27.4831,
"step": 1
},
{
"epoch": 0.03571428571428571,
"grad_norm": 46.88632583618164,
"learning_rate": 7.142857142857143e-05,
"loss": 26.3105,
"step": 5
},
{
"epoch": 0.07142857142857142,
"grad_norm": 16.438461303710938,
"learning_rate": 0.00014285714285714287,
"loss": 20.4923,
"step": 10
},
{
"epoch": 0.10714285714285714,
"grad_norm": 9.703349113464355,
"learning_rate": 0.00019996891820008164,
"loss": 16.0475,
"step": 15
},
{
"epoch": 0.14285714285714285,
"grad_norm": 3.3252718448638916,
"learning_rate": 0.00019888308262251285,
"loss": 13.4483,
"step": 20
},
{
"epoch": 0.17857142857142858,
"grad_norm": 3.251009941101074,
"learning_rate": 0.0001962624246950012,
"loss": 12.6172,
"step": 25
},
{
"epoch": 0.21428571428571427,
"grad_norm": 5.38721227645874,
"learning_rate": 0.00019214762118704076,
"loss": 11.9827,
"step": 30
},
{
"epoch": 0.25,
"grad_norm": 10.532784461975098,
"learning_rate": 0.00018660254037844388,
"loss": 11.0145,
"step": 35
},
{
"epoch": 0.2857142857142857,
"grad_norm": 14.15513801574707,
"learning_rate": 0.00017971325072229226,
"loss": 9.3353,
"step": 40
},
{
"epoch": 0.32142857142857145,
"grad_norm": 21.529788970947266,
"learning_rate": 0.00017158668492597186,
"loss": 7.229,
"step": 45
},
{
"epoch": 0.35714285714285715,
"grad_norm": 15.934834480285645,
"learning_rate": 0.00016234898018587337,
"loss": 4.7381,
"step": 50
},
{
"epoch": 0.39285714285714285,
"grad_norm": 5.8390045166015625,
"learning_rate": 0.0001521435203379498,
"loss": 2.8963,
"step": 55
},
{
"epoch": 0.42857142857142855,
"grad_norm": 4.718578815460205,
"learning_rate": 0.00014112871031306119,
"loss": 2.4009,
"step": 60
},
{
"epoch": 0.4642857142857143,
"grad_norm": 3.6623828411102295,
"learning_rate": 0.00012947551744109043,
"loss": 2.1721,
"step": 65
},
{
"epoch": 0.5,
"grad_norm": 2.086202383041382,
"learning_rate": 0.00011736481776669306,
"loss": 2.0155,
"step": 70
},
{
"epoch": 0.5357142857142857,
"grad_norm": 1.1254757642745972,
"learning_rate": 0.00010498458856606972,
"loss": 1.9116,
"step": 75
},
{
"epoch": 0.5714285714285714,
"grad_norm": 1.9386184215545654,
"learning_rate": 9.252699064135758e-05,
"loss": 1.8129,
"step": 80
},
{
"epoch": 0.6071428571428571,
"grad_norm": 1.5302088260650635,
"learning_rate": 8.018538568006027e-05,
"loss": 1.7831,
"step": 85
},
{
"epoch": 0.6428571428571429,
"grad_norm": 1.059885859489441,
"learning_rate": 6.815133497483157e-05,
"loss": 1.7466,
"step": 90
},
{
"epoch": 0.6785714285714286,
"grad_norm": 1.2457741498947144,
"learning_rate": 5.6611626088244194e-05,
"loss": 1.7392,
"step": 95
},
{
"epoch": 0.7142857142857143,
"grad_norm": 1.7660045623779297,
"learning_rate": 4.574537361342407e-05,
"loss": 1.6987,
"step": 100
},
{
"epoch": 0.75,
"grad_norm": 0.7355481386184692,
"learning_rate": 3.5721239031346066e-05,
"loss": 1.6811,
"step": 105
},
{
"epoch": 0.7857142857142857,
"grad_norm": 0.7224046587944031,
"learning_rate": 2.669481281701739e-05,
"loss": 1.7008,
"step": 110
},
{
"epoch": 0.8214285714285714,
"grad_norm": 0.7918136715888977,
"learning_rate": 1.880619942841435e-05,
"loss": 1.6759,
"step": 115
},
{
"epoch": 0.8571428571428571,
"grad_norm": 1.5361932516098022,
"learning_rate": 1.2177842662977135e-05,
"loss": 1.6748,
"step": 120
},
{
"epoch": 0.8928571428571429,
"grad_norm": 1.0850142240524292,
"learning_rate": 6.9126251355795864e-06,
"loss": 1.6777,
"step": 125
},
{
"epoch": 0.9285714285714286,
"grad_norm": 0.7962830662727356,
"learning_rate": 3.092271377092215e-06,
"loss": 1.6705,
"step": 130
},
{
"epoch": 0.9642857142857143,
"grad_norm": 0.7234132885932922,
"learning_rate": 7.760793399827937e-07,
"loss": 1.6417,
"step": 135
},
{
"epoch": 1.0,
"grad_norm": 0.7538830637931824,
"learning_rate": 0.0,
"loss": 1.6542,
"step": 140
},
{
"epoch": 1.0,
"eval_loss": 1.6615785360336304,
"eval_runtime": 3.8187,
"eval_samples_per_second": 46.613,
"eval_steps_per_second": 0.786,
"step": 140
},
{
"epoch": 1.0,
"step": 140,
"total_flos": 4.268849030789857e+17,
"train_loss": 5.964417205538068,
"train_runtime": 1743.5737,
"train_samples_per_second": 10.269,
"train_steps_per_second": 0.08
}
],
"logging_steps": 5,
"max_steps": 140,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.268849030789857e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
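
The loss values recorded in "log_history" (27.4831 at step 1 down to 1.6542 at step 140, with eval_loss 1.6616 at epoch 1.0) can be inspected directly from this file. A minimal Python sketch follows, assuming the file has been downloaded locally as trainer_state.json and that matplotlib is installed; the path and plot labels are illustrative, not part of the checkpoint.

# Sketch: load this trainer_state.json and plot training loss vs. global step.
import json

import matplotlib.pyplot as plt

# Assumed local path to this file.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step training logs (entries that carry a "loss" key);
# the eval and summary entries at the end of log_history are skipped.
train_logs = [e for e in state["log_history"] if "loss" in e]

steps = [e["step"] for e in train_logs]
losses = [e["loss"] for e in train_logs]

plt.plot(steps, losses, marker="o")
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title("gemma7b-lora-alpaca-11-v1 (1 epoch, 140 steps)")
plt.show()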