{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 250,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 5.3643364906311035,
"learning_rate": 0.00019987369566060176,
"loss": 5.4501,
"step": 5
},
{
"epoch": 0.04,
"grad_norm": 7.935824394226074,
"learning_rate": 0.00019949510169813003,
"loss": 4.9761,
"step": 10
},
{
"epoch": 0.06,
"grad_norm": 6.881031036376953,
"learning_rate": 0.0001986685944207868,
"loss": 4.0797,
"step": 15
},
{
"epoch": 0.08,
"grad_norm": 6.994723320007324,
"learning_rate": 0.00019745268727865774,
"loss": 2.9864,
"step": 20
},
{
"epoch": 0.1,
"grad_norm": 1.695618748664856,
"learning_rate": 0.0001958521789017376,
"loss": 2.8715,
"step": 25
},
{
"epoch": 0.12,
"grad_norm": 5.028966903686523,
"learning_rate": 0.00019387338576538744,
"loss": 2.4938,
"step": 30
},
{
"epoch": 0.14,
"grad_norm": 3.31644344329834,
"learning_rate": 0.00019152411726209176,
"loss": 2.4925,
"step": 35
},
{
"epoch": 0.16,
"grad_norm": 10.450948715209961,
"learning_rate": 0.0001899405251566371,
"loss": 2.5641,
"step": 40
},
{
"epoch": 0.18,
"grad_norm": 2.144054412841797,
"learning_rate": 0.0001870183754669526,
"loss": 2.8727,
"step": 45
},
{
"epoch": 0.2,
"grad_norm": 92.50595092773438,
"learning_rate": 0.0001837528040042142,
"loss": 2.9301,
"step": 50
},
{
"epoch": 0.22,
"grad_norm": 26.027944564819336,
"learning_rate": 0.00018015669848708767,
"loss": 2.5419,
"step": 55
},
{
"epoch": 0.24,
"grad_norm": 12.144055366516113,
"learning_rate": 0.0001762442511011448,
"loss": 2.5677,
"step": 60
},
{
"epoch": 0.26,
"grad_norm": 12.395885467529297,
"learning_rate": 0.0001720309024887907,
"loss": 2.5559,
"step": 65
},
{
"epoch": 0.28,
"grad_norm": 3.1185450553894043,
"learning_rate": 0.00016753328081210245,
"loss": 2.5763,
"step": 70
},
{
"epoch": 0.3,
"grad_norm": 5.239765167236328,
"learning_rate": 0.00016276913612907007,
"loss": 2.4963,
"step": 75
},
{
"epoch": 0.32,
"grad_norm": 6.314518451690674,
"learning_rate": 0.00015775727034222675,
"loss": 2.5602,
"step": 80
},
{
"epoch": 0.34,
"grad_norm": 2.354008674621582,
"learning_rate": 0.0001525174629961296,
"loss": 2.6872,
"step": 85
},
{
"epoch": 0.36,
"grad_norm": 1.4496679306030273,
"learning_rate": 0.0001470703932165333,
"loss": 2.4969,
"step": 90
},
{
"epoch": 0.38,
"grad_norm": 9.645381927490234,
"learning_rate": 0.00014143755809932845,
"loss": 2.4725,
"step": 95
},
{
"epoch": 0.4,
"grad_norm": 1.5071710348129272,
"learning_rate": 0.00013564118787132506,
"loss": 2.3662,
"step": 100
},
{
"epoch": 0.42,
"grad_norm": 3.7371649742126465,
"learning_rate": 0.0001297041581577035,
"loss": 2.3748,
"step": 105
},
{
"epoch": 0.44,
"grad_norm": 3.71077823638916,
"learning_rate": 0.00012364989970237248,
"loss": 2.4374,
"step": 110
},
{
"epoch": 0.46,
"grad_norm": 1.1883786916732788,
"learning_rate": 0.00011750230589752762,
"loss": 2.2459,
"step": 115
},
{
"epoch": 0.48,
"grad_norm": 2.8230981826782227,
"learning_rate": 0.00011128563848734816,
"loss": 2.6068,
"step": 120
},
{
"epoch": 0.5,
"grad_norm": 6.169120788574219,
"learning_rate": 0.00010502443181797697,
"loss": 2.6697,
"step": 125
},
{
"epoch": 0.52,
"grad_norm": 1.7131496667861938,
"learning_rate": 9.874339601166473e-05,
"loss": 2.1541,
"step": 130
},
{
"epoch": 0.54,
"grad_norm": 2.3437445163726807,
"learning_rate": 9.246731944720675e-05,
"loss": 2.5094,
"step": 135
},
{
"epoch": 0.56,
"grad_norm": 1.0078409910202026,
"learning_rate": 8.62209709315362e-05,
"loss": 2.693,
"step": 140
},
{
"epoch": 0.58,
"grad_norm": 0.9157446026802063,
"learning_rate": 8.002900194855932e-05,
"loss": 2.4028,
"step": 145
},
{
"epoch": 0.6,
"grad_norm": 6.016546249389648,
"learning_rate": 7.391584937101033e-05,
"loss": 2.5198,
"step": 150
},
{
"epoch": 0.62,
"grad_norm": 2.6909899711608887,
"learning_rate": 6.790563901927907e-05,
"loss": 2.3054,
"step": 155
},
{
"epoch": 0.64,
"grad_norm": 3.064624071121216,
"learning_rate": 6.20220904478199e-05,
"loss": 2.3357,
"step": 160
},
{
"epoch": 0.66,
"grad_norm": 1.098503589630127,
"learning_rate": 5.6288423334906735e-05,
"loss": 2.1626,
"step": 165
},
{
"epoch": 0.68,
"grad_norm": 0.9467936158180237,
"learning_rate": 5.072726584517086e-05,
"loss": 2.398,
"step": 170
},
{
"epoch": 0.7,
"grad_norm": 2.01023006439209,
"learning_rate": 4.5360565326573104e-05,
"loss": 2.4083,
"step": 175
},
{
"epoch": 0.72,
"grad_norm": 1.5574626922607422,
"learning_rate": 4.020950169424815e-05,
"loss": 2.3016,
"step": 180
},
{
"epoch": 0.74,
"grad_norm": 1.4903650283813477,
"learning_rate": 3.52944038430556e-05,
"loss": 2.1456,
"step": 185
},
{
"epoch": 0.76,
"grad_norm": 2.6484365463256836,
"learning_rate": 3.063466941871952e-05,
"loss": 2.3501,
"step": 190
},
{
"epoch": 0.78,
"grad_norm": 1.781996488571167,
"learning_rate": 2.624868826418262e-05,
"loss": 2.4699,
"step": 195
},
{
"epoch": 0.8,
"grad_norm": 1.1791914701461792,
"learning_rate": 2.2153769843297667e-05,
"loss": 2.5444,
"step": 200
},
{
"epoch": 0.82,
"grad_norm": 9.811576843261719,
"learning_rate": 1.8366074928281607e-05,
"loss": 2.3116,
"step": 205
},
{
"epoch": 0.84,
"grad_norm": 1.9491240978240967,
"learning_rate": 1.4900551820530828e-05,
"loss": 2.5006,
"step": 210
},
{
"epoch": 0.86,
"grad_norm": 8.290270805358887,
"learning_rate": 1.1770877356504683e-05,
"loss": 2.2932,
"step": 215
},
{
"epoch": 0.88,
"grad_norm": 1.6194989681243896,
"learning_rate": 8.989402931500434e-06,
"loss": 2.22,
"step": 220
},
{
"epoch": 0.9,
"grad_norm": 2.6504359245300293,
"learning_rate": 6.5671057543387985e-06,
"loss": 2.4223,
"step": 225
},
{
"epoch": 0.92,
"grad_norm": 2.504225969314575,
"learning_rate": 4.513545525335705e-06,
"loss": 2.294,
"step": 230
},
{
"epoch": 0.94,
"grad_norm": 2.893841505050659,
"learning_rate": 2.836826708532603e-06,
"loss": 2.2694,
"step": 235
},
{
"epoch": 0.96,
"grad_norm": 1.2419345378875732,
"learning_rate": 1.543566547079467e-06,
"loss": 2.4213,
"step": 240
},
{
"epoch": 0.98,
"grad_norm": 2.36327862739563,
"learning_rate": 6.388689479991605e-07,
"loss": 2.6469,
"step": 245
},
{
"epoch": 1.0,
"grad_norm": 2.0228891372680664,
"learning_rate": 1.2630433939825327e-07,
"loss": 2.6638,
"step": 250
},
{
"epoch": 1.0,
"step": 250,
"total_flos": 2336253085188096.0,
"train_loss": 2.622329357147217,
"train_runtime": 354.2831,
"train_samples_per_second": 2.823,
"train_steps_per_second": 0.706
}
],
"logging_steps": 5,
"max_steps": 250,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 2336253085188096.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}