{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.032550758839565444,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00016275379419782723,
"eval_loss": 1.5723884105682373,
"eval_runtime": 196.8327,
"eval_samples_per_second": 13.148,
"eval_steps_per_second": 6.574,
"step": 1
},
{
"epoch": 0.0008137689709891362,
"grad_norm": 0.35386717319488525,
"learning_rate": 5e-05,
"loss": 1.3648,
"step": 5
},
{
"epoch": 0.0016275379419782724,
"grad_norm": 0.4674012064933777,
"learning_rate": 0.0001,
"loss": 1.3267,
"step": 10
},
{
"epoch": 0.0024413069129674086,
"grad_norm": 0.5329536199569702,
"learning_rate": 9.98292246503335e-05,
"loss": 1.2973,
"step": 15
},
{
"epoch": 0.0032550758839565447,
"grad_norm": 0.6087198257446289,
"learning_rate": 9.931806517013612e-05,
"loss": 1.2921,
"step": 20
},
{
"epoch": 0.0040688448549456805,
"grad_norm": 0.714724600315094,
"learning_rate": 9.847001329696653e-05,
"loss": 1.2832,
"step": 25
},
{
"epoch": 0.004882613825934817,
"grad_norm": 1.2130167484283447,
"learning_rate": 9.729086208503174e-05,
"loss": 1.3753,
"step": 30
},
{
"epoch": 0.005696382796923953,
"grad_norm": 1.649317979812622,
"learning_rate": 9.578866633275288e-05,
"loss": 1.4577,
"step": 35
},
{
"epoch": 0.0065101517679130895,
"grad_norm": 2.20588755607605,
"learning_rate": 9.397368756032445e-05,
"loss": 1.0042,
"step": 40
},
{
"epoch": 0.007323920738902225,
"grad_norm": 2.594423294067383,
"learning_rate": 9.185832391312644e-05,
"loss": 1.1798,
"step": 45
},
{
"epoch": 0.008137689709891361,
"grad_norm": 9.170083045959473,
"learning_rate": 8.945702546981969e-05,
"loss": 1.7771,
"step": 50
},
{
"epoch": 0.008137689709891361,
"eval_loss": 1.2270060777664185,
"eval_runtime": 198.1221,
"eval_samples_per_second": 13.063,
"eval_steps_per_second": 6.531,
"step": 50
},
{
"epoch": 0.008951458680880498,
"grad_norm": 0.6143898963928223,
"learning_rate": 8.678619553365659e-05,
"loss": 1.2446,
"step": 55
},
{
"epoch": 0.009765227651869634,
"grad_norm": 0.5476758480072021,
"learning_rate": 8.386407858128706e-05,
"loss": 1.1483,
"step": 60
},
{
"epoch": 0.01057899662285877,
"grad_norm": 0.5872772932052612,
"learning_rate": 8.07106356344834e-05,
"loss": 1.1986,
"step": 65
},
{
"epoch": 0.011392765593847906,
"grad_norm": 0.6223112940788269,
"learning_rate": 7.734740790612136e-05,
"loss": 1.1515,
"step": 70
},
{
"epoch": 0.012206534564837043,
"grad_norm": 0.7705265879631042,
"learning_rate": 7.379736965185368e-05,
"loss": 1.1816,
"step": 75
},
{
"epoch": 0.013020303535826179,
"grad_norm": 1.007130742073059,
"learning_rate": 7.008477123264848e-05,
"loss": 1.1561,
"step": 80
},
{
"epoch": 0.013834072506815315,
"grad_norm": 1.347641110420227,
"learning_rate": 6.623497346023418e-05,
"loss": 1.1284,
"step": 85
},
{
"epoch": 0.01464784147780445,
"grad_norm": 1.3472156524658203,
"learning_rate": 6.227427435703997e-05,
"loss": 0.9544,
"step": 90
},
{
"epoch": 0.015461610448793588,
"grad_norm": 1.9317922592163086,
"learning_rate": 5.8229729514036705e-05,
"loss": 0.9731,
"step": 95
},
{
"epoch": 0.016275379419782722,
"grad_norm": 6.384594917297363,
"learning_rate": 5.4128967273616625e-05,
"loss": 1.7312,
"step": 100
},
{
"epoch": 0.016275379419782722,
"eval_loss": 1.1505261659622192,
"eval_runtime": 198.1408,
"eval_samples_per_second": 13.061,
"eval_steps_per_second": 6.531,
"step": 100
},
{
"epoch": 0.01708914839077186,
"grad_norm": 0.4909661114215851,
"learning_rate": 5e-05,
"loss": 1.1048,
"step": 105
},
{
"epoch": 0.017902917361760997,
"grad_norm": 0.476285845041275,
"learning_rate": 4.5871032726383386e-05,
"loss": 1.1538,
"step": 110
},
{
"epoch": 0.018716686332750133,
"grad_norm": 0.5974850058555603,
"learning_rate": 4.17702704859633e-05,
"loss": 1.0978,
"step": 115
},
{
"epoch": 0.01953045530373927,
"grad_norm": 0.7488738894462585,
"learning_rate": 3.772572564296005e-05,
"loss": 1.1489,
"step": 120
},
{
"epoch": 0.020344224274728404,
"grad_norm": 0.8442608118057251,
"learning_rate": 3.3765026539765834e-05,
"loss": 1.1564,
"step": 125
},
{
"epoch": 0.02115799324571754,
"grad_norm": 1.2696480751037598,
"learning_rate": 2.991522876735154e-05,
"loss": 1.0814,
"step": 130
},
{
"epoch": 0.021971762216706676,
"grad_norm": 1.3110004663467407,
"learning_rate": 2.6202630348146324e-05,
"loss": 1.1289,
"step": 135
},
{
"epoch": 0.02278553118769581,
"grad_norm": 2.1278111934661865,
"learning_rate": 2.2652592093878666e-05,
"loss": 1.0119,
"step": 140
},
{
"epoch": 0.02359930015868495,
"grad_norm": 3.1425962448120117,
"learning_rate": 1.928936436551661e-05,
"loss": 1.0978,
"step": 145
},
{
"epoch": 0.024413069129674086,
"grad_norm": 5.378885746002197,
"learning_rate": 1.6135921418712956e-05,
"loss": 1.4381,
"step": 150
},
{
"epoch": 0.024413069129674086,
"eval_loss": 1.1307651996612549,
"eval_runtime": 198.058,
"eval_samples_per_second": 13.067,
"eval_steps_per_second": 6.533,
"step": 150
},
{
"epoch": 0.025226838100663222,
"grad_norm": 0.4325559437274933,
"learning_rate": 1.3213804466343421e-05,
"loss": 1.1082,
"step": 155
},
{
"epoch": 0.026040607071652358,
"grad_norm": 0.5688928961753845,
"learning_rate": 1.0542974530180327e-05,
"loss": 1.2067,
"step": 160
},
{
"epoch": 0.026854376042641494,
"grad_norm": 0.5977354049682617,
"learning_rate": 8.141676086873572e-06,
"loss": 1.0986,
"step": 165
},
{
"epoch": 0.02766814501363063,
"grad_norm": 0.7392883896827698,
"learning_rate": 6.026312439675552e-06,
"loss": 1.1662,
"step": 170
},
{
"epoch": 0.028481913984619765,
"grad_norm": 0.9673829674720764,
"learning_rate": 4.2113336672471245e-06,
"loss": 1.2514,
"step": 175
},
{
"epoch": 0.0292956829556089,
"grad_norm": 1.3040239810943604,
"learning_rate": 2.7091379149682685e-06,
"loss": 1.0292,
"step": 180
},
{
"epoch": 0.03010945192659804,
"grad_norm": 1.7316601276397705,
"learning_rate": 1.5299867030334814e-06,
"loss": 1.1521,
"step": 185
},
{
"epoch": 0.030923220897587176,
"grad_norm": 1.573816180229187,
"learning_rate": 6.819348298638839e-07,
"loss": 1.0154,
"step": 190
},
{
"epoch": 0.03173698986857631,
"grad_norm": 1.919352412223816,
"learning_rate": 1.7077534966650766e-07,
"loss": 1.0903,
"step": 195
},
{
"epoch": 0.032550758839565444,
"grad_norm": 5.344688892364502,
"learning_rate": 0.0,
"loss": 1.415,
"step": 200
},
{
"epoch": 0.032550758839565444,
"eval_loss": 1.125679612159729,
"eval_runtime": 198.0669,
"eval_samples_per_second": 13.066,
"eval_steps_per_second": 6.533,
"step": 200
}
],
"logging_steps": 5,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.53121987501097e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}