{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9900497512437811,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01990049751243781,
"eval_loss": 1.5809879302978516,
"eval_runtime": 5.5137,
"eval_samples_per_second": 15.416,
"eval_steps_per_second": 1.995,
"step": 1
},
{
"epoch": 0.05970149253731343,
"grad_norm": 0.7845205068588257,
"learning_rate": 1.5e-05,
"loss": 1.5216,
"step": 3
},
{
"epoch": 0.11940298507462686,
"grad_norm": 0.8492766618728638,
"learning_rate": 3e-05,
"loss": 1.594,
"step": 6
},
{
"epoch": 0.1791044776119403,
"grad_norm": 0.899258553981781,
"learning_rate": 4.5e-05,
"loss": 1.5144,
"step": 9
},
{
"epoch": 0.1791044776119403,
"eval_loss": 1.5270332098007202,
"eval_runtime": 5.5991,
"eval_samples_per_second": 15.181,
"eval_steps_per_second": 1.965,
"step": 9
},
{
"epoch": 0.23880597014925373,
"grad_norm": 1.0086854696273804,
"learning_rate": 4.993910125649561e-05,
"loss": 1.43,
"step": 12
},
{
"epoch": 0.29850746268656714,
"grad_norm": 0.8446040749549866,
"learning_rate": 4.962019382530521e-05,
"loss": 1.3627,
"step": 15
},
{
"epoch": 0.3582089552238806,
"grad_norm": 0.7049321532249451,
"learning_rate": 4.9031542398457974e-05,
"loss": 1.2934,
"step": 18
},
{
"epoch": 0.3582089552238806,
"eval_loss": 1.2862956523895264,
"eval_runtime": 5.6451,
"eval_samples_per_second": 15.057,
"eval_steps_per_second": 1.949,
"step": 18
},
{
"epoch": 0.417910447761194,
"grad_norm": 0.6026838421821594,
"learning_rate": 4.817959636416969e-05,
"loss": 1.2678,
"step": 21
},
{
"epoch": 0.47761194029850745,
"grad_norm": 0.6281826496124268,
"learning_rate": 4.707368982147318e-05,
"loss": 1.2053,
"step": 24
},
{
"epoch": 0.5373134328358209,
"grad_norm": 0.626505434513092,
"learning_rate": 4.572593931387604e-05,
"loss": 1.1997,
"step": 27
},
{
"epoch": 0.5373134328358209,
"eval_loss": 1.1615190505981445,
"eval_runtime": 5.6361,
"eval_samples_per_second": 15.081,
"eval_steps_per_second": 1.952,
"step": 27
},
{
"epoch": 0.5970149253731343,
"grad_norm": 0.5600003600120544,
"learning_rate": 4.415111107797445e-05,
"loss": 1.1329,
"step": 30
},
{
"epoch": 0.6567164179104478,
"grad_norm": 0.554161787033081,
"learning_rate": 4.2366459261474933e-05,
"loss": 1.0888,
"step": 33
},
{
"epoch": 0.7164179104477612,
"grad_norm": 0.5243486166000366,
"learning_rate": 4.039153688314145e-05,
"loss": 1.0326,
"step": 36
},
{
"epoch": 0.7164179104477612,
"eval_loss": 1.0722781419754028,
"eval_runtime": 5.6371,
"eval_samples_per_second": 15.079,
"eval_steps_per_second": 1.951,
"step": 36
},
{
"epoch": 0.7761194029850746,
"grad_norm": 0.5589205026626587,
"learning_rate": 3.824798160583012e-05,
"loss": 1.0332,
"step": 39
},
{
"epoch": 0.835820895522388,
"grad_norm": 0.5794256329536438,
"learning_rate": 3.5959278669726935e-05,
"loss": 1.0239,
"step": 42
},
{
"epoch": 0.8955223880597015,
"grad_norm": 0.5618190169334412,
"learning_rate": 3.355050358314172e-05,
"loss": 0.9851,
"step": 45
},
{
"epoch": 0.8955223880597015,
"eval_loss": 1.0372254848480225,
"eval_runtime": 5.6453,
"eval_samples_per_second": 15.057,
"eval_steps_per_second": 1.949,
"step": 45
},
{
"epoch": 0.9552238805970149,
"grad_norm": 0.5606002807617188,
"learning_rate": 3.104804738999169e-05,
"loss": 1.0615,
"step": 48
},
{
"epoch": 1.0149253731343284,
"grad_norm": 1.186905860900879,
"learning_rate": 2.8479327524001636e-05,
"loss": 1.1214,
"step": 51
},
{
"epoch": 1.0746268656716418,
"grad_norm": 0.5500369668006897,
"learning_rate": 2.587248741756253e-05,
"loss": 0.8825,
"step": 54
},
{
"epoch": 1.0746268656716418,
"eval_loss": 1.0109927654266357,
"eval_runtime": 5.6371,
"eval_samples_per_second": 15.079,
"eval_steps_per_second": 1.951,
"step": 54
},
{
"epoch": 1.1343283582089552,
"grad_norm": 0.6391798853874207,
"learning_rate": 2.3256088156396868e-05,
"loss": 0.9475,
"step": 57
},
{
"epoch": 1.1940298507462686,
"grad_norm": 0.5408726334571838,
"learning_rate": 2.0658795558326743e-05,
"loss": 0.9773,
"step": 60
},
{
"epoch": 1.2537313432835822,
"grad_norm": 0.5797409415245056,
"learning_rate": 1.8109066104575023e-05,
"loss": 0.9214,
"step": 63
},
{
"epoch": 1.2537313432835822,
"eval_loss": 0.9961395263671875,
"eval_runtime": 5.6389,
"eval_samples_per_second": 15.074,
"eval_steps_per_second": 1.951,
"step": 63
},
{
"epoch": 1.3134328358208955,
"grad_norm": 0.5694305300712585,
"learning_rate": 1.56348351646022e-05,
"loss": 0.9053,
"step": 66
},
{
"epoch": 1.373134328358209,
"grad_norm": 0.5325923562049866,
"learning_rate": 1.3263210930352737e-05,
"loss": 0.922,
"step": 69
},
{
"epoch": 1.4328358208955223,
"grad_norm": 0.6161549091339111,
"learning_rate": 1.1020177413231334e-05,
"loss": 0.9302,
"step": 72
},
{
"epoch": 1.4328358208955223,
"eval_loss": 0.9865135550498962,
"eval_runtime": 5.6362,
"eval_samples_per_second": 15.081,
"eval_steps_per_second": 1.952,
"step": 72
},
{
"epoch": 1.4925373134328357,
"grad_norm": 0.6062175035476685,
"learning_rate": 8.930309757836517e-06,
"loss": 0.9556,
"step": 75
},
{
"epoch": 1.5522388059701493,
"grad_norm": 0.6155171990394592,
"learning_rate": 7.016504991533726e-06,
"loss": 0.9589,
"step": 78
},
{
"epoch": 1.6119402985074627,
"grad_norm": 0.5605164766311646,
"learning_rate": 5.299731159831953e-06,
"loss": 0.8919,
"step": 81
},
{
"epoch": 1.6119402985074627,
"eval_loss": 0.9810183644294739,
"eval_runtime": 5.6412,
"eval_samples_per_second": 15.068,
"eval_steps_per_second": 1.95,
"step": 81
},
{
"epoch": 1.671641791044776,
"grad_norm": 0.6275697350502014,
"learning_rate": 3.798797596089351e-06,
"loss": 0.9247,
"step": 84
},
{
"epoch": 1.7313432835820897,
"grad_norm": 0.5964632630348206,
"learning_rate": 2.5301488425208296e-06,
"loss": 0.9138,
"step": 87
},
{
"epoch": 1.7910447761194028,
"grad_norm": 0.5972414016723633,
"learning_rate": 1.5076844803522922e-06,
"loss": 0.8821,
"step": 90
},
{
"epoch": 1.7910447761194028,
"eval_loss": 0.9789036512374878,
"eval_runtime": 5.6422,
"eval_samples_per_second": 15.065,
"eval_steps_per_second": 1.95,
"step": 90
},
{
"epoch": 1.8507462686567164,
"grad_norm": 0.6307412385940552,
"learning_rate": 7.426068431000882e-07,
"loss": 0.9657,
"step": 93
},
{
"epoch": 1.9104477611940298,
"grad_norm": 0.5744690895080566,
"learning_rate": 2.4329828146074095e-07,
"loss": 0.8921,
"step": 96
},
{
"epoch": 1.9701492537313432,
"grad_norm": 0.6007259488105774,
"learning_rate": 1.522932452260595e-08,
"loss": 0.9583,
"step": 99
},
{
"epoch": 1.9701492537313432,
"eval_loss": 0.9787780046463013,
"eval_runtime": 5.6467,
"eval_samples_per_second": 15.053,
"eval_steps_per_second": 1.948,
"step": 99
}
],
"logging_steps": 3,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 9,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.3065079924300186e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}