|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.2604166666666667,
  "eval_steps": 9,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005208333333333333,
      "grad_norm": 1.149965763092041,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 3.1312,
      "step": 1
    },
    {
      "epoch": 0.005208333333333333,
      "eval_loss": 3.160311460494995,
      "eval_runtime": 2.6854,
      "eval_samples_per_second": 60.326,
      "eval_steps_per_second": 7.82,
      "step": 1
    },
    {
      "epoch": 0.010416666666666666,
      "grad_norm": 1.072540521621704,
      "learning_rate": 4.000000000000001e-06,
      "loss": 3.1759,
      "step": 2
    },
    {
      "epoch": 0.015625,
      "grad_norm": 1.0272471904754639,
      "learning_rate": 6e-06,
      "loss": 3.1055,
      "step": 3
    },
    {
      "epoch": 0.020833333333333332,
      "grad_norm": 1.1299035549163818,
      "learning_rate": 8.000000000000001e-06,
      "loss": 3.2217,
      "step": 4
    },
    {
      "epoch": 0.026041666666666668,
      "grad_norm": 0.9919382929801941,
      "learning_rate": 1e-05,
      "loss": 3.028,
      "step": 5
    },
    {
      "epoch": 0.03125,
      "grad_norm": 1.0636495351791382,
      "learning_rate": 1.2e-05,
      "loss": 3.2194,
      "step": 6
    },
    {
      "epoch": 0.036458333333333336,
      "grad_norm": 1.0188517570495605,
      "learning_rate": 1.4e-05,
      "loss": 3.1403,
      "step": 7
    },
    {
      "epoch": 0.041666666666666664,
      "grad_norm": 1.1026723384857178,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 3.1992,
      "step": 8
    },
    {
      "epoch": 0.046875,
      "grad_norm": 0.966528058052063,
      "learning_rate": 1.8e-05,
      "loss": 3.0237,
      "step": 9
    },
    {
      "epoch": 0.046875,
      "eval_loss": 3.1478817462921143,
      "eval_runtime": 2.0938,
      "eval_samples_per_second": 77.372,
      "eval_steps_per_second": 10.03,
      "step": 9
    },
    {
      "epoch": 0.052083333333333336,
      "grad_norm": 0.8940801620483398,
      "learning_rate": 2e-05,
      "loss": 3.0852,
      "step": 10
    },
    {
      "epoch": 0.057291666666666664,
      "grad_norm": 1.0513722896575928,
      "learning_rate": 1.999390827019096e-05,
      "loss": 3.2125,
      "step": 11
    },
    {
      "epoch": 0.0625,
      "grad_norm": 0.9304180145263672,
      "learning_rate": 1.9975640502598243e-05,
      "loss": 3.1098,
      "step": 12
    },
    {
      "epoch": 0.06770833333333333,
      "grad_norm": 0.7351564168930054,
      "learning_rate": 1.9945218953682736e-05,
      "loss": 3.0332,
      "step": 13
    },
    {
      "epoch": 0.07291666666666667,
      "grad_norm": 0.862900972366333,
      "learning_rate": 1.9902680687415704e-05,
      "loss": 3.173,
      "step": 14
    },
    {
      "epoch": 0.078125,
      "grad_norm": 0.7154681086540222,
      "learning_rate": 1.9848077530122083e-05,
      "loss": 3.0754,
      "step": 15
    },
    {
      "epoch": 0.08333333333333333,
      "grad_norm": 0.8201392889022827,
      "learning_rate": 1.9781476007338058e-05,
      "loss": 3.1666,
      "step": 16
    },
    {
      "epoch": 0.08854166666666667,
      "grad_norm": 0.7526658177375793,
      "learning_rate": 1.9702957262759964e-05,
      "loss": 3.0601,
      "step": 17
    },
    {
      "epoch": 0.09375,
      "grad_norm": 0.7286026477813721,
      "learning_rate": 1.961261695938319e-05,
      "loss": 3.0727,
      "step": 18
    },
    {
      "epoch": 0.09375,
      "eval_loss": 3.06829571723938,
      "eval_runtime": 2.1125,
      "eval_samples_per_second": 76.688,
      "eval_steps_per_second": 9.941,
      "step": 18
    },
    {
      "epoch": 0.09895833333333333,
      "grad_norm": 0.6828358769416809,
      "learning_rate": 1.9510565162951538e-05,
      "loss": 3.0873,
      "step": 19
    },
    {
      "epoch": 0.10416666666666667,
      "grad_norm": 0.7122036814689636,
      "learning_rate": 1.9396926207859085e-05,
      "loss": 3.042,
      "step": 20
    },
    {
      "epoch": 0.109375,
      "grad_norm": 0.642253577709198,
      "learning_rate": 1.9271838545667876e-05,
      "loss": 3.0086,
      "step": 21
    },
    {
      "epoch": 0.11458333333333333,
      "grad_norm": 0.7375343441963196,
      "learning_rate": 1.913545457642601e-05,
      "loss": 3.0629,
      "step": 22
    },
    {
      "epoch": 0.11979166666666667,
      "grad_norm": 0.6836945414543152,
      "learning_rate": 1.8987940462991673e-05,
      "loss": 3.0095,
      "step": 23
    },
    {
      "epoch": 0.125,
      "grad_norm": 0.8602219820022583,
      "learning_rate": 1.8829475928589272e-05,
      "loss": 3.0885,
      "step": 24
    },
    {
      "epoch": 0.13020833333333334,
      "grad_norm": 0.6618623733520508,
      "learning_rate": 1.866025403784439e-05,
      "loss": 2.9082,
      "step": 25
    },
    {
      "epoch": 0.13541666666666666,
      "grad_norm": 0.7362732887268066,
      "learning_rate": 1.848048096156426e-05,
      "loss": 2.9769,
      "step": 26
    },
    {
      "epoch": 0.140625,
      "grad_norm": 0.6465035080909729,
      "learning_rate": 1.8290375725550417e-05,
      "loss": 2.946,
      "step": 27
    },
    {
      "epoch": 0.140625,
      "eval_loss": 2.9850566387176514,
      "eval_runtime": 2.1389,
      "eval_samples_per_second": 75.74,
      "eval_steps_per_second": 9.818,
      "step": 27
    },
    {
      "epoch": 0.14583333333333334,
      "grad_norm": 0.6484801769256592,
      "learning_rate": 1.8090169943749477e-05,
      "loss": 2.9388,
      "step": 28
    },
    {
      "epoch": 0.15104166666666666,
      "grad_norm": 0.6423029899597168,
      "learning_rate": 1.788010753606722e-05,
      "loss": 2.874,
      "step": 29
    },
    {
      "epoch": 0.15625,
      "grad_norm": 0.641499936580658,
      "learning_rate": 1.766044443118978e-05,
      "loss": 2.961,
      "step": 30
    },
    {
      "epoch": 0.16145833333333334,
      "grad_norm": 0.669302225112915,
      "learning_rate": 1.7431448254773943e-05,
      "loss": 2.918,
      "step": 31
    },
    {
      "epoch": 0.16666666666666666,
      "grad_norm": 0.5931358933448792,
      "learning_rate": 1.7193398003386514e-05,
      "loss": 2.9327,
      "step": 32
    },
    {
      "epoch": 0.171875,
      "grad_norm": 0.6197303533554077,
      "learning_rate": 1.6946583704589973e-05,
      "loss": 2.8986,
      "step": 33
    },
    {
      "epoch": 0.17708333333333334,
      "grad_norm": 0.5762773752212524,
      "learning_rate": 1.6691306063588583e-05,
      "loss": 2.8925,
      "step": 34
    },
    {
      "epoch": 0.18229166666666666,
      "grad_norm": 0.5871465802192688,
      "learning_rate": 1.6427876096865394e-05,
      "loss": 2.9471,
      "step": 35
    },
    {
      "epoch": 0.1875,
      "grad_norm": 0.6552653312683105,
      "learning_rate": 1.6156614753256583e-05,
      "loss": 2.9724,
      "step": 36
    },
    {
      "epoch": 0.1875,
      "eval_loss": 2.9104537963867188,
      "eval_runtime": 2.0723,
      "eval_samples_per_second": 78.172,
      "eval_steps_per_second": 10.133,
      "step": 36
    },
    {
      "epoch": 0.19270833333333334,
      "grad_norm": 0.6476786732673645,
      "learning_rate": 1.5877852522924733e-05,
      "loss": 2.9259,
      "step": 37
    },
    {
      "epoch": 0.19791666666666666,
      "grad_norm": 0.6450286507606506,
      "learning_rate": 1.5591929034707468e-05,
      "loss": 2.8913,
      "step": 38
    },
    {
      "epoch": 0.203125,
      "grad_norm": 0.5890446901321411,
      "learning_rate": 1.529919264233205e-05,
      "loss": 2.9457,
      "step": 39
    },
    {
      "epoch": 0.20833333333333334,
      "grad_norm": 0.6238468885421753,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 2.8409,
      "step": 40
    },
    {
      "epoch": 0.21354166666666666,
      "grad_norm": 0.5730621814727783,
      "learning_rate": 1.469471562785891e-05,
      "loss": 2.7774,
      "step": 41
    },
    {
      "epoch": 0.21875,
      "grad_norm": 0.5432677268981934,
      "learning_rate": 1.4383711467890776e-05,
      "loss": 2.8458,
      "step": 42
    },
    {
      "epoch": 0.22395833333333334,
      "grad_norm": 0.5576301217079163,
      "learning_rate": 1.4067366430758004e-05,
      "loss": 2.888,
      "step": 43
    },
    {
      "epoch": 0.22916666666666666,
      "grad_norm": 0.5752390027046204,
      "learning_rate": 1.3746065934159123e-05,
      "loss": 2.8943,
      "step": 44
    },
    {
      "epoch": 0.234375,
      "grad_norm": 0.5436639785766602,
      "learning_rate": 1.342020143325669e-05,
      "loss": 2.8378,
      "step": 45
    },
    {
      "epoch": 0.234375,
      "eval_loss": 2.8462777137756348,
      "eval_runtime": 2.0769,
      "eval_samples_per_second": 78.0,
      "eval_steps_per_second": 10.111,
      "step": 45
    },
    {
      "epoch": 0.23958333333333334,
      "grad_norm": 0.5786373615264893,
      "learning_rate": 1.3090169943749475e-05,
      "loss": 2.8375,
      "step": 46
    },
    {
      "epoch": 0.24479166666666666,
      "grad_norm": 0.5880608558654785,
      "learning_rate": 1.2756373558169992e-05,
      "loss": 2.8433,
      "step": 47
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.5975544452667236,
      "learning_rate": 1.2419218955996677e-05,
      "loss": 2.7899,
      "step": 48
    },
    {
      "epoch": 0.2552083333333333,
      "grad_norm": 0.5674802660942078,
      "learning_rate": 1.2079116908177592e-05,
      "loss": 2.8498,
      "step": 49
    },
    {
      "epoch": 0.2604166666666667,
      "grad_norm": 0.5659404397010803,
      "learning_rate": 1.1736481776669307e-05,
      "loss": 2.8706,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1020456257716224.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|