|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0086580086580086,
  "eval_steps": 15,
  "global_step": 173,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.017316017316017316,
      "eval_loss": 0.8123326897621155,
      "eval_runtime": 21.4215,
      "eval_samples_per_second": 4.528,
      "eval_steps_per_second": 0.607,
      "step": 1
    },
    {
      "epoch": 0.05194805194805195,
      "grad_norm": 1.44769287109375,
      "learning_rate": 3e-05,
      "loss": 3.0463,
      "step": 3
    },
    {
      "epoch": 0.1038961038961039,
      "grad_norm": 0.9673851728439331,
      "learning_rate": 6e-05,
      "loss": 2.883,
      "step": 6
    },
    {
      "epoch": 0.15584415584415584,
      "grad_norm": 1.2237026691436768,
      "learning_rate": 9e-05,
      "loss": 3.0048,
      "step": 9
    },
    {
      "epoch": 0.2077922077922078,
      "grad_norm": 1.0807669162750244,
      "learning_rate": 9.9962857531815e-05,
      "loss": 2.8812,
      "step": 12
    },
    {
      "epoch": 0.2597402597402597,
      "grad_norm": 0.8111446499824524,
      "learning_rate": 9.976801044672608e-05,
      "loss": 2.8254,
      "step": 15
    },
    {
      "epoch": 0.2597402597402597,
      "eval_loss": 0.7221285700798035,
      "eval_runtime": 21.8495,
      "eval_samples_per_second": 4.439,
      "eval_steps_per_second": 0.595,
      "step": 15
    },
    {
      "epoch": 0.3116883116883117,
      "grad_norm": 0.7993184924125671,
      "learning_rate": 9.940682350363912e-05,
      "loss": 2.5783,
      "step": 18
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 0.88717120885849,
      "learning_rate": 9.888050389939172e-05,
      "loss": 3.0288,
      "step": 21
    },
    {
      "epoch": 0.4155844155844156,
      "grad_norm": 0.9446913003921509,
      "learning_rate": 9.819081075450014e-05,
      "loss": 2.6752,
      "step": 24
    },
    {
      "epoch": 0.4675324675324675,
      "grad_norm": 0.7598215937614441,
      "learning_rate": 9.734004923364257e-05,
      "loss": 2.6636,
      "step": 27
    },
    {
      "epoch": 0.5194805194805194,
      "grad_norm": 0.9138664603233337,
      "learning_rate": 9.63310628410961e-05,
      "loss": 2.6451,
      "step": 30
    },
    {
      "epoch": 0.5194805194805194,
      "eval_loss": 0.6947051286697388,
      "eval_runtime": 21.8569,
      "eval_samples_per_second": 4.438,
      "eval_steps_per_second": 0.595,
      "step": 30
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.7083372473716736,
      "learning_rate": 9.516722391687902e-05,
      "loss": 2.5942,
      "step": 33
    },
    {
      "epoch": 0.6233766233766234,
      "grad_norm": 0.6996327042579651,
      "learning_rate": 9.38524223653626e-05,
      "loss": 2.648,
      "step": 36
    },
    {
      "epoch": 0.6753246753246753,
      "grad_norm": 0.7574095726013184,
      "learning_rate": 9.239105265402525e-05,
      "loss": 2.5124,
      "step": 39
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 0.7147637605667114,
      "learning_rate": 9.078799912580304e-05,
      "loss": 2.652,
      "step": 42
    },
    {
      "epoch": 0.7792207792207793,
      "grad_norm": 0.8316633701324463,
      "learning_rate": 8.904861967412703e-05,
      "loss": 2.5992,
      "step": 45
    },
    {
      "epoch": 0.7792207792207793,
      "eval_loss": 0.6810405254364014,
      "eval_runtime": 21.865,
      "eval_samples_per_second": 4.436,
      "eval_steps_per_second": 0.595,
      "step": 45
    },
    {
      "epoch": 0.8311688311688312,
      "grad_norm": 0.7336857318878174,
      "learning_rate": 8.717872783521047e-05,
      "loss": 2.4763,
      "step": 48
    },
    {
      "epoch": 0.8831168831168831,
      "grad_norm": 0.6515845060348511,
      "learning_rate": 8.518457335743926e-05,
      "loss": 2.5329,
      "step": 51
    },
    {
      "epoch": 0.935064935064935,
      "grad_norm": 0.7598447203636169,
      "learning_rate": 8.307282131280804e-05,
      "loss": 2.6017,
      "step": 54
    },
    {
      "epoch": 0.987012987012987,
      "grad_norm": 0.6881215572357178,
      "learning_rate": 8.085052982021847e-05,
      "loss": 2.5885,
      "step": 57
    },
    {
      "epoch": 1.0432900432900434,
      "grad_norm": 0.6885347962379456,
      "learning_rate": 7.85251264550948e-05,
      "loss": 2.3153,
      "step": 60
    },
    {
      "epoch": 1.0432900432900434,
      "eval_loss": 0.6708703637123108,
      "eval_runtime": 21.8845,
      "eval_samples_per_second": 4.432,
      "eval_steps_per_second": 0.594,
      "step": 60
    },
    {
      "epoch": 1.0952380952380953,
      "grad_norm": 0.8033732175827026,
      "learning_rate": 7.610438342416319e-05,
      "loss": 2.1058,
      "step": 63
    },
    {
      "epoch": 1.1471861471861473,
      "grad_norm": 0.7723002433776855,
      "learning_rate": 7.359639158836828e-05,
      "loss": 2.0976,
      "step": 66
    },
    {
      "epoch": 1.199134199134199,
      "grad_norm": 0.8295950889587402,
      "learning_rate": 7.10095334207501e-05,
      "loss": 2.1156,
      "step": 69
    },
    {
      "epoch": 1.251082251082251,
      "grad_norm": 0.8891462683677673,
      "learning_rate": 6.835245498966461e-05,
      "loss": 2.3097,
      "step": 72
    },
    {
      "epoch": 1.303030303030303,
      "grad_norm": 0.8947754502296448,
      "learning_rate": 6.563403706098833e-05,
      "loss": 2.2014,
      "step": 75
    },
    {
      "epoch": 1.303030303030303,
      "eval_loss": 0.6706818342208862,
      "eval_runtime": 21.8884,
      "eval_samples_per_second": 4.432,
      "eval_steps_per_second": 0.594,
      "step": 75
    },
    {
      "epoch": 1.354978354978355,
      "grad_norm": 0.8432238101959229,
      "learning_rate": 6.286336541589224e-05,
      "loss": 2.2449,
      "step": 78
    },
    {
      "epoch": 1.406926406926407,
      "grad_norm": 0.9027056097984314,
      "learning_rate": 6.004970048339226e-05,
      "loss": 2.2785,
      "step": 81
    },
    {
      "epoch": 1.4588744588744589,
      "grad_norm": 0.977626621723175,
      "learning_rate": 5.7202446389173223e-05,
      "loss": 2.1978,
      "step": 84
    },
    {
      "epoch": 1.5108225108225108,
      "grad_norm": 1.0718011856079102,
      "learning_rate": 5.433111952413495e-05,
      "loss": 2.1774,
      "step": 87
    },
    {
      "epoch": 1.5627705627705628,
      "grad_norm": 1.034270167350769,
      "learning_rate": 5.144531673771363e-05,
      "loss": 2.1726,
      "step": 90
    },
    {
      "epoch": 1.5627705627705628,
      "eval_loss": 0.6731178164482117,
      "eval_runtime": 21.8631,
      "eval_samples_per_second": 4.437,
      "eval_steps_per_second": 0.595,
      "step": 90
    },
    {
      "epoch": 1.6147186147186146,
      "grad_norm": 0.9706316590309143,
      "learning_rate": 4.855468326228638e-05,
      "loss": 2.0843,
      "step": 93
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 1.0962471961975098,
      "learning_rate": 4.566888047586507e-05,
      "loss": 2.0797,
      "step": 96
    },
    {
      "epoch": 1.7186147186147185,
      "grad_norm": 0.9773306846618652,
      "learning_rate": 4.27975536108268e-05,
      "loss": 2.1115,
      "step": 99
    },
    {
      "epoch": 1.7705627705627704,
      "grad_norm": 0.9265680909156799,
      "learning_rate": 3.9950299516607766e-05,
      "loss": 2.1697,
      "step": 102
    },
    {
      "epoch": 1.8225108225108224,
      "grad_norm": 1.1050467491149902,
      "learning_rate": 3.713663458410779e-05,
      "loss": 2.157,
      "step": 105
    },
    {
      "epoch": 1.8225108225108224,
      "eval_loss": 0.6672056317329407,
      "eval_runtime": 21.8494,
      "eval_samples_per_second": 4.439,
      "eval_steps_per_second": 0.595,
      "step": 105
    },
    {
      "epoch": 1.8744588744588744,
      "grad_norm": 1.011114239692688,
      "learning_rate": 3.43659629390117e-05,
      "loss": 2.2031,
      "step": 108
    },
    {
      "epoch": 1.9264069264069263,
      "grad_norm": 0.9879550933837891,
      "learning_rate": 3.16475450103354e-05,
      "loss": 2.1106,
      "step": 111
    },
    {
      "epoch": 1.9783549783549783,
      "grad_norm": 1.0455985069274902,
      "learning_rate": 2.899046657924992e-05,
      "loss": 2.0031,
      "step": 114
    },
    {
      "epoch": 2.034632034632035,
      "grad_norm": 1.1132123470306396,
      "learning_rate": 2.6403608411631742e-05,
      "loss": 1.843,
      "step": 117
    },
    {
      "epoch": 2.0865800865800868,
      "grad_norm": 1.035132646560669,
      "learning_rate": 2.389561657583681e-05,
      "loss": 1.6831,
      "step": 120
    },
    {
      "epoch": 2.0865800865800868,
      "eval_loss": 0.6776129603385925,
      "eval_runtime": 21.8631,
      "eval_samples_per_second": 4.437,
      "eval_steps_per_second": 0.595,
      "step": 120
    },
    {
      "epoch": 2.1385281385281387,
      "grad_norm": 1.176239252090454,
      "learning_rate": 2.1474873544905205e-05,
      "loss": 1.7413,
      "step": 123
    },
    {
      "epoch": 2.1904761904761907,
      "grad_norm": 1.239511489868164,
      "learning_rate": 1.914947017978153e-05,
      "loss": 1.8036,
      "step": 126
    },
    {
      "epoch": 2.242424242424242,
      "grad_norm": 1.0849404335021973,
      "learning_rate": 1.692717868719195e-05,
      "loss": 1.9443,
      "step": 129
    },
    {
      "epoch": 2.2943722943722946,
      "grad_norm": 1.1872035264968872,
      "learning_rate": 1.4815426642560754e-05,
      "loss": 1.7633,
      "step": 132
    },
    {
      "epoch": 2.346320346320346,
      "grad_norm": 1.1865441799163818,
      "learning_rate": 1.2821272164789544e-05,
      "loss": 1.8487,
      "step": 135
    },
    {
      "epoch": 2.346320346320346,
      "eval_loss": 0.6821556687355042,
      "eval_runtime": 21.8619,
      "eval_samples_per_second": 4.437,
      "eval_steps_per_second": 0.595,
      "step": 135
    },
    {
      "epoch": 2.398268398268398,
      "grad_norm": 1.1166489124298096,
      "learning_rate": 1.0951380325872979e-05,
      "loss": 1.5943,
      "step": 138
    },
    {
      "epoch": 2.45021645021645,
      "grad_norm": 1.1581447124481201,
      "learning_rate": 9.212000874196953e-06,
      "loss": 1.7557,
      "step": 141
    },
    {
      "epoch": 2.502164502164502,
      "grad_norm": 1.1575648784637451,
      "learning_rate": 7.60894734597476e-06,
      "loss": 1.6507,
      "step": 144
    },
    {
      "epoch": 2.554112554112554,
      "grad_norm": 1.119139313697815,
      "learning_rate": 6.147577634637414e-06,
      "loss": 1.7355,
      "step": 147
    },
    {
      "epoch": 2.606060606060606,
      "grad_norm": 1.2103753089904785,
      "learning_rate": 4.832776083120982e-06,
      "loss": 1.6898,
      "step": 150
    },
    {
      "epoch": 2.606060606060606,
      "eval_loss": 0.6833479404449463,
      "eval_runtime": 21.8503,
      "eval_samples_per_second": 4.439,
      "eval_steps_per_second": 0.595,
      "step": 150
    },
    {
      "epoch": 2.658008658008658,
      "grad_norm": 1.2263282537460327,
      "learning_rate": 3.668937158903901e-06,
      "loss": 1.7352,
      "step": 153
    },
    {
      "epoch": 2.70995670995671,
      "grad_norm": 1.3095903396606445,
      "learning_rate": 2.6599507663574384e-06,
      "loss": 1.8122,
      "step": 156
    },
    {
      "epoch": 2.761904761904762,
      "grad_norm": 1.2558013200759888,
      "learning_rate": 1.8091892454998594e-06,
      "loss": 1.6398,
      "step": 159
    },
    {
      "epoch": 2.813852813852814,
      "grad_norm": 1.2757107019424438,
      "learning_rate": 1.1194961006082972e-06,
      "loss": 1.7014,
      "step": 162
    },
    {
      "epoch": 2.865800865800866,
      "grad_norm": 1.1844847202301025,
      "learning_rate": 5.931764963608866e-07,
      "loss": 1.8026,
      "step": 165
    },
    {
      "epoch": 2.865800865800866,
      "eval_loss": 0.6837310194969177,
      "eval_runtime": 43.4558,
      "eval_samples_per_second": 2.232,
      "eval_steps_per_second": 0.299,
      "step": 165
    },
    {
      "epoch": 2.9177489177489178,
      "grad_norm": 1.2679266929626465,
      "learning_rate": 2.319895532739369e-07,
      "loss": 1.6958,
      "step": 168
    },
    {
      "epoch": 2.9696969696969697,
      "grad_norm": 1.2044878005981445,
      "learning_rate": 3.7142468185014104e-08,
      "loss": 1.697,
      "step": 171
    }
  ],
  "logging_steps": 3,
  "max_steps": 173,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 15,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.887307373379584e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|