{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 74.17102966841186,
  "eval_steps": 500,
  "global_step": 42500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.8726003490401396,
      "grad_norm": 0.931673526763916,
      "learning_rate": 0.0009956369982547994,
      "loss": 2.6507,
      "step": 500
    },
    {
      "epoch": 1.7452006980802792,
      "grad_norm": 1.8975260257720947,
      "learning_rate": 0.0009912739965095986,
      "loss": 1.6969,
      "step": 1000
    },
    {
      "epoch": 2.6178010471204187,
      "grad_norm": 0.5923702120780945,
      "learning_rate": 0.000986910994764398,
      "loss": 1.287,
      "step": 1500
    },
    {
      "epoch": 3.4904013961605584,
      "grad_norm": 0.79058438539505,
      "learning_rate": 0.000982547993019197,
      "loss": 0.9398,
      "step": 2000
    },
    {
      "epoch": 4.363001745200698,
      "grad_norm": 0.7330807447433472,
      "learning_rate": 0.0009781849912739965,
      "loss": 0.6968,
      "step": 2500
    },
    {
      "epoch": 5.2356020942408374,
      "grad_norm": 0.6996338963508606,
      "learning_rate": 0.0009738219895287959,
      "loss": 0.4816,
      "step": 3000
    },
    {
      "epoch": 6.108202443280978,
      "grad_norm": 0.5383062362670898,
      "learning_rate": 0.0009694589877835951,
      "loss": 0.3189,
      "step": 3500
    },
    {
      "epoch": 6.980802792321117,
      "grad_norm": 0.6486771106719971,
      "learning_rate": 0.0009650959860383944,
      "loss": 0.2218,
      "step": 4000
    },
    {
      "epoch": 7.853403141361256,
      "grad_norm": 0.507366955280304,
      "learning_rate": 0.0009607329842931938,
      "loss": 0.1542,
      "step": 4500
    },
    {
      "epoch": 8.726003490401396,
      "grad_norm": 0.8452386260032654,
      "learning_rate": 0.000956369982547993,
      "loss": 0.1285,
      "step": 5000
    },
    {
      "epoch": 9.598603839441536,
      "grad_norm": 0.43142977356910706,
      "learning_rate": 0.0009520069808027923,
      "loss": 0.1175,
      "step": 5500
    },
    {
      "epoch": 10.471204188481675,
      "grad_norm": 0.2331060916185379,
      "learning_rate": 0.0009476439790575916,
      "loss": 0.1053,
      "step": 6000
    },
    {
      "epoch": 11.343804537521814,
      "grad_norm": 0.4272315800189972,
      "learning_rate": 0.000943280977312391,
      "loss": 0.092,
      "step": 6500
    },
    {
      "epoch": 12.216404886561955,
      "grad_norm": 0.2999955117702484,
      "learning_rate": 0.0009389179755671902,
      "loss": 0.0878,
      "step": 7000
    },
    {
      "epoch": 13.089005235602095,
      "grad_norm": 2.55757474899292,
      "learning_rate": 0.0009345549738219895,
      "loss": 0.0884,
      "step": 7500
    },
    {
      "epoch": 13.961605584642234,
      "grad_norm": 0.33913654088974,
      "learning_rate": 0.0009301919720767889,
      "loss": 0.0805,
      "step": 8000
    },
    {
      "epoch": 14.834205933682373,
      "grad_norm": 0.3642922341823578,
      "learning_rate": 0.0009258289703315882,
      "loss": 0.0721,
      "step": 8500
    },
    {
      "epoch": 15.706806282722512,
      "grad_norm": 0.7718423008918762,
      "learning_rate": 0.0009214659685863874,
      "loss": 0.0687,
      "step": 9000
    },
    {
      "epoch": 16.57940663176265,
      "grad_norm": 0.5820666551589966,
      "learning_rate": 0.0009171029668411868,
      "loss": 0.0644,
      "step": 9500
    },
    {
      "epoch": 17.452006980802793,
      "grad_norm": 0.2773011028766632,
      "learning_rate": 0.000912739965095986,
      "loss": 0.0598,
      "step": 10000
    },
    {
      "epoch": 18.324607329842934,
      "grad_norm": 0.25250518321990967,
      "learning_rate": 0.0009083769633507853,
      "loss": 0.0653,
      "step": 10500
    },
    {
      "epoch": 19.19720767888307,
      "grad_norm": 0.24120768904685974,
      "learning_rate": 0.0009040139616055847,
      "loss": 0.0632,
      "step": 11000
    },
    {
      "epoch": 20.069808027923212,
      "grad_norm": 0.2897244989871979,
      "learning_rate": 0.0008996509598603839,
      "loss": 0.0594,
      "step": 11500
    },
    {
      "epoch": 20.94240837696335,
      "grad_norm": 0.25145065784454346,
      "learning_rate": 0.0008952879581151833,
      "loss": 0.0556,
      "step": 12000
    },
    {
      "epoch": 21.81500872600349,
      "grad_norm": 0.27175939083099365,
      "learning_rate": 0.0008909249563699826,
      "loss": 0.0481,
      "step": 12500
    },
    {
      "epoch": 22.68760907504363,
      "grad_norm": 0.8626015782356262,
      "learning_rate": 0.0008865619546247818,
      "loss": 0.0466,
      "step": 13000
    },
    {
      "epoch": 23.56020942408377,
      "grad_norm": 0.18672889471054077,
      "learning_rate": 0.0008821989528795812,
      "loss": 0.0512,
      "step": 13500
    },
    {
      "epoch": 24.43280977312391,
      "grad_norm": 0.2387542873620987,
      "learning_rate": 0.0008778359511343804,
      "loss": 0.0512,
      "step": 14000
    },
    {
      "epoch": 25.305410122164048,
      "grad_norm": 0.2665075957775116,
      "learning_rate": 0.0008734729493891797,
      "loss": 0.0483,
      "step": 14500
    },
    {
      "epoch": 26.17801047120419,
      "grad_norm": 0.16715960204601288,
      "learning_rate": 0.0008691099476439791,
      "loss": 0.0441,
      "step": 15000
    },
    {
      "epoch": 27.050610820244327,
      "grad_norm": 0.1875993311405182,
      "learning_rate": 0.0008647469458987784,
      "loss": 0.0397,
      "step": 15500
    },
    {
      "epoch": 27.923211169284468,
      "grad_norm": 0.25451162457466125,
      "learning_rate": 0.0008603839441535776,
      "loss": 0.039,
      "step": 16000
    },
    {
      "epoch": 28.79581151832461,
      "grad_norm": 0.307054728269577,
      "learning_rate": 0.000856020942408377,
      "loss": 0.0443,
      "step": 16500
    },
    {
      "epoch": 29.668411867364746,
      "grad_norm": 0.2467741221189499,
      "learning_rate": 0.0008516579406631763,
      "loss": 0.038,
      "step": 17000
    },
    {
      "epoch": 30.541012216404887,
      "grad_norm": 0.23178012669086456,
      "learning_rate": 0.0008472949389179755,
      "loss": 0.0351,
      "step": 17500
    },
    {
      "epoch": 31.413612565445025,
      "grad_norm": 0.35061487555503845,
      "learning_rate": 0.0008429319371727748,
      "loss": 0.0323,
      "step": 18000
    },
    {
      "epoch": 32.28621291448517,
      "grad_norm": 0.3363337218761444,
      "learning_rate": 0.0008385689354275742,
      "loss": 0.038,
      "step": 18500
    },
    {
      "epoch": 33.1588132635253,
      "grad_norm": 0.24015972018241882,
      "learning_rate": 0.0008342059336823735,
      "loss": 0.0361,
      "step": 19000
    },
    {
      "epoch": 34.031413612565444,
      "grad_norm": 0.5147364735603333,
      "learning_rate": 0.0008298429319371727,
      "loss": 0.0318,
      "step": 19500
    },
    {
      "epoch": 34.904013961605585,
      "grad_norm": 0.17477251589298248,
      "learning_rate": 0.0008254799301919721,
      "loss": 0.0307,
      "step": 20000
    },
    {
      "epoch": 35.776614310645726,
      "grad_norm": 0.2511900067329407,
      "learning_rate": 0.0008211169284467714,
      "loss": 0.0348,
      "step": 20500
    },
    {
      "epoch": 36.64921465968587,
      "grad_norm": 0.2036200314760208,
      "learning_rate": 0.0008167539267015707,
      "loss": 0.0298,
      "step": 21000
    },
    {
      "epoch": 37.521815008726,
      "grad_norm": 0.16981545090675354,
      "learning_rate": 0.00081239092495637,
      "loss": 0.0319,
      "step": 21500
    },
    {
      "epoch": 38.39441535776614,
      "grad_norm": 0.22329097986221313,
      "learning_rate": 0.0008080279232111692,
      "loss": 0.0359,
      "step": 22000
    },
    {
      "epoch": 39.26701570680628,
      "grad_norm": 0.13258185982704163,
      "learning_rate": 0.0008036649214659686,
      "loss": 0.0266,
      "step": 22500
    },
    {
      "epoch": 40.139616055846425,
      "grad_norm": 0.12790647149085999,
      "learning_rate": 0.000799301919720768,
      "loss": 0.026,
      "step": 23000
    },
    {
      "epoch": 41.01221640488656,
      "grad_norm": 0.23635344207286835,
      "learning_rate": 0.0007949389179755671,
      "loss": 0.0279,
      "step": 23500
    },
    {
      "epoch": 41.8848167539267,
      "grad_norm": 0.11364254355430603,
      "learning_rate": 0.0007905759162303665,
      "loss": 0.0257,
      "step": 24000
    },
    {
      "epoch": 42.75741710296684,
      "grad_norm": 0.2781168520450592,
      "learning_rate": 0.0007862129144851659,
      "loss": 0.0295,
      "step": 24500
    },
    {
      "epoch": 43.63001745200698,
      "grad_norm": 0.106789730489254,
      "learning_rate": 0.0007818499127399651,
      "loss": 0.0308,
      "step": 25000
    },
    {
      "epoch": 44.50261780104712,
      "grad_norm": 0.16404911875724792,
      "learning_rate": 0.0007774869109947644,
      "loss": 0.0222,
      "step": 25500
    },
    {
      "epoch": 45.37521815008726,
      "grad_norm": 0.14249293506145477,
      "learning_rate": 0.0007731239092495637,
      "loss": 0.0225,
      "step": 26000
    },
    {
      "epoch": 46.2478184991274,
      "grad_norm": 0.1853444129228592,
      "learning_rate": 0.0007687609075043631,
      "loss": 0.0261,
      "step": 26500
    },
    {
      "epoch": 47.12041884816754,
      "grad_norm": 0.1456003040075302,
      "learning_rate": 0.0007643979057591623,
      "loss": 0.0252,
      "step": 27000
    },
    {
      "epoch": 47.99301919720768,
      "grad_norm": 0.16386698186397552,
      "learning_rate": 0.0007600349040139616,
      "loss": 0.0259,
      "step": 27500
    },
    {
      "epoch": 48.86561954624782,
      "grad_norm": 0.12221992015838623,
      "learning_rate": 0.000755671902268761,
      "loss": 0.0253,
      "step": 28000
    },
    {
      "epoch": 49.738219895287955,
      "grad_norm": 0.14093224704265594,
      "learning_rate": 0.0007513089005235602,
      "loss": 0.0203,
      "step": 28500
    },
    {
      "epoch": 50.610820244328096,
      "grad_norm": 0.1189383938908577,
      "learning_rate": 0.0007469458987783595,
      "loss": 0.0207,
      "step": 29000
    },
    {
      "epoch": 51.48342059336824,
      "grad_norm": 0.1471104919910431,
      "learning_rate": 0.0007425828970331589,
      "loss": 0.0209,
      "step": 29500
    },
    {
      "epoch": 52.35602094240838,
      "grad_norm": 0.08947575837373734,
      "learning_rate": 0.0007382198952879581,
      "loss": 0.0234,
      "step": 30000
    },
    {
      "epoch": 53.22862129144852,
      "grad_norm": 0.18746259808540344,
      "learning_rate": 0.0007338568935427574,
      "loss": 0.0245,
      "step": 30500
    },
    {
      "epoch": 54.10122164048865,
      "grad_norm": 0.1539311408996582,
      "learning_rate": 0.0007294938917975568,
      "loss": 0.0214,
      "step": 31000
    },
    {
      "epoch": 54.973821989528794,
      "grad_norm": 0.11201947182416916,
      "learning_rate": 0.000725130890052356,
      "loss": 0.0194,
      "step": 31500
    },
    {
      "epoch": 55.846422338568935,
      "grad_norm": 0.16618479788303375,
      "learning_rate": 0.0007207678883071554,
      "loss": 0.0185,
      "step": 32000
    },
    {
      "epoch": 56.719022687609076,
      "grad_norm": 0.1569599211215973,
      "learning_rate": 0.0007164048865619547,
      "loss": 0.0234,
      "step": 32500
    },
    {
      "epoch": 57.59162303664922,
      "grad_norm": 0.11062045395374298,
      "learning_rate": 0.0007120418848167539,
      "loss": 0.0187,
      "step": 33000
    },
    {
      "epoch": 58.46422338568935,
      "grad_norm": 0.1617700457572937,
      "learning_rate": 0.0007076788830715533,
      "loss": 0.0176,
      "step": 33500
    },
    {
      "epoch": 59.33682373472949,
      "grad_norm": 0.11750755459070206,
      "learning_rate": 0.0007033158813263525,
      "loss": 0.0188,
      "step": 34000
    },
    {
      "epoch": 60.20942408376963,
      "grad_norm": 0.24937282502651215,
      "learning_rate": 0.0006989528795811518,
      "loss": 0.0225,
      "step": 34500
    },
    {
      "epoch": 61.082024432809774,
      "grad_norm": 0.22410957515239716,
      "learning_rate": 0.0006945898778359512,
      "loss": 0.0192,
      "step": 35000
    },
    {
      "epoch": 61.954624781849915,
      "grad_norm": 0.18029357492923737,
      "learning_rate": 0.0006902268760907505,
      "loss": 0.0169,
      "step": 35500
    },
    {
      "epoch": 62.82722513089005,
      "grad_norm": 0.14077898859977722,
      "learning_rate": 0.0006858638743455497,
      "loss": 0.0167,
      "step": 36000
    },
    {
      "epoch": 63.69982547993019,
      "grad_norm": 0.12804169952869415,
      "learning_rate": 0.0006815008726003491,
      "loss": 0.0167,
      "step": 36500
    },
    {
      "epoch": 64.57242582897034,
      "grad_norm": 0.11356078833341599,
      "learning_rate": 0.0006771378708551484,
      "loss": 0.0183,
      "step": 37000
    },
    {
      "epoch": 65.44502617801047,
      "grad_norm": 0.07214757055044174,
      "learning_rate": 0.0006727748691099476,
      "loss": 0.0167,
      "step": 37500
    },
    {
      "epoch": 66.3176265270506,
      "grad_norm": 0.20497408509254456,
      "learning_rate": 0.0006684118673647469,
      "loss": 0.0186,
      "step": 38000
    },
    {
      "epoch": 67.19022687609075,
      "grad_norm": 0.09408937394618988,
      "learning_rate": 0.0006640488656195463,
      "loss": 0.0169,
      "step": 38500
    },
    {
      "epoch": 68.06282722513089,
      "grad_norm": 0.12423662841320038,
      "learning_rate": 0.0006596858638743456,
      "loss": 0.0179,
      "step": 39000
    },
    {
      "epoch": 68.93542757417103,
      "grad_norm": 0.4406953752040863,
      "learning_rate": 0.0006553228621291448,
      "loss": 0.019,
      "step": 39500
    },
    {
      "epoch": 69.80802792321117,
      "grad_norm": 0.11233725398778915,
      "learning_rate": 0.0006509598603839442,
      "loss": 0.0151,
      "step": 40000
    },
    {
      "epoch": 70.68062827225131,
      "grad_norm": 0.08892803639173508,
      "learning_rate": 0.0006465968586387435,
      "loss": 0.0137,
      "step": 40500
    },
    {
      "epoch": 71.55322862129145,
      "grad_norm": 0.11712398380041122,
      "learning_rate": 0.0006422338568935428,
      "loss": 0.0145,
      "step": 41000
    },
    {
      "epoch": 72.4258289703316,
      "grad_norm": 0.11560557782649994,
      "learning_rate": 0.0006378708551483421,
      "loss": 0.0181,
      "step": 41500
    },
    {
      "epoch": 73.29842931937172,
      "grad_norm": 0.10780952870845795,
      "learning_rate": 0.0006335078534031413,
      "loss": 0.0162,
      "step": 42000
    },
    {
      "epoch": 74.17102966841186,
      "grad_norm": 0.06540343165397644,
      "learning_rate": 0.0006291448516579407,
      "loss": 0.0157,
      "step": 42500
    }
  ],
  "logging_steps": 500,
  "max_steps": 114600,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 200,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.392554571005952e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}