{
  "best_metric": 0.050220850855112076,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 1.400932400932401,
  "eval_steps": 50,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.018648018648018648,
      "grad_norm": 3.7544713020324707,
      "learning_rate": 5e-06,
      "loss": 2.5029,
      "step": 1
    },
    {
      "epoch": 0.018648018648018648,
      "eval_loss": 2.441243886947632,
      "eval_runtime": 15.0491,
      "eval_samples_per_second": 6.047,
      "eval_steps_per_second": 1.528,
      "step": 1
    },
    {
      "epoch": 0.037296037296037296,
      "grad_norm": 3.805962085723877,
      "learning_rate": 1e-05,
      "loss": 2.496,
      "step": 2
    },
    {
      "epoch": 0.055944055944055944,
      "grad_norm": 3.6651675701141357,
      "learning_rate": 1.5e-05,
      "loss": 2.4683,
      "step": 3
    },
    {
      "epoch": 0.07459207459207459,
      "grad_norm": 3.5058395862579346,
      "learning_rate": 2e-05,
      "loss": 2.4108,
      "step": 4
    },
    {
      "epoch": 0.09324009324009325,
      "grad_norm": 3.1128268241882324,
      "learning_rate": 2.5e-05,
      "loss": 2.2825,
      "step": 5
    },
    {
      "epoch": 0.11188811188811189,
      "grad_norm": 2.886013984680176,
      "learning_rate": 3e-05,
      "loss": 1.9991,
      "step": 6
    },
    {
      "epoch": 0.13053613053613053,
      "grad_norm": 2.2185962200164795,
      "learning_rate": 3.5e-05,
      "loss": 1.8074,
      "step": 7
    },
    {
      "epoch": 0.14918414918414918,
      "grad_norm": 2.538952350616455,
      "learning_rate": 4e-05,
      "loss": 1.6528,
      "step": 8
    },
    {
      "epoch": 0.16783216783216784,
      "grad_norm": 2.6650140285491943,
      "learning_rate": 4.5e-05,
      "loss": 1.3879,
      "step": 9
    },
    {
      "epoch": 0.1864801864801865,
      "grad_norm": 2.4783599376678467,
      "learning_rate": 5e-05,
      "loss": 1.1667,
      "step": 10
    },
    {
      "epoch": 0.20512820512820512,
      "grad_norm": 2.2430834770202637,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.9177,
      "step": 11
    },
    {
      "epoch": 0.22377622377622378,
      "grad_norm": 2.096689462661743,
      "learning_rate": 6e-05,
      "loss": 0.7368,
      "step": 12
    },
    {
      "epoch": 0.24242424242424243,
      "grad_norm": 1.8305144309997559,
      "learning_rate": 6.500000000000001e-05,
      "loss": 0.5248,
      "step": 13
    },
    {
      "epoch": 0.26107226107226106,
      "grad_norm": 1.5723942518234253,
      "learning_rate": 7e-05,
      "loss": 0.3459,
      "step": 14
    },
    {
      "epoch": 0.27972027972027974,
      "grad_norm": 3.2492153644561768,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.213,
      "step": 15
    },
    {
      "epoch": 0.29836829836829837,
      "grad_norm": 1.0065910816192627,
      "learning_rate": 8e-05,
      "loss": 0.1651,
      "step": 16
    },
    {
      "epoch": 0.317016317016317,
      "grad_norm": 0.8119553923606873,
      "learning_rate": 8.5e-05,
      "loss": 0.131,
      "step": 17
    },
    {
      "epoch": 0.3356643356643357,
      "grad_norm": 0.5442985892295837,
      "learning_rate": 9e-05,
      "loss": 0.1094,
      "step": 18
    },
    {
      "epoch": 0.3543123543123543,
      "grad_norm": 0.44042328000068665,
      "learning_rate": 9.5e-05,
      "loss": 0.0855,
      "step": 19
    },
    {
      "epoch": 0.372960372960373,
      "grad_norm": 0.5316314101219177,
      "learning_rate": 0.0001,
      "loss": 0.1114,
      "step": 20
    },
    {
      "epoch": 0.3916083916083916,
      "grad_norm": 0.3505031168460846,
      "learning_rate": 9.991845519630678e-05,
      "loss": 0.0716,
      "step": 21
    },
    {
      "epoch": 0.41025641025641024,
      "grad_norm": 0.48583555221557617,
      "learning_rate": 9.967408676742751e-05,
      "loss": 0.1003,
      "step": 22
    },
    {
      "epoch": 0.4289044289044289,
      "grad_norm": 0.3102801740169525,
      "learning_rate": 9.926769179238466e-05,
      "loss": 0.0606,
      "step": 23
    },
    {
      "epoch": 0.44755244755244755,
      "grad_norm": 0.2852686643600464,
      "learning_rate": 9.870059584711668e-05,
      "loss": 0.0587,
      "step": 24
    },
    {
      "epoch": 0.4662004662004662,
      "grad_norm": 0.2183716744184494,
      "learning_rate": 9.797464868072488e-05,
      "loss": 0.0522,
      "step": 25
    },
    {
      "epoch": 0.48484848484848486,
      "grad_norm": 0.26890677213668823,
      "learning_rate": 9.709221818197624e-05,
      "loss": 0.0858,
      "step": 26
    },
    {
      "epoch": 0.5034965034965035,
      "grad_norm": 0.18300114572048187,
      "learning_rate": 9.60561826557425e-05,
      "loss": 0.0496,
      "step": 27
    },
    {
      "epoch": 0.5221445221445221,
      "grad_norm": 0.20950715243816376,
      "learning_rate": 9.486992143456792e-05,
      "loss": 0.0701,
      "step": 28
    },
    {
      "epoch": 0.5407925407925408,
      "grad_norm": 0.1695779263973236,
      "learning_rate": 9.353730385598887e-05,
      "loss": 0.0521,
      "step": 29
    },
    {
      "epoch": 0.5594405594405595,
      "grad_norm": 0.1300334930419922,
      "learning_rate": 9.206267664155907e-05,
      "loss": 0.0487,
      "step": 30
    },
    {
      "epoch": 0.578088578088578,
      "grad_norm": 0.09397494792938232,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.0517,
      "step": 31
    },
    {
      "epoch": 0.5967365967365967,
      "grad_norm": 0.1267092227935791,
      "learning_rate": 8.870708053195413e-05,
      "loss": 0.0391,
      "step": 32
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 0.13381347060203552,
      "learning_rate": 8.683705689382024e-05,
      "loss": 0.0398,
      "step": 33
    },
    {
      "epoch": 0.634032634032634,
      "grad_norm": 0.1413053721189499,
      "learning_rate": 8.484687843276469e-05,
      "loss": 0.0406,
      "step": 34
    },
    {
      "epoch": 0.6526806526806527,
      "grad_norm": 0.10715018212795258,
      "learning_rate": 8.274303669726426e-05,
      "loss": 0.0405,
      "step": 35
    },
    {
      "epoch": 0.6713286713286714,
      "grad_norm": 0.18798989057540894,
      "learning_rate": 8.053239398177191e-05,
      "loss": 0.0701,
      "step": 36
    },
    {
      "epoch": 0.6899766899766899,
      "grad_norm": 0.10021158307790756,
      "learning_rate": 7.822216094333847e-05,
      "loss": 0.0415,
      "step": 37
    },
    {
      "epoch": 0.7086247086247086,
      "grad_norm": 0.09004908055067062,
      "learning_rate": 7.58198730819481e-05,
      "loss": 0.0466,
      "step": 38
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 0.10785119980573654,
      "learning_rate": 7.333336616128369e-05,
      "loss": 0.0429,
      "step": 39
    },
    {
      "epoch": 0.745920745920746,
      "grad_norm": 0.1452600061893463,
      "learning_rate": 7.077075065009433e-05,
      "loss": 0.0423,
      "step": 40
    },
    {
      "epoch": 0.7645687645687645,
      "grad_norm": 0.09562265872955322,
      "learning_rate": 6.814038526753205e-05,
      "loss": 0.0466,
      "step": 41
    },
    {
      "epoch": 0.7832167832167832,
      "grad_norm": 0.08707461506128311,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.0439,
      "step": 42
    },
    {
      "epoch": 0.8018648018648019,
      "grad_norm": 0.0825454518198967,
      "learning_rate": 6.271091670967436e-05,
      "loss": 0.0406,
      "step": 43
    },
    {
      "epoch": 0.8205128205128205,
      "grad_norm": 0.09737467020750046,
      "learning_rate": 5.992952333228728e-05,
      "loss": 0.0419,
      "step": 44
    },
    {
      "epoch": 0.8391608391608392,
      "grad_norm": 0.0975765511393547,
      "learning_rate": 5.7115741913664264e-05,
      "loss": 0.0417,
      "step": 45
    },
    {
      "epoch": 0.8578088578088578,
      "grad_norm": 0.06991899758577347,
      "learning_rate": 5.427875042394199e-05,
      "loss": 0.0454,
      "step": 46
    },
    {
      "epoch": 0.8764568764568764,
      "grad_norm": 0.07402784377336502,
      "learning_rate": 5.142780253968481e-05,
      "loss": 0.0459,
      "step": 47
    },
    {
      "epoch": 0.8951048951048951,
      "grad_norm": 0.06774295121431351,
      "learning_rate": 4.85721974603152e-05,
      "loss": 0.0384,
      "step": 48
    },
    {
      "epoch": 0.9137529137529138,
      "grad_norm": 0.10975111275911331,
      "learning_rate": 4.5721249576058027e-05,
      "loss": 0.0418,
      "step": 49
    },
    {
      "epoch": 0.9324009324009324,
      "grad_norm": 0.06744930893182755,
      "learning_rate": 4.288425808633575e-05,
      "loss": 0.0471,
      "step": 50
    },
    {
      "epoch": 0.9324009324009324,
      "eval_loss": 0.050220850855112076,
      "eval_runtime": 14.8191,
      "eval_samples_per_second": 6.141,
      "eval_steps_per_second": 1.552,
      "step": 50
    },
    {
      "epoch": 0.951048951048951,
      "grad_norm": 0.13983112573623657,
      "learning_rate": 4.007047666771274e-05,
      "loss": 0.0631,
      "step": 51
    },
    {
      "epoch": 0.9696969696969697,
      "grad_norm": 0.1023995652794838,
      "learning_rate": 3.728908329032567e-05,
      "loss": 0.0313,
      "step": 52
    },
    {
      "epoch": 0.9883449883449883,
      "grad_norm": 0.08331304788589478,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.0426,
      "step": 53
    },
    {
      "epoch": 1.0093240093240092,
      "grad_norm": 0.12266716361045837,
      "learning_rate": 3.1859614732467954e-05,
      "loss": 0.0565,
      "step": 54
    },
    {
      "epoch": 1.027972027972028,
      "grad_norm": 0.055081456899642944,
      "learning_rate": 2.9229249349905684e-05,
      "loss": 0.0395,
      "step": 55
    },
    {
      "epoch": 1.0466200466200466,
      "grad_norm": 0.08440406620502472,
      "learning_rate": 2.6666633838716314e-05,
      "loss": 0.0375,
      "step": 56
    },
    {
      "epoch": 1.0652680652680653,
      "grad_norm": 0.08142155408859253,
      "learning_rate": 2.418012691805191e-05,
      "loss": 0.0374,
      "step": 57
    },
    {
      "epoch": 1.083916083916084,
      "grad_norm": 0.0638071596622467,
      "learning_rate": 2.1777839056661554e-05,
      "loss": 0.0448,
      "step": 58
    },
    {
      "epoch": 1.1025641025641026,
      "grad_norm": 0.07102889567613602,
      "learning_rate": 1.946760601822809e-05,
      "loss": 0.038,
      "step": 59
    },
    {
      "epoch": 1.121212121212121,
      "grad_norm": 0.06918191909790039,
      "learning_rate": 1.725696330273575e-05,
      "loss": 0.0398,
      "step": 60
    },
    {
      "epoch": 1.1398601398601398,
      "grad_norm": 0.06793826073408127,
      "learning_rate": 1.5153121567235335e-05,
      "loss": 0.037,
      "step": 61
    },
    {
      "epoch": 1.1585081585081585,
      "grad_norm": 0.060138434171676636,
      "learning_rate": 1.3162943106179749e-05,
      "loss": 0.0412,
      "step": 62
    },
    {
      "epoch": 1.1771561771561772,
      "grad_norm": 0.06100895628333092,
      "learning_rate": 1.1292919468045877e-05,
      "loss": 0.0391,
      "step": 63
    },
    {
      "epoch": 1.1958041958041958,
      "grad_norm": 0.08120011538267136,
      "learning_rate": 9.549150281252633e-06,
      "loss": 0.0377,
      "step": 64
    },
    {
      "epoch": 1.2144522144522145,
      "grad_norm": 0.05974452942609787,
      "learning_rate": 7.937323358440935e-06,
      "loss": 0.0496,
      "step": 65
    },
    {
      "epoch": 1.2331002331002332,
      "grad_norm": 0.06714749336242676,
      "learning_rate": 6.462696144011149e-06,
      "loss": 0.0411,
      "step": 66
    },
    {
      "epoch": 1.2517482517482517,
      "grad_norm": 0.06198333948850632,
      "learning_rate": 5.13007856543209e-06,
      "loss": 0.0394,
      "step": 67
    },
    {
      "epoch": 1.2703962703962703,
      "grad_norm": 0.08504387736320496,
      "learning_rate": 3.9438173442575e-06,
      "loss": 0.0353,
      "step": 68
    },
    {
      "epoch": 1.289044289044289,
      "grad_norm": 0.12137630581855774,
      "learning_rate": 2.9077818180237693e-06,
      "loss": 0.0452,
      "step": 69
    },
    {
      "epoch": 1.3076923076923077,
      "grad_norm": 0.06834172457456589,
      "learning_rate": 2.0253513192751373e-06,
      "loss": 0.0434,
      "step": 70
    },
    {
      "epoch": 1.3263403263403264,
      "grad_norm": 0.05725034326314926,
      "learning_rate": 1.2994041528833266e-06,
      "loss": 0.0462,
      "step": 71
    },
    {
      "epoch": 1.3449883449883449,
      "grad_norm": 0.06547888368368149,
      "learning_rate": 7.323082076153509e-07,
      "loss": 0.0408,
      "step": 72
    },
    {
      "epoch": 1.3636363636363638,
      "grad_norm": 0.060845568776130676,
      "learning_rate": 3.2591323257248893e-07,
      "loss": 0.0457,
      "step": 73
    },
    {
      "epoch": 1.3822843822843822,
      "grad_norm": 0.05932041257619858,
      "learning_rate": 8.15448036932176e-08,
      "loss": 0.0422,
      "step": 74
    },
    {
      "epoch": 1.400932400932401,
      "grad_norm": 0.0670986920595169,
      "learning_rate": 0.0,
      "loss": 0.0386,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.262770368118784e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}