|
{
  "best_metric": 2.119426727294922,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 0.2702702702702703,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005405405405405406,
      "grad_norm": 0.16381613910198212,
      "learning_rate": 2e-05,
      "loss": 2.3432,
      "step": 1
    },
    {
      "epoch": 0.005405405405405406,
      "eval_loss": 2.171922206878662,
      "eval_runtime": 16.6123,
      "eval_samples_per_second": 4.695,
      "eval_steps_per_second": 2.348,
      "step": 1
    },
    {
      "epoch": 0.010810810810810811,
      "grad_norm": 0.18275554478168488,
      "learning_rate": 4e-05,
      "loss": 2.2107,
      "step": 2
    },
    {
      "epoch": 0.016216216216216217,
      "grad_norm": 0.19883139431476593,
      "learning_rate": 6e-05,
      "loss": 2.0782,
      "step": 3
    },
    {
      "epoch": 0.021621621621621623,
      "grad_norm": 0.17207121849060059,
      "learning_rate": 8e-05,
      "loss": 2.4458,
      "step": 4
    },
    {
      "epoch": 0.02702702702702703,
      "grad_norm": 0.17877055704593658,
      "learning_rate": 0.0001,
      "loss": 2.0913,
      "step": 5
    },
    {
      "epoch": 0.032432432432432434,
      "grad_norm": 0.180442675948143,
      "learning_rate": 9.997266286704631e-05,
      "loss": 2.3307,
      "step": 6
    },
    {
      "epoch": 0.03783783783783784,
      "grad_norm": 0.19494755566120148,
      "learning_rate": 9.989068136093873e-05,
      "loss": 2.4467,
      "step": 7
    },
    {
      "epoch": 0.043243243243243246,
      "grad_norm": 0.19875937700271606,
      "learning_rate": 9.975414512725057e-05,
      "loss": 2.4425,
      "step": 8
    },
    {
      "epoch": 0.04864864864864865,
      "grad_norm": 0.19535940885543823,
      "learning_rate": 9.956320346634876e-05,
      "loss": 2.4534,
      "step": 9
    },
    {
      "epoch": 0.05405405405405406,
      "grad_norm": 0.22583356499671936,
      "learning_rate": 9.931806517013612e-05,
      "loss": 2.1556,
      "step": 10
    },
    {
      "epoch": 0.05945945945945946,
      "grad_norm": 0.21046710014343262,
      "learning_rate": 9.901899829374047e-05,
      "loss": 2.4369,
      "step": 11
    },
    {
      "epoch": 0.06486486486486487,
      "grad_norm": 0.21617771685123444,
      "learning_rate": 9.86663298624003e-05,
      "loss": 2.1116,
      "step": 12
    },
    {
      "epoch": 0.07027027027027027,
      "grad_norm": 0.1935390830039978,
      "learning_rate": 9.826044551386744e-05,
      "loss": 2.1569,
      "step": 13
    },
    {
      "epoch": 0.07567567567567568,
      "grad_norm": 0.21900971233844757,
      "learning_rate": 9.780178907671789e-05,
      "loss": 2.1244,
      "step": 14
    },
    {
      "epoch": 0.08108108108108109,
      "grad_norm": 0.1987326294183731,
      "learning_rate": 9.729086208503174e-05,
      "loss": 2.2487,
      "step": 15
    },
    {
      "epoch": 0.08648648648648649,
      "grad_norm": 0.19258294999599457,
      "learning_rate": 9.672822322997305e-05,
      "loss": 2.3195,
      "step": 16
    },
    {
      "epoch": 0.0918918918918919,
      "grad_norm": 0.18117763102054596,
      "learning_rate": 9.611448774886924e-05,
      "loss": 2.2694,
      "step": 17
    },
    {
      "epoch": 0.0972972972972973,
      "grad_norm": 0.20509183406829834,
      "learning_rate": 9.545032675245813e-05,
      "loss": 2.0377,
      "step": 18
    },
    {
      "epoch": 0.10270270270270271,
      "grad_norm": 0.21269042789936066,
      "learning_rate": 9.473646649103818e-05,
      "loss": 2.1798,
      "step": 19
    },
    {
      "epoch": 0.10810810810810811,
      "grad_norm": 0.25054454803466797,
      "learning_rate": 9.397368756032445e-05,
      "loss": 2.176,
      "step": 20
    },
    {
      "epoch": 0.11351351351351352,
      "grad_norm": 0.21175694465637207,
      "learning_rate": 9.316282404787871e-05,
      "loss": 2.1471,
      "step": 21
    },
    {
      "epoch": 0.11891891891891893,
      "grad_norm": 0.21485082805156708,
      "learning_rate": 9.230476262104677e-05,
      "loss": 2.1054,
      "step": 22
    },
    {
      "epoch": 0.12432432432432433,
      "grad_norm": 0.215715229511261,
      "learning_rate": 9.140044155740101e-05,
      "loss": 2.2539,
      "step": 23
    },
    {
      "epoch": 0.12972972972972974,
      "grad_norm": 0.2235511839389801,
      "learning_rate": 9.045084971874738e-05,
      "loss": 1.967,
      "step": 24
    },
    {
      "epoch": 0.13513513513513514,
      "grad_norm": 0.24300596117973328,
      "learning_rate": 8.945702546981969e-05,
      "loss": 2.2239,
      "step": 25
    },
    {
      "epoch": 0.13513513513513514,
      "eval_loss": 2.119426727294922,
      "eval_runtime": 16.6129,
      "eval_samples_per_second": 4.695,
      "eval_steps_per_second": 2.348,
      "step": 25
    },
    {
      "epoch": 0.14054054054054055,
      "grad_norm": 0.23708048462867737,
      "learning_rate": 8.842005554284296e-05,
      "loss": 2.1888,
      "step": 26
    },
    {
      "epoch": 0.14594594594594595,
      "grad_norm": 0.2406768947839737,
      "learning_rate": 8.73410738492077e-05,
      "loss": 2.1915,
      "step": 27
    },
    {
      "epoch": 0.15135135135135136,
      "grad_norm": 0.24808815121650696,
      "learning_rate": 8.622126023955446e-05,
      "loss": 2.0992,
      "step": 28
    },
    {
      "epoch": 0.15675675675675677,
      "grad_norm": 0.25979381799697876,
      "learning_rate": 8.506183921362443e-05,
      "loss": 2.1816,
      "step": 29
    },
    {
      "epoch": 0.16216216216216217,
      "grad_norm": 0.26246872544288635,
      "learning_rate": 8.386407858128706e-05,
      "loss": 2.1843,
      "step": 30
    },
    {
      "epoch": 0.16756756756756758,
      "grad_norm": 0.23941665887832642,
      "learning_rate": 8.262928807620843e-05,
      "loss": 2.2098,
      "step": 31
    },
    {
      "epoch": 0.17297297297297298,
      "grad_norm": 0.2462311089038849,
      "learning_rate": 8.135881792367686e-05,
      "loss": 2.0607,
      "step": 32
    },
    {
      "epoch": 0.1783783783783784,
      "grad_norm": 0.2824133038520813,
      "learning_rate": 8.005405736415126e-05,
      "loss": 2.1525,
      "step": 33
    },
    {
      "epoch": 0.1837837837837838,
      "grad_norm": 0.3467615842819214,
      "learning_rate": 7.871643313414718e-05,
      "loss": 2.0407,
      "step": 34
    },
    {
      "epoch": 0.1891891891891892,
      "grad_norm": 0.30004552006721497,
      "learning_rate": 7.734740790612136e-05,
      "loss": 2.1346,
      "step": 35
    },
    {
      "epoch": 0.1945945945945946,
      "grad_norm": 0.29786911606788635,
      "learning_rate": 7.594847868906076e-05,
      "loss": 2.1989,
      "step": 36
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.3035438358783722,
      "learning_rate": 7.452117519152542e-05,
      "loss": 1.9934,
      "step": 37
    },
    {
      "epoch": 0.20540540540540542,
      "grad_norm": 0.3192494213581085,
      "learning_rate": 7.30670581489344e-05,
      "loss": 2.1614,
      "step": 38
    },
    {
      "epoch": 0.21081081081081082,
      "grad_norm": 0.32142555713653564,
      "learning_rate": 7.158771761692464e-05,
      "loss": 2.142,
      "step": 39
    },
    {
      "epoch": 0.21621621621621623,
      "grad_norm": 0.2814510762691498,
      "learning_rate": 7.008477123264848e-05,
      "loss": 2.1949,
      "step": 40
    },
    {
      "epoch": 0.22162162162162163,
      "grad_norm": 0.27261224389076233,
      "learning_rate": 6.855986244591104e-05,
      "loss": 2.2867,
      "step": 41
    },
    {
      "epoch": 0.22702702702702704,
      "grad_norm": 0.3069376051425934,
      "learning_rate": 6.701465872208216e-05,
      "loss": 2.4095,
      "step": 42
    },
    {
      "epoch": 0.23243243243243245,
      "grad_norm": 0.30093446373939514,
      "learning_rate": 6.545084971874738e-05,
      "loss": 2.2246,
      "step": 43
    },
    {
      "epoch": 0.23783783783783785,
      "grad_norm": 0.37811461091041565,
      "learning_rate": 6.387014543809223e-05,
      "loss": 2.2689,
      "step": 44
    },
    {
      "epoch": 0.24324324324324326,
      "grad_norm": 0.4132545292377472,
      "learning_rate": 6.227427435703997e-05,
      "loss": 2.1285,
      "step": 45
    },
    {
      "epoch": 0.24864864864864866,
      "grad_norm": 0.4661512076854706,
      "learning_rate": 6.066498153718735e-05,
      "loss": 2.4561,
      "step": 46
    },
    {
      "epoch": 0.25405405405405407,
      "grad_norm": 0.18825818598270416,
      "learning_rate": 5.90440267166055e-05,
      "loss": 2.1234,
      "step": 47
    },
    {
      "epoch": 0.2594594594594595,
      "grad_norm": 0.17494626343250275,
      "learning_rate": 5.74131823855921e-05,
      "loss": 2.0999,
      "step": 48
    },
    {
      "epoch": 0.2648648648648649,
      "grad_norm": 0.1872102916240692,
      "learning_rate": 5.577423184847932e-05,
      "loss": 2.2597,
      "step": 49
    },
    {
      "epoch": 0.2702702702702703,
      "grad_norm": 0.1854385882616043,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 2.3098,
      "step": 50
    },
    {
      "epoch": 0.2702702702702703,
      "eval_loss": 2.1201837062835693,
      "eval_runtime": 16.9001,
      "eval_samples_per_second": 4.615,
      "eval_steps_per_second": 2.308,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 1
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.304494077457203e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}