{
  "best_metric": 0.5200080275535583,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.5657708628005658,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.011315417256011316,
      "grad_norm": 2.1943609714508057,
      "learning_rate": 5e-05,
      "loss": 8.4242,
      "step": 1
    },
    {
      "epoch": 0.011315417256011316,
      "eval_loss": 0.8281431198120117,
      "eval_runtime": 44.4369,
      "eval_samples_per_second": 13.39,
      "eval_steps_per_second": 1.688,
      "step": 1
    },
    {
      "epoch": 0.02263083451202263,
      "grad_norm": 2.6719741821289062,
      "learning_rate": 0.0001,
      "loss": 9.9323,
      "step": 2
    },
    {
      "epoch": 0.033946251768033946,
      "grad_norm": 3.0640249252319336,
      "learning_rate": 9.989294616193017e-05,
      "loss": 10.7148,
      "step": 3
    },
    {
      "epoch": 0.04526166902404526,
      "grad_norm": 3.3661434650421143,
      "learning_rate": 9.957224306869053e-05,
      "loss": 10.8608,
      "step": 4
    },
    {
      "epoch": 0.056577086280056574,
      "grad_norm": 3.1292307376861572,
      "learning_rate": 9.903926402016153e-05,
      "loss": 10.2639,
      "step": 5
    },
    {
      "epoch": 0.06789250353606789,
      "grad_norm": 2.260249614715576,
      "learning_rate": 9.829629131445342e-05,
      "loss": 9.5547,
      "step": 6
    },
    {
      "epoch": 0.07920792079207921,
      "grad_norm": 2.1925923824310303,
      "learning_rate": 9.73465064747553e-05,
      "loss": 9.9299,
      "step": 7
    },
    {
      "epoch": 0.09052333804809053,
      "grad_norm": 2.2138164043426514,
      "learning_rate": 9.619397662556435e-05,
      "loss": 10.326,
      "step": 8
    },
    {
      "epoch": 0.10183875530410184,
      "grad_norm": 1.9601538181304932,
      "learning_rate": 9.484363707663442e-05,
      "loss": 9.7972,
      "step": 9
    },
    {
      "epoch": 0.11315417256011315,
      "grad_norm": 2.3362250328063965,
      "learning_rate": 9.330127018922194e-05,
      "loss": 9.9417,
      "step": 10
    },
    {
      "epoch": 0.12446958981612447,
      "grad_norm": 2.5164709091186523,
      "learning_rate": 9.157348061512727e-05,
      "loss": 10.341,
      "step": 11
    },
    {
      "epoch": 0.13578500707213578,
      "grad_norm": 2.4237403869628906,
      "learning_rate": 8.966766701456177e-05,
      "loss": 9.0907,
      "step": 12
    },
    {
      "epoch": 0.1471004243281471,
      "grad_norm": 2.3622500896453857,
      "learning_rate": 8.759199037394887e-05,
      "loss": 7.2716,
      "step": 13
    },
    {
      "epoch": 0.15841584158415842,
      "grad_norm": 2.7323813438415527,
      "learning_rate": 8.535533905932738e-05,
      "loss": 8.3576,
      "step": 14
    },
    {
      "epoch": 0.16973125884016974,
      "grad_norm": 2.5983994007110596,
      "learning_rate": 8.296729075500344e-05,
      "loss": 8.8552,
      "step": 15
    },
    {
      "epoch": 0.18104667609618105,
      "grad_norm": 2.3216371536254883,
      "learning_rate": 8.043807145043604e-05,
      "loss": 8.3984,
      "step": 16
    },
    {
      "epoch": 0.19236209335219237,
      "grad_norm": 1.7305115461349487,
      "learning_rate": 7.777851165098012e-05,
      "loss": 8.3699,
      "step": 17
    },
    {
      "epoch": 0.2036775106082037,
      "grad_norm": 1.699713945388794,
      "learning_rate": 7.500000000000001e-05,
      "loss": 8.8624,
      "step": 18
    },
    {
      "epoch": 0.214992927864215,
      "grad_norm": 1.6283882856369019,
      "learning_rate": 7.211443451095007e-05,
      "loss": 8.9808,
      "step": 19
    },
    {
      "epoch": 0.2263083451202263,
      "grad_norm": 1.6349085569381714,
      "learning_rate": 6.91341716182545e-05,
      "loss": 9.9526,
      "step": 20
    },
    {
      "epoch": 0.2376237623762376,
      "grad_norm": 1.7173153162002563,
      "learning_rate": 6.607197326515808e-05,
      "loss": 9.4905,
      "step": 21
    },
    {
      "epoch": 0.24893917963224893,
      "grad_norm": 1.9035966396331787,
      "learning_rate": 6.294095225512603e-05,
      "loss": 9.6389,
      "step": 22
    },
    {
      "epoch": 0.26025459688826025,
      "grad_norm": 1.9487980604171753,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 9.2311,
      "step": 23
    },
    {
      "epoch": 0.27157001414427157,
      "grad_norm": 2.21173095703125,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 8.8328,
      "step": 24
    },
    {
      "epoch": 0.2828854314002829,
      "grad_norm": 2.991882562637329,
      "learning_rate": 5.327015646150716e-05,
      "loss": 8.3784,
      "step": 25
    },
    {
      "epoch": 0.2828854314002829,
      "eval_loss": 0.5269150733947754,
      "eval_runtime": 45.0688,
      "eval_samples_per_second": 13.202,
      "eval_steps_per_second": 1.664,
      "step": 25
    },
    {
      "epoch": 0.2942008486562942,
      "grad_norm": 0.8682360053062439,
      "learning_rate": 5e-05,
      "loss": 7.2005,
      "step": 26
    },
    {
      "epoch": 0.3055162659123055,
      "grad_norm": 0.9563512802124023,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 7.9264,
      "step": 27
    },
    {
      "epoch": 0.31683168316831684,
      "grad_norm": 1.096703052520752,
      "learning_rate": 4.347369038899744e-05,
      "loss": 8.04,
      "step": 28
    },
    {
      "epoch": 0.32814710042432815,
      "grad_norm": 1.0564768314361572,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 8.4954,
      "step": 29
    },
    {
      "epoch": 0.33946251768033947,
      "grad_norm": 1.1875149011611938,
      "learning_rate": 3.705904774487396e-05,
      "loss": 8.2755,
      "step": 30
    },
    {
      "epoch": 0.3507779349363508,
      "grad_norm": 1.1350054740905762,
      "learning_rate": 3.392802673484193e-05,
      "loss": 8.7413,
      "step": 31
    },
    {
      "epoch": 0.3620933521923621,
      "grad_norm": 1.2498562335968018,
      "learning_rate": 3.086582838174551e-05,
      "loss": 9.1359,
      "step": 32
    },
    {
      "epoch": 0.3734087694483734,
      "grad_norm": 1.4139655828475952,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 8.9738,
      "step": 33
    },
    {
      "epoch": 0.38472418670438474,
      "grad_norm": 1.4577665328979492,
      "learning_rate": 2.500000000000001e-05,
      "loss": 8.6911,
      "step": 34
    },
    {
      "epoch": 0.39603960396039606,
      "grad_norm": 1.629506230354309,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 9.1795,
      "step": 35
    },
    {
      "epoch": 0.4073550212164074,
      "grad_norm": 1.74530029296875,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 9.2998,
      "step": 36
    },
    {
      "epoch": 0.4186704384724187,
      "grad_norm": 2.077608346939087,
      "learning_rate": 1.703270924499656e-05,
      "loss": 8.95,
      "step": 37
    },
    {
      "epoch": 0.42998585572843,
      "grad_norm": 1.909153699874878,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 6.992,
      "step": 38
    },
    {
      "epoch": 0.44130127298444133,
      "grad_norm": 0.8186285495758057,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 7.3094,
      "step": 39
    },
    {
      "epoch": 0.4526166902404526,
      "grad_norm": 0.9286304116249084,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 7.8571,
      "step": 40
    },
    {
      "epoch": 0.4639321074964639,
      "grad_norm": 0.9646059274673462,
      "learning_rate": 8.426519384872733e-06,
      "loss": 8.4043,
      "step": 41
    },
    {
      "epoch": 0.4752475247524752,
      "grad_norm": 0.9747306108474731,
      "learning_rate": 6.698729810778065e-06,
      "loss": 7.5189,
      "step": 42
    },
    {
      "epoch": 0.48656294200848654,
      "grad_norm": 1.110140323638916,
      "learning_rate": 5.156362923365588e-06,
      "loss": 8.197,
      "step": 43
    },
    {
      "epoch": 0.49787835926449786,
      "grad_norm": 1.2244906425476074,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 8.6932,
      "step": 44
    },
    {
      "epoch": 0.5091937765205092,
      "grad_norm": 1.2957104444503784,
      "learning_rate": 2.653493525244721e-06,
      "loss": 9.1472,
      "step": 45
    },
    {
      "epoch": 0.5205091937765205,
      "grad_norm": 1.3898643255233765,
      "learning_rate": 1.70370868554659e-06,
      "loss": 9.3223,
      "step": 46
    },
    {
      "epoch": 0.5318246110325319,
      "grad_norm": 1.5572975873947144,
      "learning_rate": 9.607359798384785e-07,
      "loss": 8.9785,
      "step": 47
    },
    {
      "epoch": 0.5431400282885431,
      "grad_norm": 1.7228946685791016,
      "learning_rate": 4.277569313094809e-07,
      "loss": 8.6731,
      "step": 48
    },
    {
      "epoch": 0.5544554455445545,
      "grad_norm": 2.0071144104003906,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 9.0,
      "step": 49
    },
    {
      "epoch": 0.5657708628005658,
      "grad_norm": 2.848938226699829,
      "learning_rate": 0.0,
      "loss": 7.8565,
      "step": 50
    },
    {
      "epoch": 0.5657708628005658,
      "eval_loss": 0.5200080275535583,
      "eval_runtime": 43.7314,
      "eval_samples_per_second": 13.606,
      "eval_steps_per_second": 1.715,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0954378319683912e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}