|
{
  "best_metric": 0.6613091826438904,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.09125128322117029,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.001825025664423406,
      "grad_norm": 1.3225451707839966,
      "learning_rate": 5e-05,
      "loss": 0.97,
      "step": 1
    },
    {
      "epoch": 0.001825025664423406,
      "eval_loss": 2.524681568145752,
      "eval_runtime": 2.6698,
      "eval_samples_per_second": 18.728,
      "eval_steps_per_second": 4.869,
      "step": 1
    },
    {
      "epoch": 0.003650051328846812,
      "grad_norm": 1.6591840982437134,
      "learning_rate": 0.0001,
      "loss": 1.308,
      "step": 2
    },
    {
      "epoch": 0.0054750769932702175,
      "grad_norm": 1.876944899559021,
      "learning_rate": 9.990365154573717e-05,
      "loss": 1.4438,
      "step": 3
    },
    {
      "epoch": 0.007300102657693624,
      "grad_norm": 2.3761868476867676,
      "learning_rate": 9.961501876182148e-05,
      "loss": 1.353,
      "step": 4
    },
    {
      "epoch": 0.009125128322117029,
      "grad_norm": 2.066288709640503,
      "learning_rate": 9.913533761814537e-05,
      "loss": 1.1193,
      "step": 5
    },
    {
      "epoch": 0.010950153986540435,
      "grad_norm": 2.1301584243774414,
      "learning_rate": 9.846666218300807e-05,
      "loss": 1.1276,
      "step": 6
    },
    {
      "epoch": 0.012775179650963841,
      "grad_norm": 1.1909555196762085,
      "learning_rate": 9.761185582727977e-05,
      "loss": 0.9841,
      "step": 7
    },
    {
      "epoch": 0.014600205315387247,
      "grad_norm": 1.662056565284729,
      "learning_rate": 9.657457896300791e-05,
      "loss": 0.9273,
      "step": 8
    },
    {
      "epoch": 0.016425230979810655,
      "grad_norm": 0.961665689945221,
      "learning_rate": 9.535927336897098e-05,
      "loss": 1.0642,
      "step": 9
    },
    {
      "epoch": 0.018250256644234058,
      "grad_norm": 0.9523164629936218,
      "learning_rate": 9.397114317029975e-05,
      "loss": 1.0873,
      "step": 10
    },
    {
      "epoch": 0.020075282308657464,
      "grad_norm": 1.3972877264022827,
      "learning_rate": 9.241613255361455e-05,
      "loss": 1.3219,
      "step": 11
    },
    {
      "epoch": 0.02190030797308087,
      "grad_norm": 0.9584366083145142,
      "learning_rate": 9.070090031310558e-05,
      "loss": 1.1643,
      "step": 12
    },
    {
      "epoch": 0.023725333637504276,
      "grad_norm": 1.5735657215118408,
      "learning_rate": 8.883279133655399e-05,
      "loss": 0.5538,
      "step": 13
    },
    {
      "epoch": 0.025550359301927682,
      "grad_norm": 1.2523537874221802,
      "learning_rate": 8.681980515339464e-05,
      "loss": 0.3921,
      "step": 14
    },
    {
      "epoch": 0.02737538496635109,
      "grad_norm": 0.676099956035614,
      "learning_rate": 8.467056167950311e-05,
      "loss": 0.5625,
      "step": 15
    },
    {
      "epoch": 0.029200410630774495,
      "grad_norm": 0.852027952671051,
      "learning_rate": 8.239426430539243e-05,
      "loss": 0.7331,
      "step": 16
    },
    {
      "epoch": 0.0310254362951979,
      "grad_norm": 0.8262453675270081,
      "learning_rate": 8.000066048588211e-05,
      "loss": 0.8157,
      "step": 17
    },
    {
      "epoch": 0.03285046195962131,
      "grad_norm": 0.7066594958305359,
      "learning_rate": 7.75e-05,
      "loss": 0.7801,
      "step": 18
    },
    {
      "epoch": 0.03467548762404472,
      "grad_norm": 0.5954535007476807,
      "learning_rate": 7.490299105985507e-05,
      "loss": 0.7334,
      "step": 19
    },
    {
      "epoch": 0.036500513288468116,
      "grad_norm": 0.5671157836914062,
      "learning_rate": 7.222075445642904e-05,
      "loss": 0.662,
      "step": 20
    },
    {
      "epoch": 0.03832553895289152,
      "grad_norm": 0.6849473714828491,
      "learning_rate": 6.946477593864228e-05,
      "loss": 0.8301,
      "step": 21
    },
    {
      "epoch": 0.04015056461731493,
      "grad_norm": 0.7973899841308594,
      "learning_rate": 6.664685702961344e-05,
      "loss": 0.8623,
      "step": 22
    },
    {
      "epoch": 0.041975590281738334,
      "grad_norm": 0.8184006214141846,
      "learning_rate": 6.377906449072578e-05,
      "loss": 1.0401,
      "step": 23
    },
    {
      "epoch": 0.04380061594616174,
      "grad_norm": 0.9607341289520264,
      "learning_rate": 6.087367864990233e-05,
      "loss": 1.0293,
      "step": 24
    },
    {
      "epoch": 0.045625641610585146,
      "grad_norm": 1.3468083143234253,
      "learning_rate": 5.794314081535644e-05,
      "loss": 0.9982,
      "step": 25
    },
    {
      "epoch": 0.045625641610585146,
      "eval_loss": 0.7038731575012207,
      "eval_runtime": 2.0359,
      "eval_samples_per_second": 24.559,
      "eval_steps_per_second": 6.385,
      "step": 25
    },
    {
      "epoch": 0.04745066727500855,
      "grad_norm": 0.35777395963668823,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.336,
      "step": 26
    },
    {
      "epoch": 0.04927569293943196,
      "grad_norm": 0.45620062947273254,
      "learning_rate": 5.205685918464356e-05,
      "loss": 0.6215,
      "step": 27
    },
    {
      "epoch": 0.051100718603855365,
      "grad_norm": 0.48935970664024353,
      "learning_rate": 4.912632135009769e-05,
      "loss": 0.636,
      "step": 28
    },
    {
      "epoch": 0.05292574426827877,
      "grad_norm": 0.44577130675315857,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 0.5593,
      "step": 29
    },
    {
      "epoch": 0.05475076993270218,
      "grad_norm": 0.6385313272476196,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 0.702,
      "step": 30
    },
    {
      "epoch": 0.05657579559712558,
      "grad_norm": 0.5207433104515076,
      "learning_rate": 4.053522406135775e-05,
      "loss": 0.682,
      "step": 31
    },
    {
      "epoch": 0.05840082126154899,
      "grad_norm": 0.5813780426979065,
      "learning_rate": 3.777924554357096e-05,
      "loss": 0.6973,
      "step": 32
    },
    {
      "epoch": 0.060225846925972396,
      "grad_norm": 0.5056950449943542,
      "learning_rate": 3.509700894014496e-05,
      "loss": 0.7042,
      "step": 33
    },
    {
      "epoch": 0.0620508725903958,
      "grad_norm": 0.6021554470062256,
      "learning_rate": 3.250000000000001e-05,
      "loss": 0.8092,
      "step": 34
    },
    {
      "epoch": 0.06387589825481921,
      "grad_norm": 0.6422117948532104,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 0.9383,
      "step": 35
    },
    {
      "epoch": 0.06570092391924262,
      "grad_norm": 0.7147862315177917,
      "learning_rate": 2.760573569460757e-05,
      "loss": 1.022,
      "step": 36
    },
    {
      "epoch": 0.06752594958366602,
      "grad_norm": 0.7200132012367249,
      "learning_rate": 2.53294383204969e-05,
      "loss": 0.9203,
      "step": 37
    },
    {
      "epoch": 0.06935097524808943,
      "grad_norm": 0.38760125637054443,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 0.4126,
      "step": 38
    },
    {
      "epoch": 0.07117600091251283,
      "grad_norm": 0.31938913464546204,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 0.3984,
      "step": 39
    },
    {
      "epoch": 0.07300102657693623,
      "grad_norm": 0.37477895617485046,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 0.5531,
      "step": 40
    },
    {
      "epoch": 0.07482605224135964,
      "grad_norm": 0.45120054483413696,
      "learning_rate": 1.758386744638546e-05,
      "loss": 0.6015,
      "step": 41
    },
    {
      "epoch": 0.07665107790578304,
      "grad_norm": 0.4910838305950165,
      "learning_rate": 1.602885682970026e-05,
      "loss": 0.56,
      "step": 42
    },
    {
      "epoch": 0.07847610357020646,
      "grad_norm": 0.46236470341682434,
      "learning_rate": 1.464072663102903e-05,
      "loss": 0.6936,
      "step": 43
    },
    {
      "epoch": 0.08030112923462986,
      "grad_norm": 0.5043991208076477,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 0.683,
      "step": 44
    },
    {
      "epoch": 0.08212615489905327,
      "grad_norm": 0.5607537627220154,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 0.8357,
      "step": 45
    },
    {
      "epoch": 0.08395118056347667,
      "grad_norm": 0.5264842510223389,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 0.6738,
      "step": 46
    },
    {
      "epoch": 0.08577620622790008,
      "grad_norm": 0.5766991972923279,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 0.7206,
      "step": 47
    },
    {
      "epoch": 0.08760123189232348,
      "grad_norm": 0.6205928325653076,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 0.9237,
      "step": 48
    },
    {
      "epoch": 0.0894262575567469,
      "grad_norm": 0.7550768852233887,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 1.0183,
      "step": 49
    },
    {
      "epoch": 0.09125128322117029,
      "grad_norm": 0.8377938270568848,
      "learning_rate": 1e-05,
      "loss": 0.9597,
      "step": 50
    },
    {
      "epoch": 0.09125128322117029,
      "eval_loss": 0.6613091826438904,
      "eval_runtime": 2.0395,
      "eval_samples_per_second": 24.516,
      "eval_steps_per_second": 6.374,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.968083617316864e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|