{
  "best_metric": 0.6206071972846985,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.031140521603736863,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0006228104320747372,
      "grad_norm": 34.961917877197266,
      "learning_rate": 5e-05,
      "loss": 4.4055,
      "step": 1
    },
    {
      "epoch": 0.0006228104320747372,
      "eval_loss": 5.756421089172363,
      "eval_runtime": 463.9618,
      "eval_samples_per_second": 23.314,
      "eval_steps_per_second": 2.916,
      "step": 1
    },
    {
      "epoch": 0.0012456208641494744,
      "grad_norm": 25.625226974487305,
      "learning_rate": 0.0001,
      "loss": 4.1662,
      "step": 2
    },
    {
      "epoch": 0.0018684312962242117,
      "grad_norm": 18.706308364868164,
      "learning_rate": 9.989294616193017e-05,
      "loss": 3.099,
      "step": 3
    },
    {
      "epoch": 0.002491241728298949,
      "grad_norm": 14.237258911132812,
      "learning_rate": 9.957224306869053e-05,
      "loss": 1.2839,
      "step": 4
    },
    {
      "epoch": 0.003114052160373686,
      "grad_norm": 7.653135299682617,
      "learning_rate": 9.903926402016153e-05,
      "loss": 0.7328,
      "step": 5
    },
    {
      "epoch": 0.0037368625924484233,
      "grad_norm": 7.604238986968994,
      "learning_rate": 9.829629131445342e-05,
      "loss": 0.6975,
      "step": 6
    },
    {
      "epoch": 0.004359673024523161,
      "grad_norm": 4.739212512969971,
      "learning_rate": 9.73465064747553e-05,
      "loss": 0.6526,
      "step": 7
    },
    {
      "epoch": 0.004982483456597898,
      "grad_norm": 4.953818321228027,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.6595,
      "step": 8
    },
    {
      "epoch": 0.005605293888672635,
      "grad_norm": 5.076328754425049,
      "learning_rate": 9.484363707663442e-05,
      "loss": 0.8259,
      "step": 9
    },
    {
      "epoch": 0.006228104320747372,
      "grad_norm": 4.2572736740112305,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.7653,
      "step": 10
    },
    {
      "epoch": 0.00685091475282211,
      "grad_norm": 5.306685447692871,
      "learning_rate": 9.157348061512727e-05,
      "loss": 0.9771,
      "step": 11
    },
    {
      "epoch": 0.007473725184896847,
      "grad_norm": 5.197048187255859,
      "learning_rate": 8.966766701456177e-05,
      "loss": 1.1657,
      "step": 12
    },
    {
      "epoch": 0.008096535616971585,
      "grad_norm": 5.541998863220215,
      "learning_rate": 8.759199037394887e-05,
      "loss": 1.0405,
      "step": 13
    },
    {
      "epoch": 0.008719346049046322,
      "grad_norm": 4.117219924926758,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.7929,
      "step": 14
    },
    {
      "epoch": 0.009342156481121059,
      "grad_norm": 3.033407688140869,
      "learning_rate": 8.296729075500344e-05,
      "loss": 0.5813,
      "step": 15
    },
    {
      "epoch": 0.009964966913195795,
      "grad_norm": 3.0356810092926025,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.5848,
      "step": 16
    },
    {
      "epoch": 0.010587777345270534,
      "grad_norm": 3.6332268714904785,
      "learning_rate": 7.777851165098012e-05,
      "loss": 0.5569,
      "step": 17
    },
    {
      "epoch": 0.01121058777734527,
      "grad_norm": 4.638049602508545,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.575,
      "step": 18
    },
    {
      "epoch": 0.011833398209420008,
      "grad_norm": 3.121140718460083,
      "learning_rate": 7.211443451095007e-05,
      "loss": 0.4935,
      "step": 19
    },
    {
      "epoch": 0.012456208641494744,
      "grad_norm": 3.2301628589630127,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.6655,
      "step": 20
    },
    {
      "epoch": 0.013079019073569483,
      "grad_norm": 3.529449462890625,
      "learning_rate": 6.607197326515808e-05,
      "loss": 0.6141,
      "step": 21
    },
    {
      "epoch": 0.01370182950564422,
      "grad_norm": 3.159022808074951,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.5717,
      "step": 22
    },
    {
      "epoch": 0.014324639937718956,
      "grad_norm": 3.829219341278076,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 0.6012,
      "step": 23
    },
    {
      "epoch": 0.014947450369793693,
      "grad_norm": 4.6542487144470215,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 0.832,
      "step": 24
    },
    {
      "epoch": 0.015570260801868432,
      "grad_norm": 5.967838764190674,
      "learning_rate": 5.327015646150716e-05,
      "loss": 1.376,
      "step": 25
    },
    {
      "epoch": 0.015570260801868432,
      "eval_loss": 0.6481190919876099,
      "eval_runtime": 464.1823,
      "eval_samples_per_second": 23.303,
      "eval_steps_per_second": 2.915,
      "step": 25
    },
    {
      "epoch": 0.01619307123394317,
      "grad_norm": 4.6942877769470215,
      "learning_rate": 5e-05,
      "loss": 0.7529,
      "step": 26
    },
    {
      "epoch": 0.016815881666017905,
      "grad_norm": 2.933812379837036,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 0.5804,
      "step": 27
    },
    {
      "epoch": 0.017438692098092644,
      "grad_norm": 2.9494214057922363,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.6122,
      "step": 28
    },
    {
      "epoch": 0.01806150253016738,
      "grad_norm": 3.9514541625976562,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 0.586,
      "step": 29
    },
    {
      "epoch": 0.018684312962242117,
      "grad_norm": 2.456082820892334,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.5001,
      "step": 30
    },
    {
      "epoch": 0.019307123394316856,
      "grad_norm": 3.063210964202881,
      "learning_rate": 3.392802673484193e-05,
      "loss": 0.6416,
      "step": 31
    },
    {
      "epoch": 0.01992993382639159,
      "grad_norm": 2.158895969390869,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.4718,
      "step": 32
    },
    {
      "epoch": 0.02055274425846633,
      "grad_norm": 3.134558916091919,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 0.6344,
      "step": 33
    },
    {
      "epoch": 0.021175554690541068,
      "grad_norm": 5.159844875335693,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.5952,
      "step": 34
    },
    {
      "epoch": 0.021798365122615803,
      "grad_norm": 3.6120400428771973,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 0.7819,
      "step": 35
    },
    {
      "epoch": 0.02242117555469054,
      "grad_norm": 3.597929000854492,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.753,
      "step": 36
    },
    {
      "epoch": 0.023043985986765277,
      "grad_norm": 4.944601535797119,
      "learning_rate": 1.703270924499656e-05,
      "loss": 1.0505,
      "step": 37
    },
    {
      "epoch": 0.023666796418840015,
      "grad_norm": 5.831929683685303,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.8816,
      "step": 38
    },
    {
      "epoch": 0.024289606850914754,
      "grad_norm": 3.287022829055786,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 0.74,
      "step": 39
    },
    {
      "epoch": 0.02491241728298949,
      "grad_norm": 2.3541550636291504,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.4848,
      "step": 40
    },
    {
      "epoch": 0.025535227715064227,
      "grad_norm": 3.2387115955352783,
      "learning_rate": 8.426519384872733e-06,
      "loss": 0.6658,
      "step": 41
    },
    {
      "epoch": 0.026158038147138966,
      "grad_norm": 2.4414989948272705,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.5423,
      "step": 42
    },
    {
      "epoch": 0.0267808485792137,
      "grad_norm": 2.677088737487793,
      "learning_rate": 5.156362923365588e-06,
      "loss": 0.4431,
      "step": 43
    },
    {
      "epoch": 0.02740365901128844,
      "grad_norm": 2.6570982933044434,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.5932,
      "step": 44
    },
    {
      "epoch": 0.028026469443363178,
      "grad_norm": 2.9952850341796875,
      "learning_rate": 2.653493525244721e-06,
      "loss": 0.5313,
      "step": 45
    },
    {
      "epoch": 0.028649279875437913,
      "grad_norm": 2.977844476699829,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.5879,
      "step": 46
    },
    {
      "epoch": 0.02927209030751265,
      "grad_norm": 2.791550397872925,
      "learning_rate": 9.607359798384785e-07,
      "loss": 0.5124,
      "step": 47
    },
    {
      "epoch": 0.029894900739587386,
      "grad_norm": 3.966216802597046,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.7275,
      "step": 48
    },
    {
      "epoch": 0.030517711171662125,
      "grad_norm": 4.126320838928223,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 0.7949,
      "step": 49
    },
    {
      "epoch": 0.031140521603736863,
      "grad_norm": 5.07513952255249,
      "learning_rate": 0.0,
      "loss": 1.1318,
      "step": 50
    },
    {
      "epoch": 0.031140521603736863,
      "eval_loss": 0.6206071972846985,
      "eval_runtime": 476.0766,
      "eval_samples_per_second": 22.721,
      "eval_steps_per_second": 2.842,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.435490789261312e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}