{
  "best_metric": 0.5620399713516235,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 1.9900497512437811,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03980099502487562,
      "grad_norm": 0.13307230174541473,
      "learning_rate": 5e-05,
      "loss": 0.7752,
      "step": 1
    },
    {
      "epoch": 0.03980099502487562,
      "eval_loss": 0.7566336393356323,
      "eval_runtime": 5.0659,
      "eval_samples_per_second": 33.558,
      "eval_steps_per_second": 4.343,
      "step": 1
    },
    {
      "epoch": 0.07960199004975124,
      "grad_norm": 0.1349344104528427,
      "learning_rate": 0.0001,
      "loss": 0.756,
      "step": 2
    },
    {
      "epoch": 0.11940298507462686,
      "grad_norm": 0.13909032940864563,
      "learning_rate": 9.989294616193017e-05,
      "loss": 0.7531,
      "step": 3
    },
    {
      "epoch": 0.15920398009950248,
      "grad_norm": 0.14223748445510864,
      "learning_rate": 9.957224306869053e-05,
      "loss": 0.7397,
      "step": 4
    },
    {
      "epoch": 0.19900497512437812,
      "grad_norm": 0.19557783007621765,
      "learning_rate": 9.903926402016153e-05,
      "loss": 0.7403,
      "step": 5
    },
    {
      "epoch": 0.23880597014925373,
      "grad_norm": 0.12540394067764282,
      "learning_rate": 9.829629131445342e-05,
      "loss": 0.6391,
      "step": 6
    },
    {
      "epoch": 0.27860696517412936,
      "grad_norm": 0.12868504226207733,
      "learning_rate": 9.73465064747553e-05,
      "loss": 0.6688,
      "step": 7
    },
    {
      "epoch": 0.31840796019900497,
      "grad_norm": 0.14338752627372742,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.6822,
      "step": 8
    },
    {
      "epoch": 0.3582089552238806,
      "grad_norm": 0.1410781592130661,
      "learning_rate": 9.484363707663442e-05,
      "loss": 0.6879,
      "step": 9
    },
    {
      "epoch": 0.39800995024875624,
      "grad_norm": 0.10805737972259521,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.6344,
      "step": 10
    },
    {
      "epoch": 0.43781094527363185,
      "grad_norm": 0.11142578721046448,
      "learning_rate": 9.157348061512727e-05,
      "loss": 0.6047,
      "step": 11
    },
    {
      "epoch": 0.47761194029850745,
      "grad_norm": 0.09217383712530136,
      "learning_rate": 8.966766701456177e-05,
      "loss": 0.6048,
      "step": 12
    },
    {
      "epoch": 0.5174129353233831,
      "grad_norm": 0.06935001164674759,
      "learning_rate": 8.759199037394887e-05,
      "loss": 0.62,
      "step": 13
    },
    {
      "epoch": 0.5572139303482587,
      "grad_norm": 0.07017137110233307,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.6364,
      "step": 14
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 0.07594887167215347,
      "learning_rate": 8.296729075500344e-05,
      "loss": 0.6402,
      "step": 15
    },
    {
      "epoch": 0.6368159203980099,
      "grad_norm": 0.07854108512401581,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.6215,
      "step": 16
    },
    {
      "epoch": 0.6766169154228856,
      "grad_norm": 0.07627039402723312,
      "learning_rate": 7.777851165098012e-05,
      "loss": 0.5876,
      "step": 17
    },
    {
      "epoch": 0.7164179104477612,
      "grad_norm": 0.09767764806747437,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.5739,
      "step": 18
    },
    {
      "epoch": 0.7562189054726368,
      "grad_norm": 0.0784846842288971,
      "learning_rate": 7.211443451095007e-05,
      "loss": 0.5987,
      "step": 19
    },
    {
      "epoch": 0.7960199004975125,
      "grad_norm": 0.0701882392168045,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.6514,
      "step": 20
    },
    {
      "epoch": 0.835820895522388,
      "grad_norm": 0.07107464969158173,
      "learning_rate": 6.607197326515808e-05,
      "loss": 0.6176,
      "step": 21
    },
    {
      "epoch": 0.8756218905472637,
      "grad_norm": 0.0965135395526886,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.5857,
      "step": 22
    },
    {
      "epoch": 0.9154228855721394,
      "grad_norm": 0.08715957403182983,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 0.5829,
      "step": 23
    },
    {
      "epoch": 0.9552238805970149,
      "grad_norm": 0.07449861615896225,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 0.5732,
      "step": 24
    },
    {
      "epoch": 0.9950248756218906,
      "grad_norm": 0.07877979427576065,
      "learning_rate": 5.327015646150716e-05,
      "loss": 0.5357,
      "step": 25
    },
    {
      "epoch": 0.9950248756218906,
      "eval_loss": 0.5807715058326721,
      "eval_runtime": 11.1585,
      "eval_samples_per_second": 15.235,
      "eval_steps_per_second": 1.972,
      "step": 25
    },
    {
      "epoch": 1.0348258706467661,
      "grad_norm": 0.22895914316177368,
      "learning_rate": 5e-05,
      "loss": 1.1264,
      "step": 26
    },
    {
      "epoch": 1.0746268656716418,
      "grad_norm": 0.14677956700325012,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 0.5921,
      "step": 27
    },
    {
      "epoch": 1.1144278606965174,
      "grad_norm": 0.11105823516845703,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.5877,
      "step": 28
    },
    {
      "epoch": 1.154228855721393,
      "grad_norm": 0.1271418184041977,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 0.5577,
      "step": 29
    },
    {
      "epoch": 1.1940298507462686,
      "grad_norm": 0.1278061419725418,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.5555,
      "step": 30
    },
    {
      "epoch": 1.2338308457711442,
      "grad_norm": 0.14156891405582428,
      "learning_rate": 3.392802673484193e-05,
      "loss": 0.5235,
      "step": 31
    },
    {
      "epoch": 1.2736318407960199,
      "grad_norm": 0.11495278775691986,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.5603,
      "step": 32
    },
    {
      "epoch": 1.3134328358208955,
      "grad_norm": 0.11897273361682892,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 0.5988,
      "step": 33
    },
    {
      "epoch": 1.3532338308457712,
      "grad_norm": 0.12612038850784302,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.5884,
      "step": 34
    },
    {
      "epoch": 1.3930348258706466,
      "grad_norm": 0.11991294473409653,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 0.5569,
      "step": 35
    },
    {
      "epoch": 1.4328358208955223,
      "grad_norm": 0.1252669394016266,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.5409,
      "step": 36
    },
    {
      "epoch": 1.472636815920398,
      "grad_norm": 0.1387309730052948,
      "learning_rate": 1.703270924499656e-05,
      "loss": 0.5438,
      "step": 37
    },
    {
      "epoch": 1.5124378109452736,
      "grad_norm": 0.1291898936033249,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.5205,
      "step": 38
    },
    {
      "epoch": 1.5522388059701493,
      "grad_norm": 0.12257789820432663,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 0.5969,
      "step": 39
    },
    {
      "epoch": 1.5920398009950247,
      "grad_norm": 0.12527777254581451,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.56,
      "step": 40
    },
    {
      "epoch": 1.6318407960199006,
      "grad_norm": 0.12541717290878296,
      "learning_rate": 8.426519384872733e-06,
      "loss": 0.5398,
      "step": 41
    },
    {
      "epoch": 1.671641791044776,
      "grad_norm": 0.12988954782485962,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.5432,
      "step": 42
    },
    {
      "epoch": 1.7114427860696517,
      "grad_norm": 0.13142308592796326,
      "learning_rate": 5.156362923365588e-06,
      "loss": 0.553,
      "step": 43
    },
    {
      "epoch": 1.7512437810945274,
      "grad_norm": 0.1271471232175827,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.4937,
      "step": 44
    },
    {
      "epoch": 1.7910447761194028,
      "grad_norm": 0.11726174503564835,
      "learning_rate": 2.653493525244721e-06,
      "loss": 0.6301,
      "step": 45
    },
    {
      "epoch": 1.8308457711442787,
      "grad_norm": 0.12278467416763306,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.5612,
      "step": 46
    },
    {
      "epoch": 1.8706467661691542,
      "grad_norm": 0.12573637068271637,
      "learning_rate": 9.607359798384785e-07,
      "loss": 0.5555,
      "step": 47
    },
    {
      "epoch": 1.9104477611940298,
      "grad_norm": 0.12766633927822113,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.5573,
      "step": 48
    },
    {
      "epoch": 1.9502487562189055,
      "grad_norm": 0.14149890840053558,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 0.5547,
      "step": 49
    },
    {
      "epoch": 1.9900497512437811,
      "grad_norm": 0.14994728565216064,
      "learning_rate": 0.0,
      "loss": 0.4937,
      "step": 50
    },
    {
      "epoch": 1.9900497512437811,
      "eval_loss": 0.5620399713516235,
      "eval_runtime": 11.1558,
      "eval_samples_per_second": 15.239,
      "eval_steps_per_second": 1.972,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.259156315439104e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}