{
  "best_metric": 0.738952100276947,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 2.443768996960486,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0486322188449848,
      "grad_norm": 122.75741577148438,
      "learning_rate": 5e-05,
      "loss": 3.2344,
      "step": 1
    },
    {
      "epoch": 0.0486322188449848,
      "eval_loss": 5.1622395515441895,
      "eval_runtime": 3.688,
      "eval_samples_per_second": 37.69,
      "eval_steps_per_second": 4.881,
      "step": 1
    },
    {
      "epoch": 0.0972644376899696,
      "grad_norm": 69.6625747680664,
      "learning_rate": 0.0001,
      "loss": 3.1183,
      "step": 2
    },
    {
      "epoch": 0.1458966565349544,
      "grad_norm": 94.78073120117188,
      "learning_rate": 9.989294616193017e-05,
      "loss": 4.9245,
      "step": 3
    },
    {
      "epoch": 0.1945288753799392,
      "grad_norm": 109.74983978271484,
      "learning_rate": 9.957224306869053e-05,
      "loss": 5.4097,
      "step": 4
    },
    {
      "epoch": 0.24316109422492402,
      "grad_norm": 123.89290618896484,
      "learning_rate": 9.903926402016153e-05,
      "loss": 5.9994,
      "step": 5
    },
    {
      "epoch": 0.2917933130699088,
      "grad_norm": 58.025917053222656,
      "learning_rate": 9.829629131445342e-05,
      "loss": 1.7611,
      "step": 6
    },
    {
      "epoch": 0.3404255319148936,
      "grad_norm": 58.173011779785156,
      "learning_rate": 9.73465064747553e-05,
      "loss": 1.4281,
      "step": 7
    },
    {
      "epoch": 0.3890577507598784,
      "grad_norm": 61.43299865722656,
      "learning_rate": 9.619397662556435e-05,
      "loss": 2.3761,
      "step": 8
    },
    {
      "epoch": 0.4376899696048632,
      "grad_norm": 86.56910705566406,
      "learning_rate": 9.484363707663442e-05,
      "loss": 2.6181,
      "step": 9
    },
    {
      "epoch": 0.48632218844984804,
      "grad_norm": 123.04452514648438,
      "learning_rate": 9.330127018922194e-05,
      "loss": 3.0133,
      "step": 10
    },
    {
      "epoch": 0.5349544072948328,
      "grad_norm": 64.24591064453125,
      "learning_rate": 9.157348061512727e-05,
      "loss": 1.0057,
      "step": 11
    },
    {
      "epoch": 0.5835866261398176,
      "grad_norm": 50.80318832397461,
      "learning_rate": 8.966766701456177e-05,
      "loss": 0.7955,
      "step": 12
    },
    {
      "epoch": 0.6322188449848024,
      "grad_norm": 103.4283447265625,
      "learning_rate": 8.759199037394887e-05,
      "loss": 1.5451,
      "step": 13
    },
    {
      "epoch": 0.6808510638297872,
      "grad_norm": 101.22402954101562,
      "learning_rate": 8.535533905932738e-05,
      "loss": 1.5553,
      "step": 14
    },
    {
      "epoch": 0.729483282674772,
      "grad_norm": 70.3960189819336,
      "learning_rate": 8.296729075500344e-05,
      "loss": 1.5866,
      "step": 15
    },
    {
      "epoch": 0.7781155015197568,
      "grad_norm": 107.30152130126953,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.894,
      "step": 16
    },
    {
      "epoch": 0.8267477203647416,
      "grad_norm": 185.14161682128906,
      "learning_rate": 7.777851165098012e-05,
      "loss": 1.0501,
      "step": 17
    },
    {
      "epoch": 0.8753799392097265,
      "grad_norm": 63.85137176513672,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.1898,
      "step": 18
    },
    {
      "epoch": 0.9240121580547113,
      "grad_norm": 68.57901763916016,
      "learning_rate": 7.211443451095007e-05,
      "loss": 1.1855,
      "step": 19
    },
    {
      "epoch": 0.9726443768996961,
      "grad_norm": 81.24263000488281,
      "learning_rate": 6.91341716182545e-05,
      "loss": 1.1483,
      "step": 20
    },
    {
      "epoch": 1.027355623100304,
      "grad_norm": 39.203556060791016,
      "learning_rate": 6.607197326515808e-05,
      "loss": 0.6328,
      "step": 21
    },
    {
      "epoch": 1.0759878419452888,
      "grad_norm": 54.474021911621094,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.6113,
      "step": 22
    },
    {
      "epoch": 1.1246200607902737,
      "grad_norm": 46.96053695678711,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 0.7844,
      "step": 23
    },
    {
      "epoch": 1.1732522796352582,
      "grad_norm": 56.810081481933594,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 1.1084,
      "step": 24
    },
    {
      "epoch": 1.221884498480243,
      "grad_norm": 121.81807708740234,
      "learning_rate": 5.327015646150716e-05,
      "loss": 1.0985,
      "step": 25
    },
    {
      "epoch": 1.221884498480243,
      "eval_loss": 0.9295913577079773,
      "eval_runtime": 3.6736,
      "eval_samples_per_second": 37.837,
      "eval_steps_per_second": 4.9,
      "step": 25
    },
    {
      "epoch": 1.2705167173252279,
      "grad_norm": 57.16689682006836,
      "learning_rate": 5e-05,
      "loss": 0.8051,
      "step": 26
    },
    {
      "epoch": 1.3191489361702127,
      "grad_norm": 32.02121353149414,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 0.4653,
      "step": 27
    },
    {
      "epoch": 1.3677811550151975,
      "grad_norm": 48.5133056640625,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.825,
      "step": 28
    },
    {
      "epoch": 1.4164133738601823,
      "grad_norm": 66.22747039794922,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 0.856,
      "step": 29
    },
    {
      "epoch": 1.465045592705167,
      "grad_norm": 65.52440643310547,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.9969,
      "step": 30
    },
    {
      "epoch": 1.513677811550152,
      "grad_norm": 52.595699310302734,
      "learning_rate": 3.392802673484193e-05,
      "loss": 0.7564,
      "step": 31
    },
    {
      "epoch": 1.5623100303951367,
      "grad_norm": 56.82194519042969,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.5235,
      "step": 32
    },
    {
      "epoch": 1.6109422492401215,
      "grad_norm": 49.04568862915039,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 0.7778,
      "step": 33
    },
    {
      "epoch": 1.6595744680851063,
      "grad_norm": 61.08024978637695,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.8873,
      "step": 34
    },
    {
      "epoch": 1.7082066869300911,
      "grad_norm": 59.74906921386719,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 0.772,
      "step": 35
    },
    {
      "epoch": 1.756838905775076,
      "grad_norm": 39.438392639160156,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.8044,
      "step": 36
    },
    {
      "epoch": 1.8054711246200608,
      "grad_norm": 44.79551696777344,
      "learning_rate": 1.703270924499656e-05,
      "loss": 0.5302,
      "step": 37
    },
    {
      "epoch": 1.8541033434650456,
      "grad_norm": 41.50120162963867,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.6659,
      "step": 38
    },
    {
      "epoch": 1.9027355623100304,
      "grad_norm": 92.41071319580078,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 0.8416,
      "step": 39
    },
    {
      "epoch": 1.9513677811550152,
      "grad_norm": 57.740657806396484,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.7625,
      "step": 40
    },
    {
      "epoch": 2.0060790273556233,
      "grad_norm": 37.433349609375,
      "learning_rate": 8.426519384872733e-06,
      "loss": 0.7985,
      "step": 41
    },
    {
      "epoch": 2.054711246200608,
      "grad_norm": 32.22288513183594,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.5062,
      "step": 42
    },
    {
      "epoch": 2.103343465045593,
      "grad_norm": 32.619056701660156,
      "learning_rate": 5.156362923365588e-06,
      "loss": 0.558,
      "step": 43
    },
    {
      "epoch": 2.1519756838905777,
      "grad_norm": 35.74100112915039,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.7794,
      "step": 44
    },
    {
      "epoch": 2.2006079027355625,
      "grad_norm": 38.613834381103516,
      "learning_rate": 2.653493525244721e-06,
      "loss": 0.7587,
      "step": 45
    },
    {
      "epoch": 2.2492401215805473,
      "grad_norm": 43.844451904296875,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.8223,
      "step": 46
    },
    {
      "epoch": 2.297872340425532,
      "grad_norm": 27.004453659057617,
      "learning_rate": 9.607359798384785e-07,
      "loss": 0.4098,
      "step": 47
    },
    {
      "epoch": 2.3465045592705165,
      "grad_norm": 28.623008728027344,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.5683,
      "step": 48
    },
    {
      "epoch": 2.3951367781155017,
      "grad_norm": 36.22129821777344,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 0.8083,
      "step": 49
    },
    {
      "epoch": 2.443768996960486,
      "grad_norm": 42.232688903808594,
      "learning_rate": 0.0,
      "loss": 0.9155,
      "step": 50
    },
    {
      "epoch": 2.443768996960486,
      "eval_loss": 0.738952100276947,
      "eval_runtime": 3.6618,
      "eval_samples_per_second": 37.96,
      "eval_steps_per_second": 4.916,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.70274988097536e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}