{
  "best_metric": 1.6913299560546875,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 0.016559032952475575,
  "eval_steps": 25,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.000662361318099023,
      "grad_norm": 40.82220458984375,
      "learning_rate": 5e-05,
      "loss": 27.5824,
      "step": 1
    },
    {
      "epoch": 0.000662361318099023,
      "eval_loss": 2.2696444988250732,
      "eval_runtime": 432.347,
      "eval_samples_per_second": 23.525,
      "eval_steps_per_second": 2.942,
      "step": 1
    },
    {
      "epoch": 0.001324722636198046,
      "grad_norm": 36.23447799682617,
      "learning_rate": 0.0001,
      "loss": 27.0301,
      "step": 2
    },
    {
      "epoch": 0.001987083954297069,
      "grad_norm": 41.014190673828125,
      "learning_rate": 9.989294616193017e-05,
      "loss": 29.9936,
      "step": 3
    },
    {
      "epoch": 0.002649445272396092,
      "grad_norm": 36.32714080810547,
      "learning_rate": 9.957224306869053e-05,
      "loss": 27.1201,
      "step": 4
    },
    {
      "epoch": 0.003311806590495115,
      "grad_norm": 36.35200500488281,
      "learning_rate": 9.903926402016153e-05,
      "loss": 30.1767,
      "step": 5
    },
    {
      "epoch": 0.003974167908594138,
      "grad_norm": 34.66734313964844,
      "learning_rate": 9.829629131445342e-05,
      "loss": 32.16,
      "step": 6
    },
    {
      "epoch": 0.004636529226693161,
      "grad_norm": 27.75533103942871,
      "learning_rate": 9.73465064747553e-05,
      "loss": 28.1419,
      "step": 7
    },
    {
      "epoch": 0.005298890544792184,
      "grad_norm": 39.82036209106445,
      "learning_rate": 9.619397662556435e-05,
      "loss": 31.4935,
      "step": 8
    },
    {
      "epoch": 0.005961251862891207,
      "grad_norm": 32.18791198730469,
      "learning_rate": 9.484363707663442e-05,
      "loss": 29.9285,
      "step": 9
    },
    {
      "epoch": 0.00662361318099023,
      "grad_norm": 33.4499626159668,
      "learning_rate": 9.330127018922194e-05,
      "loss": 29.7212,
      "step": 10
    },
    {
      "epoch": 0.007285974499089253,
      "grad_norm": 38.62579345703125,
      "learning_rate": 9.157348061512727e-05,
      "loss": 33.2777,
      "step": 11
    },
    {
      "epoch": 0.007948335817188276,
      "grad_norm": 36.961639404296875,
      "learning_rate": 8.966766701456177e-05,
      "loss": 31.1595,
      "step": 12
    },
    {
      "epoch": 0.0086106971352873,
      "grad_norm": 37.11787414550781,
      "learning_rate": 8.759199037394887e-05,
      "loss": 30.9798,
      "step": 13
    },
    {
      "epoch": 0.009273058453386322,
      "grad_norm": 35.7690315246582,
      "learning_rate": 8.535533905932738e-05,
      "loss": 26.4066,
      "step": 14
    },
    {
      "epoch": 0.009935419771485345,
      "grad_norm": 25.68424415588379,
      "learning_rate": 8.296729075500344e-05,
      "loss": 26.8711,
      "step": 15
    },
    {
      "epoch": 0.010597781089584368,
      "grad_norm": 20.35919189453125,
      "learning_rate": 8.043807145043604e-05,
      "loss": 24.8348,
      "step": 16
    },
    {
      "epoch": 0.011260142407683391,
      "grad_norm": 20.65962028503418,
      "learning_rate": 7.777851165098012e-05,
      "loss": 24.6985,
      "step": 17
    },
    {
      "epoch": 0.011922503725782414,
      "grad_norm": 20.867441177368164,
      "learning_rate": 7.500000000000001e-05,
      "loss": 26.0421,
      "step": 18
    },
    {
      "epoch": 0.012584865043881437,
      "grad_norm": 23.38364028930664,
      "learning_rate": 7.211443451095007e-05,
      "loss": 25.5464,
      "step": 19
    },
    {
      "epoch": 0.01324722636198046,
      "grad_norm": 24.096189498901367,
      "learning_rate": 6.91341716182545e-05,
      "loss": 26.3065,
      "step": 20
    },
    {
      "epoch": 0.013909587680079483,
      "grad_norm": 25.358400344848633,
      "learning_rate": 6.607197326515808e-05,
      "loss": 27.5427,
      "step": 21
    },
    {
      "epoch": 0.014571948998178506,
      "grad_norm": 25.04808235168457,
      "learning_rate": 6.294095225512603e-05,
      "loss": 27.4076,
      "step": 22
    },
    {
      "epoch": 0.01523431031627753,
      "grad_norm": 29.721607208251953,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 27.733,
      "step": 23
    },
    {
      "epoch": 0.015896671634376552,
      "grad_norm": 34.963172912597656,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 29.4103,
      "step": 24
    },
    {
      "epoch": 0.016559032952475575,
      "grad_norm": 36.04214096069336,
      "learning_rate": 5.327015646150716e-05,
      "loss": 29.9496,
      "step": 25
    },
    {
      "epoch": 0.016559032952475575,
      "eval_loss": 1.6913299560546875,
      "eval_runtime": 435.6417,
      "eval_samples_per_second": 23.347,
      "eval_steps_per_second": 2.92,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.217745394630656e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}