|
{
  "best_metric": 2.088284492492676,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.050100200400801605,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.001002004008016032,
      "grad_norm": 1.9892041683197021,
      "learning_rate": 1e-05,
      "loss": 3.228,
      "step": 1
    },
    {
      "epoch": 0.001002004008016032,
      "eval_loss": 3.713639497756958,
      "eval_runtime": 9.9764,
      "eval_samples_per_second": 168.497,
      "eval_steps_per_second": 21.15,
      "step": 1
    },
    {
      "epoch": 0.002004008016032064,
      "grad_norm": 2.06142520904541,
      "learning_rate": 2e-05,
      "loss": 3.3001,
      "step": 2
    },
    {
      "epoch": 0.003006012024048096,
      "grad_norm": 1.839006781578064,
      "learning_rate": 3e-05,
      "loss": 3.1721,
      "step": 3
    },
    {
      "epoch": 0.004008016032064128,
      "grad_norm": 2.000424861907959,
      "learning_rate": 4e-05,
      "loss": 3.388,
      "step": 4
    },
    {
      "epoch": 0.00501002004008016,
      "grad_norm": 1.9487165212631226,
      "learning_rate": 5e-05,
      "loss": 3.2505,
      "step": 5
    },
    {
      "epoch": 0.006012024048096192,
      "grad_norm": 1.8743599653244019,
      "learning_rate": 6e-05,
      "loss": 3.3262,
      "step": 6
    },
    {
      "epoch": 0.0070140280561122245,
      "grad_norm": 1.6559370756149292,
      "learning_rate": 7e-05,
      "loss": 3.2131,
      "step": 7
    },
    {
      "epoch": 0.008016032064128256,
      "grad_norm": 1.6868617534637451,
      "learning_rate": 8e-05,
      "loss": 3.1389,
      "step": 8
    },
    {
      "epoch": 0.009018036072144289,
      "grad_norm": 1.593652606010437,
      "learning_rate": 9e-05,
      "loss": 3.1291,
      "step": 9
    },
    {
      "epoch": 0.01002004008016032,
      "grad_norm": 1.531032681465149,
      "learning_rate": 0.0001,
      "loss": 3.1035,
      "step": 10
    },
    {
      "epoch": 0.011022044088176353,
      "grad_norm": 1.59713876247406,
      "learning_rate": 9.99695413509548e-05,
      "loss": 3.0948,
      "step": 11
    },
    {
      "epoch": 0.012024048096192385,
      "grad_norm": 1.5284147262573242,
      "learning_rate": 9.987820251299122e-05,
      "loss": 2.8201,
      "step": 12
    },
    {
      "epoch": 0.013026052104208416,
      "grad_norm": 1.542348027229309,
      "learning_rate": 9.972609476841367e-05,
      "loss": 2.8589,
      "step": 13
    },
    {
      "epoch": 0.014028056112224449,
      "grad_norm": 1.7238450050354004,
      "learning_rate": 9.951340343707852e-05,
      "loss": 2.8647,
      "step": 14
    },
    {
      "epoch": 0.01503006012024048,
      "grad_norm": 1.7485935688018799,
      "learning_rate": 9.924038765061042e-05,
      "loss": 2.7815,
      "step": 15
    },
    {
      "epoch": 0.01603206412825651,
      "grad_norm": 1.9041484594345093,
      "learning_rate": 9.890738003669029e-05,
      "loss": 2.751,
      "step": 16
    },
    {
      "epoch": 0.017034068136272545,
      "grad_norm": 1.754050850868225,
      "learning_rate": 9.851478631379982e-05,
      "loss": 2.6011,
      "step": 17
    },
    {
      "epoch": 0.018036072144288578,
      "grad_norm": 1.5712624788284302,
      "learning_rate": 9.806308479691595e-05,
      "loss": 2.6006,
      "step": 18
    },
    {
      "epoch": 0.01903807615230461,
      "grad_norm": 1.6208561658859253,
      "learning_rate": 9.755282581475769e-05,
      "loss": 2.57,
      "step": 19
    },
    {
      "epoch": 0.02004008016032064,
      "grad_norm": 1.7021604776382446,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.5362,
      "step": 20
    },
    {
      "epoch": 0.021042084168336674,
      "grad_norm": 1.6615126132965088,
      "learning_rate": 9.635919272833938e-05,
      "loss": 2.4146,
      "step": 21
    },
    {
      "epoch": 0.022044088176352707,
      "grad_norm": 1.4819920063018799,
      "learning_rate": 9.567727288213005e-05,
      "loss": 2.4761,
      "step": 22
    },
    {
      "epoch": 0.023046092184368736,
      "grad_norm": 1.5472745895385742,
      "learning_rate": 9.493970231495835e-05,
      "loss": 2.3338,
      "step": 23
    },
    {
      "epoch": 0.02404809619238477,
      "grad_norm": 1.9091078042984009,
      "learning_rate": 9.414737964294636e-05,
      "loss": 2.3523,
      "step": 24
    },
    {
      "epoch": 0.025050100200400802,
      "grad_norm": 1.7454297542572021,
      "learning_rate": 9.330127018922194e-05,
      "loss": 2.3323,
      "step": 25
    },
    {
      "epoch": 0.026052104208416832,
      "grad_norm": 1.334761142730713,
      "learning_rate": 9.24024048078213e-05,
      "loss": 2.3424,
      "step": 26
    },
    {
      "epoch": 0.027054108216432865,
      "grad_norm": 1.5471727848052979,
      "learning_rate": 9.145187862775209e-05,
      "loss": 2.3023,
      "step": 27
    },
    {
      "epoch": 0.028056112224448898,
      "grad_norm": 2.1018128395080566,
      "learning_rate": 9.045084971874738e-05,
      "loss": 2.3096,
      "step": 28
    },
    {
      "epoch": 0.02905811623246493,
      "grad_norm": 2.2362749576568604,
      "learning_rate": 8.940053768033609e-05,
      "loss": 2.4362,
      "step": 29
    },
    {
      "epoch": 0.03006012024048096,
      "grad_norm": 1.7347625494003296,
      "learning_rate": 8.83022221559489e-05,
      "loss": 2.4654,
      "step": 30
    },
    {
      "epoch": 0.031062124248496994,
      "grad_norm": 1.1744818687438965,
      "learning_rate": 8.715724127386972e-05,
      "loss": 2.2959,
      "step": 31
    },
    {
      "epoch": 0.03206412825651302,
      "grad_norm": 1.3074371814727783,
      "learning_rate": 8.596699001693255e-05,
      "loss": 2.2445,
      "step": 32
    },
    {
      "epoch": 0.033066132264529056,
      "grad_norm": 1.2176134586334229,
      "learning_rate": 8.473291852294987e-05,
      "loss": 2.212,
      "step": 33
    },
    {
      "epoch": 0.03406813627254509,
      "grad_norm": 1.319883942604065,
      "learning_rate": 8.345653031794292e-05,
      "loss": 2.2957,
      "step": 34
    },
    {
      "epoch": 0.03507014028056112,
      "grad_norm": 1.2122143507003784,
      "learning_rate": 8.213938048432697e-05,
      "loss": 2.3501,
      "step": 35
    },
    {
      "epoch": 0.036072144288577156,
      "grad_norm": 1.1644178628921509,
      "learning_rate": 8.07830737662829e-05,
      "loss": 2.226,
      "step": 36
    },
    {
      "epoch": 0.03707414829659319,
      "grad_norm": 1.1486573219299316,
      "learning_rate": 7.938926261462366e-05,
      "loss": 2.2286,
      "step": 37
    },
    {
      "epoch": 0.03807615230460922,
      "grad_norm": 1.1184206008911133,
      "learning_rate": 7.795964517353735e-05,
      "loss": 2.1459,
      "step": 38
    },
    {
      "epoch": 0.03907815631262525,
      "grad_norm": 1.2232961654663086,
      "learning_rate": 7.649596321166024e-05,
      "loss": 2.2814,
      "step": 39
    },
    {
      "epoch": 0.04008016032064128,
      "grad_norm": 1.3489000797271729,
      "learning_rate": 7.500000000000001e-05,
      "loss": 2.3264,
      "step": 40
    },
    {
      "epoch": 0.041082164328657314,
      "grad_norm": 1.279327154159546,
      "learning_rate": 7.347357813929454e-05,
      "loss": 2.3779,
      "step": 41
    },
    {
      "epoch": 0.04208416833667335,
      "grad_norm": 1.269199013710022,
      "learning_rate": 7.191855733945387e-05,
      "loss": 2.26,
      "step": 42
    },
    {
      "epoch": 0.04308617234468938,
      "grad_norm": 1.5359389781951904,
      "learning_rate": 7.033683215379002e-05,
      "loss": 2.355,
      "step": 43
    },
    {
      "epoch": 0.04408817635270541,
      "grad_norm": 1.267220377922058,
      "learning_rate": 6.873032967079561e-05,
      "loss": 2.1603,
      "step": 44
    },
    {
      "epoch": 0.045090180360721446,
      "grad_norm": 1.2115416526794434,
      "learning_rate": 6.710100716628344e-05,
      "loss": 2.0991,
      "step": 45
    },
    {
      "epoch": 0.04609218436873747,
      "grad_norm": 1.1847515106201172,
      "learning_rate": 6.545084971874738e-05,
      "loss": 2.1158,
      "step": 46
    },
    {
      "epoch": 0.047094188376753505,
      "grad_norm": 1.1961596012115479,
      "learning_rate": 6.378186779084995e-05,
      "loss": 2.1763,
      "step": 47
    },
    {
      "epoch": 0.04809619238476954,
      "grad_norm": 2.5090718269348145,
      "learning_rate": 6.209609477998338e-05,
      "loss": 2.2434,
      "step": 48
    },
    {
      "epoch": 0.04909819639278557,
      "grad_norm": 1.6135358810424805,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 2.3868,
      "step": 49
    },
    {
      "epoch": 0.050100200400801605,
      "grad_norm": 1.4976840019226074,
      "learning_rate": 5.868240888334653e-05,
      "loss": 2.3013,
      "step": 50
    },
    {
      "epoch": 0.050100200400801605,
      "eval_loss": 2.088284492492676,
      "eval_runtime": 9.5901,
      "eval_samples_per_second": 175.286,
      "eval_steps_per_second": 22.002,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1488394911744000.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|