{
  "best_metric": null,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 3.032258064516129,
  "eval_steps": 25,
  "global_step": 47,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06451612903225806,
      "grad_norm": null,
      "learning_rate": 5e-05,
      "loss": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06451612903225806,
      "eval_loss": null,
      "eval_runtime": 3.4585,
      "eval_samples_per_second": 14.457,
      "eval_steps_per_second": 3.759,
      "step": 1
    },
    {
      "epoch": 0.12903225806451613,
      "grad_norm": null,
      "learning_rate": 0.0001,
      "loss": 0.0,
      "step": 2
    },
    {
      "epoch": 0.1935483870967742,
      "grad_norm": null,
      "learning_rate": 9.989038226169209e-05,
      "loss": 0.0,
      "step": 3
    },
    {
      "epoch": 0.25806451612903225,
      "grad_norm": null,
      "learning_rate": 9.956206309337068e-05,
      "loss": 0.0,
      "step": 4
    },
    {
      "epoch": 0.3225806451612903,
      "grad_norm": null,
      "learning_rate": 9.901664203302126e-05,
      "loss": 0.0,
      "step": 5
    },
    {
      "epoch": 0.3870967741935484,
      "grad_norm": null,
      "learning_rate": 9.825677631722435e-05,
      "loss": 0.0,
      "step": 6
    },
    {
      "epoch": 0.45161290322580644,
      "grad_norm": null,
      "learning_rate": 9.728616793536588e-05,
      "loss": 0.0,
      "step": 7
    },
    {
      "epoch": 0.5161290322580645,
      "grad_norm": null,
      "learning_rate": 9.610954559391703e-05,
      "loss": 0.0,
      "step": 8
    },
    {
      "epoch": 0.5806451612903226,
      "grad_norm": null,
      "learning_rate": 9.473264167865173e-05,
      "loss": 0.0,
      "step": 9
    },
    {
      "epoch": 0.6451612903225806,
      "grad_norm": null,
      "learning_rate": 9.316216432703917e-05,
      "loss": 0.0,
      "step": 10
    },
    {
      "epoch": 0.7096774193548387,
      "grad_norm": null,
      "learning_rate": 9.140576474687264e-05,
      "loss": 0.0,
      "step": 11
    },
    {
      "epoch": 0.7741935483870968,
      "grad_norm": null,
      "learning_rate": 8.947199994035401e-05,
      "loss": 0.0,
      "step": 12
    },
    {
      "epoch": 0.8387096774193549,
      "grad_norm": null,
      "learning_rate": 8.73702910152393e-05,
      "loss": 0.0,
      "step": 13
    },
    {
      "epoch": 0.9032258064516129,
      "grad_norm": null,
      "learning_rate": 8.511087728614862e-05,
      "loss": 0.0,
      "step": 14
    },
    {
      "epoch": 0.967741935483871,
      "grad_norm": null,
      "learning_rate": 8.270476638965462e-05,
      "loss": 0.0,
      "step": 15
    },
    {
      "epoch": 1.032258064516129,
      "grad_norm": null,
      "learning_rate": 8.016368065618361e-05,
      "loss": 0.0,
      "step": 16
    },
    {
      "epoch": 1.096774193548387,
      "grad_norm": null,
      "learning_rate": 7.75e-05,
      "loss": 0.0,
      "step": 17
    },
    {
      "epoch": 1.1612903225806452,
      "grad_norm": null,
      "learning_rate": 7.472670160550849e-05,
      "loss": 0.0,
      "step": 18
    },
    {
      "epoch": 1.2258064516129032,
      "grad_norm": null,
      "learning_rate": 7.185729670371605e-05,
      "loss": 0.0,
      "step": 19
    },
    {
      "epoch": 1.2903225806451613,
      "grad_norm": null,
      "learning_rate": 6.890576474687263e-05,
      "loss": 0.0,
      "step": 20
    },
    {
      "epoch": 1.3548387096774195,
      "grad_norm": null,
      "learning_rate": 6.588648530198504e-05,
      "loss": 0.0,
      "step": 21
    },
    {
      "epoch": 1.4193548387096775,
      "grad_norm": null,
      "learning_rate": 6.281416799501188e-05,
      "loss": 0.0,
      "step": 22
    },
    {
      "epoch": 1.4838709677419355,
      "grad_norm": null,
      "learning_rate": 5.970378084704441e-05,
      "loss": 0.0,
      "step": 23
    },
    {
      "epoch": 1.5483870967741935,
      "grad_norm": null,
      "learning_rate": 5.657047735161256e-05,
      "loss": 0.0,
      "step": 24
    },
    {
      "epoch": 1.6129032258064515,
      "grad_norm": null,
      "learning_rate": 5.342952264838747e-05,
      "loss": 0.0,
      "step": 25
    },
    {
      "epoch": 1.6129032258064515,
      "eval_loss": null,
      "eval_runtime": 3.0471,
      "eval_samples_per_second": 16.409,
      "eval_steps_per_second": 4.266,
      "step": 25
    },
    {
      "epoch": 1.6774193548387095,
      "grad_norm": null,
      "learning_rate": 5.0296219152955604e-05,
      "loss": 0.0,
      "step": 26
    },
    {
      "epoch": 1.7419354838709677,
      "grad_norm": null,
      "learning_rate": 4.718583200498814e-05,
      "loss": 0.0,
      "step": 27
    },
    {
      "epoch": 1.8064516129032258,
      "grad_norm": null,
      "learning_rate": 4.411351469801496e-05,
      "loss": 0.0,
      "step": 28
    },
    {
      "epoch": 1.870967741935484,
      "grad_norm": null,
      "learning_rate": 4.109423525312738e-05,
      "loss": 0.0,
      "step": 29
    },
    {
      "epoch": 1.935483870967742,
      "grad_norm": null,
      "learning_rate": 3.814270329628396e-05,
      "loss": 0.0,
      "step": 30
    },
    {
      "epoch": 2.0,
      "grad_norm": null,
      "learning_rate": 3.5273298394491515e-05,
      "loss": 0.0,
      "step": 31
    },
    {
      "epoch": 2.064516129032258,
      "grad_norm": null,
      "learning_rate": 3.250000000000001e-05,
      "loss": 0.0,
      "step": 32
    },
    {
      "epoch": 2.129032258064516,
      "grad_norm": null,
      "learning_rate": 2.98363193438164e-05,
      "loss": 0.0,
      "step": 33
    },
    {
      "epoch": 2.193548387096774,
      "grad_norm": null,
      "learning_rate": 2.729523361034538e-05,
      "loss": 0.0,
      "step": 34
    },
    {
      "epoch": 2.258064516129032,
      "grad_norm": null,
      "learning_rate": 2.4889122713851394e-05,
      "loss": 0.0,
      "step": 35
    },
    {
      "epoch": 2.3225806451612905,
      "grad_norm": null,
      "learning_rate": 2.2629708984760708e-05,
      "loss": 0.0,
      "step": 36
    },
    {
      "epoch": 2.3870967741935485,
      "grad_norm": null,
      "learning_rate": 2.0528000059645997e-05,
      "loss": 0.0,
      "step": 37
    },
    {
      "epoch": 2.4516129032258065,
      "grad_norm": null,
      "learning_rate": 1.8594235253127375e-05,
      "loss": 0.0,
      "step": 38
    },
    {
      "epoch": 2.5161290322580645,
      "grad_norm": null,
      "learning_rate": 1.6837835672960835e-05,
      "loss": 0.0,
      "step": 39
    },
    {
      "epoch": 2.5806451612903225,
      "grad_norm": null,
      "learning_rate": 1.526735832134829e-05,
      "loss": 0.0,
      "step": 40
    },
    {
      "epoch": 2.6451612903225805,
      "grad_norm": null,
      "learning_rate": 1.389045440608296e-05,
      "loss": 0.0,
      "step": 41
    },
    {
      "epoch": 2.709677419354839,
      "grad_norm": null,
      "learning_rate": 1.2713832064634126e-05,
      "loss": 0.0,
      "step": 42
    },
    {
      "epoch": 2.774193548387097,
      "grad_norm": null,
      "learning_rate": 1.174322368277565e-05,
      "loss": 0.0,
      "step": 43
    },
    {
      "epoch": 2.838709677419355,
      "grad_norm": null,
      "learning_rate": 1.0983357966978745e-05,
      "loss": 0.0,
      "step": 44
    },
    {
      "epoch": 2.903225806451613,
      "grad_norm": null,
      "learning_rate": 1.0437936906629336e-05,
      "loss": 0.0,
      "step": 45
    },
    {
      "epoch": 2.967741935483871,
      "grad_norm": null,
      "learning_rate": 1.0109617738307912e-05,
      "loss": 0.0,
      "step": 46
    },
    {
      "epoch": 3.032258064516129,
      "grad_norm": null,
      "learning_rate": 1e-05,
      "loss": 0.0,
      "step": 47
    }
  ],
  "logging_steps": 1,
  "max_steps": 47,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.943606936512758e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}