{
  "best_metric": 0.6225014328956604,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 0.8298755186721992,
  "eval_steps": 25,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03319502074688797,
      "grad_norm": 70.7431869506836,
      "learning_rate": 5e-05,
      "loss": 23.395,
      "step": 1
    },
    {
      "epoch": 0.03319502074688797,
      "eval_loss": 1.9604383707046509,
      "eval_runtime": 2.3021,
      "eval_samples_per_second": 21.719,
      "eval_steps_per_second": 5.647,
      "step": 1
    },
    {
      "epoch": 0.06639004149377593,
      "grad_norm": 70.47954559326172,
      "learning_rate": 0.0001,
      "loss": 26.4463,
      "step": 2
    },
    {
      "epoch": 0.0995850622406639,
      "grad_norm": 81.28753662109375,
      "learning_rate": 9.990365154573717e-05,
      "loss": 24.136,
      "step": 3
    },
    {
      "epoch": 0.13278008298755187,
      "grad_norm": 50.497074127197266,
      "learning_rate": 9.961501876182148e-05,
      "loss": 18.192,
      "step": 4
    },
    {
      "epoch": 0.16597510373443983,
      "grad_norm": 55.77435302734375,
      "learning_rate": 9.913533761814537e-05,
      "loss": 15.9813,
      "step": 5
    },
    {
      "epoch": 0.1991701244813278,
      "grad_norm": 58.78676986694336,
      "learning_rate": 9.846666218300807e-05,
      "loss": 14.8981,
      "step": 6
    },
    {
      "epoch": 0.23236514522821577,
      "grad_norm": 44.817684173583984,
      "learning_rate": 9.761185582727977e-05,
      "loss": 13.3566,
      "step": 7
    },
    {
      "epoch": 0.26556016597510373,
      "grad_norm": 33.3582878112793,
      "learning_rate": 9.657457896300791e-05,
      "loss": 11.3925,
      "step": 8
    },
    {
      "epoch": 0.2987551867219917,
      "grad_norm": 22.51363182067871,
      "learning_rate": 9.535927336897098e-05,
      "loss": 10.1493,
      "step": 9
    },
    {
      "epoch": 0.33195020746887965,
      "grad_norm": 26.438974380493164,
      "learning_rate": 9.397114317029975e-05,
      "loss": 12.8564,
      "step": 10
    },
    {
      "epoch": 0.3651452282157676,
      "grad_norm": 25.190418243408203,
      "learning_rate": 9.241613255361455e-05,
      "loss": 12.1554,
      "step": 11
    },
    {
      "epoch": 0.3983402489626556,
      "grad_norm": 24.64148712158203,
      "learning_rate": 9.070090031310558e-05,
      "loss": 12.3792,
      "step": 12
    },
    {
      "epoch": 0.4315352697095436,
      "grad_norm": 21.904518127441406,
      "learning_rate": 8.883279133655399e-05,
      "loss": 12.9168,
      "step": 13
    },
    {
      "epoch": 0.46473029045643155,
      "grad_norm": 23.861278533935547,
      "learning_rate": 8.681980515339464e-05,
      "loss": 11.9802,
      "step": 14
    },
    {
      "epoch": 0.4979253112033195,
      "grad_norm": 26.47911262512207,
      "learning_rate": 8.467056167950311e-05,
      "loss": 12.5379,
      "step": 15
    },
    {
      "epoch": 0.5311203319502075,
      "grad_norm": 15.533278465270996,
      "learning_rate": 8.239426430539243e-05,
      "loss": 7.2533,
      "step": 16
    },
    {
      "epoch": 0.5643153526970954,
      "grad_norm": 19.37247657775879,
      "learning_rate": 8.000066048588211e-05,
      "loss": 9.8797,
      "step": 17
    },
    {
      "epoch": 0.5975103734439834,
      "grad_norm": 25.750633239746094,
      "learning_rate": 7.75e-05,
      "loss": 11.3919,
      "step": 18
    },
    {
      "epoch": 0.6307053941908713,
      "grad_norm": 19.59122657775879,
      "learning_rate": 7.490299105985507e-05,
      "loss": 10.1852,
      "step": 19
    },
    {
      "epoch": 0.6639004149377593,
      "grad_norm": 19.977712631225586,
      "learning_rate": 7.222075445642904e-05,
      "loss": 11.1681,
      "step": 20
    },
    {
      "epoch": 0.6970954356846473,
      "grad_norm": 21.2073974609375,
      "learning_rate": 6.946477593864228e-05,
      "loss": 9.8191,
      "step": 21
    },
    {
      "epoch": 0.7302904564315352,
      "grad_norm": 22.94188117980957,
      "learning_rate": 6.664685702961344e-05,
      "loss": 11.3084,
      "step": 22
    },
    {
      "epoch": 0.7634854771784232,
      "grad_norm": 19.7695369720459,
      "learning_rate": 6.377906449072578e-05,
      "loss": 9.4558,
      "step": 23
    },
    {
      "epoch": 0.7966804979253111,
      "grad_norm": 17.488155364990234,
      "learning_rate": 6.087367864990233e-05,
      "loss": 8.7173,
      "step": 24
    },
    {
      "epoch": 0.8298755186721992,
      "grad_norm": 17.16431999206543,
      "learning_rate": 5.794314081535644e-05,
      "loss": 9.8648,
      "step": 25
    },
    {
      "epoch": 0.8298755186721992,
      "eval_loss": 0.6225014328956604,
      "eval_runtime": 2.3004,
      "eval_samples_per_second": 21.735,
      "eval_steps_per_second": 5.651,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.829010669142016e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}