{
  "best_metric": 1.6017259359359741,
  "best_model_checkpoint": "miner_id_24/checkpoint-10",
  "epoch": 0.024524831391784182,
  "eval_steps": 5,
  "global_step": 10,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002452483139178418,
      "grad_norm": 1.2774654626846313,
      "learning_rate": 2e-05,
      "loss": 2.0027,
      "step": 1
    },
    {
      "epoch": 0.002452483139178418,
      "eval_loss": 1.831758737564087,
      "eval_runtime": 57.2929,
      "eval_samples_per_second": 3.002,
      "eval_steps_per_second": 1.501,
      "step": 1
    },
    {
      "epoch": 0.004904966278356836,
      "grad_norm": 1.1944187879562378,
      "learning_rate": 4e-05,
      "loss": 2.0304,
      "step": 2
    },
    {
      "epoch": 0.007357449417535254,
      "grad_norm": 1.1539106369018555,
      "learning_rate": 6e-05,
      "loss": 1.8825,
      "step": 3
    },
    {
      "epoch": 0.009809932556713672,
      "grad_norm": 0.9850175976753235,
      "learning_rate": 8e-05,
      "loss": 1.933,
      "step": 4
    },
    {
      "epoch": 0.012262415695892091,
      "grad_norm": 1.0971778631210327,
      "learning_rate": 0.0001,
      "loss": 1.6274,
      "step": 5
    },
    {
      "epoch": 0.012262415695892091,
      "eval_loss": 1.81316077709198,
      "eval_runtime": 58.6601,
      "eval_samples_per_second": 2.932,
      "eval_steps_per_second": 1.466,
      "step": 5
    },
    {
      "epoch": 0.014714898835070508,
      "grad_norm": 1.106279969215393,
      "learning_rate": 0.00012,
      "loss": 1.5355,
      "step": 6
    },
    {
      "epoch": 0.017167381974248927,
      "grad_norm": 1.2348371744155884,
      "learning_rate": 0.00014,
      "loss": 1.866,
      "step": 7
    },
    {
      "epoch": 0.019619865113427344,
      "grad_norm": 1.3462843894958496,
      "learning_rate": 0.00016,
      "loss": 1.9333,
      "step": 8
    },
    {
      "epoch": 0.022072348252605765,
      "grad_norm": 1.5873693227767944,
      "learning_rate": 0.00018,
      "loss": 1.7938,
      "step": 9
    },
    {
      "epoch": 0.024524831391784182,
      "grad_norm": 1.4388872385025024,
      "learning_rate": 0.0002,
      "loss": 1.5617,
      "step": 10
    },
    {
      "epoch": 0.024524831391784182,
      "eval_loss": 1.6017259359359741,
      "eval_runtime": 57.6772,
      "eval_samples_per_second": 2.982,
      "eval_steps_per_second": 1.491,
      "step": 10
    }
  ],
  "logging_steps": 1,
  "max_steps": 25,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 2,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3135311286632448.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}