{
  "best_metric": 1.733486294746399,
  "best_model_checkpoint": "miner_id_24/checkpoint-30",
  "epoch": 0.0015142720137293997,
  "eval_steps": 5,
  "global_step": 30,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 5.0475733790979985e-05,
      "eval_loss": 1.9227821826934814,
      "eval_runtime": 1395.4982,
      "eval_samples_per_second": 5.978,
      "eval_steps_per_second": 2.989,
      "step": 1
    },
    {
      "epoch": 0.00015142720137293996,
      "grad_norm": 0.37583640217781067,
      "learning_rate": 3e-05,
      "loss": 1.9299,
      "step": 3
    },
    {
      "epoch": 0.0002523786689548999,
      "eval_loss": 1.9086353778839111,
      "eval_runtime": 1404.9667,
      "eval_samples_per_second": 5.938,
      "eval_steps_per_second": 2.969,
      "step": 5
    },
    {
      "epoch": 0.0003028544027458799,
      "grad_norm": 0.4073977768421173,
      "learning_rate": 6e-05,
      "loss": 1.8799,
      "step": 6
    },
    {
      "epoch": 0.00045428160411881987,
      "grad_norm": 0.49786943197250366,
      "learning_rate": 9e-05,
      "loss": 1.8351,
      "step": 9
    },
    {
      "epoch": 0.0005047573379097999,
      "eval_loss": 1.800294280052185,
      "eval_runtime": 1404.6504,
      "eval_samples_per_second": 5.939,
      "eval_steps_per_second": 2.969,
      "step": 10
    },
    {
      "epoch": 0.0006057088054917598,
      "grad_norm": 0.31189802289009094,
      "learning_rate": 9.755282581475769e-05,
      "loss": 1.7429,
      "step": 12
    },
    {
      "epoch": 0.0007571360068646998,
      "grad_norm": 0.49632543325424194,
      "learning_rate": 8.535533905932738e-05,
      "loss": 1.7142,
      "step": 15
    },
    {
      "epoch": 0.0007571360068646998,
      "eval_loss": 1.7793023586273193,
      "eval_runtime": 1404.032,
      "eval_samples_per_second": 5.941,
      "eval_steps_per_second": 2.971,
      "step": 15
    },
    {
      "epoch": 0.0009085632082376397,
      "grad_norm": 0.2652880847454071,
      "learning_rate": 6.545084971874738e-05,
      "loss": 1.773,
      "step": 18
    },
    {
      "epoch": 0.0010095146758195997,
      "eval_loss": 1.7412056922912598,
      "eval_runtime": 1403.7973,
      "eval_samples_per_second": 5.942,
      "eval_steps_per_second": 2.971,
      "step": 20
    },
    {
      "epoch": 0.0010599904096105796,
      "grad_norm": 0.23081372678279877,
      "learning_rate": 4.2178276747988446e-05,
      "loss": 1.8133,
      "step": 21
    },
    {
      "epoch": 0.0012114176109835196,
      "grad_norm": 0.24668963253498077,
      "learning_rate": 2.061073738537635e-05,
      "loss": 1.7785,
      "step": 24
    },
    {
      "epoch": 0.0012618933447744996,
      "eval_loss": 1.735094666481018,
      "eval_runtime": 1403.5657,
      "eval_samples_per_second": 5.943,
      "eval_steps_per_second": 2.972,
      "step": 25
    },
    {
      "epoch": 0.0013628448123564597,
      "grad_norm": 0.2573795020580292,
      "learning_rate": 5.449673790581611e-06,
      "loss": 1.6955,
      "step": 27
    },
    {
      "epoch": 0.0015142720137293997,
      "grad_norm": 0.25954729318618774,
      "learning_rate": 0.0,
      "loss": 1.7924,
      "step": 30
    },
    {
      "epoch": 0.0015142720137293997,
      "eval_loss": 1.733486294746399,
      "eval_runtime": 1403.6226,
      "eval_samples_per_second": 5.943,
      "eval_steps_per_second": 2.972,
      "step": 30
    }
  ],
  "logging_steps": 3,
  "max_steps": 30,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.388598679044096e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}