|
{
  "best_metric": NaN,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 0.2557544757033248,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005115089514066497,
      "grad_norm": NaN,
      "learning_rate": 5e-06,
      "loss": 0.0,
      "step": 1
    },
    {
      "epoch": 0.005115089514066497,
      "eval_loss": NaN,
      "eval_runtime": 24.3768,
      "eval_samples_per_second": 13.537,
      "eval_steps_per_second": 1.723,
      "step": 1
    },
    {
      "epoch": 0.010230179028132993,
      "grad_norm": NaN,
      "learning_rate": 1e-05,
      "loss": 0.0,
      "step": 2
    },
    {
      "epoch": 0.015345268542199489,
      "grad_norm": NaN,
      "learning_rate": 1.5e-05,
      "loss": 0.0,
      "step": 3
    },
    {
      "epoch": 0.020460358056265986,
      "grad_norm": NaN,
      "learning_rate": 2e-05,
      "loss": 0.0,
      "step": 4
    },
    {
      "epoch": 0.02557544757033248,
      "grad_norm": NaN,
      "learning_rate": 2.5e-05,
      "loss": 0.0,
      "step": 5
    },
    {
      "epoch": 0.030690537084398978,
      "grad_norm": NaN,
      "learning_rate": 3e-05,
      "loss": 0.0,
      "step": 6
    },
    {
      "epoch": 0.03580562659846547,
      "grad_norm": NaN,
      "learning_rate": 3.5e-05,
      "loss": 0.0,
      "step": 7
    },
    {
      "epoch": 0.04092071611253197,
      "grad_norm": NaN,
      "learning_rate": 4e-05,
      "loss": 0.0,
      "step": 8
    },
    {
      "epoch": 0.04603580562659847,
      "grad_norm": NaN,
      "learning_rate": 4.5e-05,
      "loss": 0.0,
      "step": 9
    },
    {
      "epoch": 0.05115089514066496,
      "grad_norm": NaN,
      "learning_rate": 5e-05,
      "loss": 0.0,
      "step": 10
    },
    {
      "epoch": 0.056265984654731455,
      "grad_norm": NaN,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.0,
      "step": 11
    },
    {
      "epoch": 0.061381074168797956,
      "grad_norm": NaN,
      "learning_rate": 6e-05,
      "loss": 0.0,
      "step": 12
    },
    {
      "epoch": 0.06649616368286446,
      "grad_norm": NaN,
      "learning_rate": 6.500000000000001e-05,
      "loss": 0.0,
      "step": 13
    },
    {
      "epoch": 0.07161125319693094,
      "grad_norm": NaN,
      "learning_rate": 7e-05,
      "loss": 0.0,
      "step": 14
    },
    {
      "epoch": 0.07672634271099744,
      "grad_norm": NaN,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.0,
      "step": 15
    },
    {
      "epoch": 0.08184143222506395,
      "grad_norm": NaN,
      "learning_rate": 8e-05,
      "loss": 0.0,
      "step": 16
    },
    {
      "epoch": 0.08695652173913043,
      "grad_norm": NaN,
      "learning_rate": 8.5e-05,
      "loss": 0.0,
      "step": 17
    },
    {
      "epoch": 0.09207161125319693,
      "grad_norm": NaN,
      "learning_rate": 9e-05,
      "loss": 0.0,
      "step": 18
    },
    {
      "epoch": 0.09718670076726342,
      "grad_norm": NaN,
      "learning_rate": 9.5e-05,
      "loss": 0.0,
      "step": 19
    },
    {
      "epoch": 0.10230179028132992,
      "grad_norm": NaN,
      "learning_rate": 0.0001,
      "loss": 0.0,
      "step": 20
    },
    {
      "epoch": 0.10741687979539642,
      "grad_norm": NaN,
      "learning_rate": 9.972609476841367e-05,
      "loss": 0.0,
      "step": 21
    },
    {
      "epoch": 0.11253196930946291,
      "grad_norm": NaN,
      "learning_rate": 9.890738003669029e-05,
      "loss": 0.0,
      "step": 22
    },
    {
      "epoch": 0.11764705882352941,
      "grad_norm": NaN,
      "learning_rate": 9.755282581475769e-05,
      "loss": 0.0,
      "step": 23
    },
    {
      "epoch": 0.12276214833759591,
      "grad_norm": NaN,
      "learning_rate": 9.567727288213005e-05,
      "loss": 0.0,
      "step": 24
    },
    {
      "epoch": 0.1278772378516624,
      "grad_norm": NaN,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.0,
      "step": 25
    },
    {
      "epoch": 0.1278772378516624,
      "eval_loss": NaN,
      "eval_runtime": 24.2316,
      "eval_samples_per_second": 13.619,
      "eval_steps_per_second": 1.733,
      "step": 25
    },
    {
      "epoch": 0.1329923273657289,
      "grad_norm": NaN,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.0,
      "step": 26
    },
    {
      "epoch": 0.13810741687979539,
      "grad_norm": NaN,
      "learning_rate": 8.715724127386972e-05,
      "loss": 0.0,
      "step": 27
    },
    {
      "epoch": 0.1432225063938619,
      "grad_norm": NaN,
      "learning_rate": 8.345653031794292e-05,
      "loss": 0.0,
      "step": 28
    },
    {
      "epoch": 0.1483375959079284,
      "grad_norm": NaN,
      "learning_rate": 7.938926261462366e-05,
      "loss": 0.0,
      "step": 29
    },
    {
      "epoch": 0.1534526854219949,
      "grad_norm": NaN,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.0,
      "step": 30
    },
    {
      "epoch": 0.1585677749360614,
      "grad_norm": NaN,
      "learning_rate": 7.033683215379002e-05,
      "loss": 0.0,
      "step": 31
    },
    {
      "epoch": 0.1636828644501279,
      "grad_norm": NaN,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.0,
      "step": 32
    },
    {
      "epoch": 0.16879795396419436,
      "grad_norm": NaN,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 0.0,
      "step": 33
    },
    {
      "epoch": 0.17391304347826086,
      "grad_norm": NaN,
      "learning_rate": 5.522642316338268e-05,
      "loss": 0.0,
      "step": 34
    },
    {
      "epoch": 0.17902813299232737,
      "grad_norm": NaN,
      "learning_rate": 5e-05,
      "loss": 0.0,
      "step": 35
    },
    {
      "epoch": 0.18414322250639387,
      "grad_norm": NaN,
      "learning_rate": 4.477357683661734e-05,
      "loss": 0.0,
      "step": 36
    },
    {
      "epoch": 0.18925831202046037,
      "grad_norm": NaN,
      "learning_rate": 3.960441545911204e-05,
      "loss": 0.0,
      "step": 37
    },
    {
      "epoch": 0.19437340153452684,
      "grad_norm": NaN,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.0,
      "step": 38
    },
    {
      "epoch": 0.19948849104859334,
      "grad_norm": NaN,
      "learning_rate": 2.9663167846209998e-05,
      "loss": 0.0,
      "step": 39
    },
    {
      "epoch": 0.20460358056265984,
      "grad_norm": NaN,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.0,
      "step": 40
    },
    {
      "epoch": 0.20971867007672634,
      "grad_norm": NaN,
      "learning_rate": 2.061073738537635e-05,
      "loss": 0.0,
      "step": 41
    },
    {
      "epoch": 0.21483375959079284,
      "grad_norm": NaN,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 0.0,
      "step": 42
    },
    {
      "epoch": 0.21994884910485935,
      "grad_norm": NaN,
      "learning_rate": 1.2842758726130283e-05,
      "loss": 0.0,
      "step": 43
    },
    {
      "epoch": 0.22506393861892582,
      "grad_norm": NaN,
      "learning_rate": 9.549150281252633e-06,
      "loss": 0.0,
      "step": 44
    },
    {
      "epoch": 0.23017902813299232,
      "grad_norm": NaN,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.0,
      "step": 45
    },
    {
      "epoch": 0.23529411764705882,
      "grad_norm": NaN,
      "learning_rate": 4.322727117869951e-06,
      "loss": 0.0,
      "step": 46
    },
    {
      "epoch": 0.24040920716112532,
      "grad_norm": NaN,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 0.0,
      "step": 47
    },
    {
      "epoch": 0.24552429667519182,
      "grad_norm": NaN,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 0.0,
      "step": 48
    },
    {
      "epoch": 0.2506393861892583,
      "grad_norm": NaN,
      "learning_rate": 2.7390523158633554e-07,
      "loss": 0.0,
      "step": 49
    },
    {
      "epoch": 0.2557544757033248,
      "grad_norm": NaN,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 50
    },
    {
      "epoch": 0.2557544757033248,
      "eval_loss": NaN,
      "eval_runtime": 24.2345,
      "eval_samples_per_second": 13.617,
      "eval_steps_per_second": 1.733,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 1
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.54256789372928e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|