{
"best_metric": 0.7971799373626709,
"best_model_checkpoint": "miner_id_24/checkpoint-25",
"epoch": 3.073446327683616,
"eval_steps": 25,
"global_step": 34,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0903954802259887,
"grad_norm": 36.14955520629883,
"learning_rate": 5e-05,
"loss": 7.4587,
"step": 1
},
{
"epoch": 0.0903954802259887,
"eval_loss": 7.517584800720215,
"eval_runtime": 3.9481,
"eval_samples_per_second": 12.664,
"eval_steps_per_second": 3.293,
"step": 1
},
{
"epoch": 0.1807909604519774,
"grad_norm": 36.21140670776367,
"learning_rate": 0.0001,
"loss": 7.4388,
"step": 2
},
{
"epoch": 0.2711864406779661,
"grad_norm": 49.9202880859375,
"learning_rate": 9.978331270024886e-05,
"loss": 4.5541,
"step": 3
},
{
"epoch": 0.3615819209039548,
"grad_norm": 18.69330596923828,
"learning_rate": 9.913533761814537e-05,
"loss": 1.6457,
"step": 4
},
{
"epoch": 0.4519774011299435,
"grad_norm": 6.892326354980469,
"learning_rate": 9.80623151079494e-05,
"loss": 1.0008,
"step": 5
},
{
"epoch": 0.5423728813559322,
"grad_norm": 6.923241138458252,
"learning_rate": 9.657457896300791e-05,
"loss": 0.9578,
"step": 6
},
{
"epoch": 0.632768361581921,
"grad_norm": 17.618833541870117,
"learning_rate": 9.468645689567598e-05,
"loss": 1.0453,
"step": 7
},
{
"epoch": 0.7231638418079096,
"grad_norm": 2.573690414428711,
"learning_rate": 9.241613255361455e-05,
"loss": 0.8191,
"step": 8
},
{
"epoch": 0.8135593220338984,
"grad_norm": 9.989768028259277,
"learning_rate": 8.978547040132317e-05,
"loss": 1.1795,
"step": 9
},
{
"epoch": 0.903954802259887,
"grad_norm": 1.8748440742492676,
"learning_rate": 8.681980515339464e-05,
"loss": 0.8322,
"step": 10
},
{
"epoch": 0.9943502824858758,
"grad_norm": 6.087851524353027,
"learning_rate": 8.354769778736406e-05,
"loss": 0.9989,
"step": 11
},
{
"epoch": 1.0847457627118644,
"grad_norm": 8.095987319946289,
"learning_rate": 8.000066048588211e-05,
"loss": 1.7209,
"step": 12
},
{
"epoch": 1.1751412429378532,
"grad_norm": 5.422818183898926,
"learning_rate": 7.62128531571699e-05,
"loss": 0.8171,
"step": 13
},
{
"epoch": 1.2655367231638417,
"grad_norm": 6.045770645141602,
"learning_rate": 7.222075445642904e-05,
"loss": 0.9118,
"step": 14
},
{
"epoch": 1.3559322033898304,
"grad_norm": 2.086815357208252,
"learning_rate": 6.80628104764508e-05,
"loss": 0.8146,
"step": 15
},
{
"epoch": 1.4463276836158192,
"grad_norm": 1.1806037425994873,
"learning_rate": 6.377906449072578e-05,
"loss": 0.7984,
"step": 16
},
{
"epoch": 1.536723163841808,
"grad_norm": 3.5935990810394287,
"learning_rate": 5.941077131483025e-05,
"loss": 0.9019,
"step": 17
},
{
"epoch": 1.6271186440677967,
"grad_norm": 0.803607165813446,
"learning_rate": 5.500000000000001e-05,
"loss": 0.7858,
"step": 18
},
{
"epoch": 1.7175141242937855,
"grad_norm": 0.7346743941307068,
"learning_rate": 5.058922868516978e-05,
"loss": 0.7894,
"step": 19
},
{
"epoch": 1.807909604519774,
"grad_norm": 6.340599536895752,
"learning_rate": 4.6220935509274235e-05,
"loss": 0.9671,
"step": 20
},
{
"epoch": 1.8983050847457628,
"grad_norm": 0.5824771523475647,
"learning_rate": 4.19371895235492e-05,
"loss": 0.769,
"step": 21
},
{
"epoch": 1.9887005649717513,
"grad_norm": 4.109841823577881,
"learning_rate": 3.777924554357096e-05,
"loss": 0.8763,
"step": 22
},
{
"epoch": 2.07909604519774,
"grad_norm": 2.5904746055603027,
"learning_rate": 3.378714684283011e-05,
"loss": 1.5647,
"step": 23
},
{
"epoch": 2.169491525423729,
"grad_norm": 1.8928107023239136,
"learning_rate": 2.9999339514117912e-05,
"loss": 0.7833,
"step": 24
},
{
"epoch": 2.2598870056497176,
"grad_norm": 4.098468780517578,
"learning_rate": 2.645230221263596e-05,
"loss": 0.8615,
"step": 25
},
{
"epoch": 2.2598870056497176,
"eval_loss": 0.7971799373626709,
"eval_runtime": 4.0319,
"eval_samples_per_second": 12.401,
"eval_steps_per_second": 3.224,
"step": 25
},
{
"epoch": 2.3502824858757063,
"grad_norm": 2.6116600036621094,
"learning_rate": 2.3180194846605367e-05,
"loss": 0.7807,
"step": 26
},
{
"epoch": 2.440677966101695,
"grad_norm": 2.5598981380462646,
"learning_rate": 2.0214529598676836e-05,
"loss": 0.8252,
"step": 27
},
{
"epoch": 2.5310734463276834,
"grad_norm": 3.7141711711883545,
"learning_rate": 1.758386744638546e-05,
"loss": 0.8265,
"step": 28
},
{
"epoch": 2.621468926553672,
"grad_norm": 2.038482427597046,
"learning_rate": 1.531354310432403e-05,
"loss": 0.7861,
"step": 29
},
{
"epoch": 2.711864406779661,
"grad_norm": 1.7209573984146118,
"learning_rate": 1.3425421036992098e-05,
"loss": 0.7789,
"step": 30
},
{
"epoch": 2.8022598870056497,
"grad_norm": 4.278275489807129,
"learning_rate": 1.1937684892050604e-05,
"loss": 0.8473,
"step": 31
},
{
"epoch": 2.8926553672316384,
"grad_norm": 1.0892584323883057,
"learning_rate": 1.0864662381854632e-05,
"loss": 0.78,
"step": 32
},
{
"epoch": 2.983050847457627,
"grad_norm": 1.513576626777649,
"learning_rate": 1.0216687299751144e-05,
"loss": 0.8202,
"step": 33
},
{
"epoch": 3.073446327683616,
"grad_norm": 2.245659589767456,
"learning_rate": 1e-05,
"loss": 1.6204,
"step": 34
}
],
"logging_steps": 1,
"max_steps": 34,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.198645954238218e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}