{
  "best_metric": 1.5995488166809082,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.6218422075398368,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012436844150796735,
      "grad_norm": 4.070582389831543,
      "learning_rate": 5e-05,
      "loss": 4.2908,
      "step": 1
    },
    {
      "epoch": 0.012436844150796735,
      "eval_loss": 6.697080135345459,
      "eval_runtime": 4.2989,
      "eval_samples_per_second": 11.631,
      "eval_steps_per_second": 3.024,
      "step": 1
    },
    {
      "epoch": 0.02487368830159347,
      "grad_norm": 4.343489646911621,
      "learning_rate": 0.0001,
      "loss": 4.86,
      "step": 2
    },
    {
      "epoch": 0.03731053245239021,
      "grad_norm": 4.6962361335754395,
      "learning_rate": 9.990365154573717e-05,
      "loss": 4.8283,
      "step": 3
    },
    {
      "epoch": 0.04974737660318694,
      "grad_norm": 4.616434097290039,
      "learning_rate": 9.961501876182148e-05,
      "loss": 4.0479,
      "step": 4
    },
    {
      "epoch": 0.06218422075398368,
      "grad_norm": 4.6326904296875,
      "learning_rate": 9.913533761814537e-05,
      "loss": 3.208,
      "step": 5
    },
    {
      "epoch": 0.07462106490478042,
      "grad_norm": 3.4674623012542725,
      "learning_rate": 9.846666218300807e-05,
      "loss": 2.7493,
      "step": 6
    },
    {
      "epoch": 0.08705790905557714,
      "grad_norm": 3.225517749786377,
      "learning_rate": 9.761185582727977e-05,
      "loss": 2.5875,
      "step": 7
    },
    {
      "epoch": 0.09949475320637388,
      "grad_norm": 3.2241368293762207,
      "learning_rate": 9.657457896300791e-05,
      "loss": 2.4383,
      "step": 8
    },
    {
      "epoch": 0.11193159735717062,
      "grad_norm": 3.3204712867736816,
      "learning_rate": 9.535927336897098e-05,
      "loss": 2.211,
      "step": 9
    },
    {
      "epoch": 0.12436844150796736,
      "grad_norm": 2.812617301940918,
      "learning_rate": 9.397114317029975e-05,
      "loss": 2.0978,
      "step": 10
    },
    {
      "epoch": 0.1368052856587641,
      "grad_norm": 2.866771936416626,
      "learning_rate": 9.241613255361455e-05,
      "loss": 1.8688,
      "step": 11
    },
    {
      "epoch": 0.14924212980956084,
      "grad_norm": 6.981631755828857,
      "learning_rate": 9.070090031310558e-05,
      "loss": 1.6069,
      "step": 12
    },
    {
      "epoch": 0.16167897396035755,
      "grad_norm": 10.564940452575684,
      "learning_rate": 8.883279133655399e-05,
      "loss": 2.8817,
      "step": 13
    },
    {
      "epoch": 0.17411581811115429,
      "grad_norm": 8.371127128601074,
      "learning_rate": 8.681980515339464e-05,
      "loss": 2.7034,
      "step": 14
    },
    {
      "epoch": 0.18655266226195102,
      "grad_norm": 3.5405235290527344,
      "learning_rate": 8.467056167950311e-05,
      "loss": 2.462,
      "step": 15
    },
    {
      "epoch": 0.19898950641274776,
      "grad_norm": 2.1815154552459717,
      "learning_rate": 8.239426430539243e-05,
      "loss": 2.2772,
      "step": 16
    },
    {
      "epoch": 0.2114263505635445,
      "grad_norm": 2.2907159328460693,
      "learning_rate": 8.000066048588211e-05,
      "loss": 2.1571,
      "step": 17
    },
    {
      "epoch": 0.22386319471434124,
      "grad_norm": 2.2352960109710693,
      "learning_rate": 7.75e-05,
      "loss": 2.1092,
      "step": 18
    },
    {
      "epoch": 0.23630003886513798,
      "grad_norm": 2.1429026126861572,
      "learning_rate": 7.490299105985507e-05,
      "loss": 2.1112,
      "step": 19
    },
    {
      "epoch": 0.24873688301593472,
      "grad_norm": 2.0608441829681396,
      "learning_rate": 7.222075445642904e-05,
      "loss": 1.9141,
      "step": 20
    },
    {
      "epoch": 0.26117372716673143,
      "grad_norm": 1.994292974472046,
      "learning_rate": 6.946477593864228e-05,
      "loss": 1.7083,
      "step": 21
    },
    {
      "epoch": 0.2736105713175282,
      "grad_norm": 1.7245221138000488,
      "learning_rate": 6.664685702961344e-05,
      "loss": 1.7911,
      "step": 22
    },
    {
      "epoch": 0.2860474154683249,
      "grad_norm": 1.8327735662460327,
      "learning_rate": 6.377906449072578e-05,
      "loss": 1.6479,
      "step": 23
    },
    {
      "epoch": 0.2984842596191217,
      "grad_norm": 1.897107720375061,
      "learning_rate": 6.087367864990233e-05,
      "loss": 1.3578,
      "step": 24
    },
    {
      "epoch": 0.3109211037699184,
      "grad_norm": 7.827093124389648,
      "learning_rate": 5.794314081535644e-05,
      "loss": 0.7274,
      "step": 25
    },
    {
      "epoch": 0.3109211037699184,
      "eval_loss": 1.8228825330734253,
      "eval_runtime": 4.3671,
      "eval_samples_per_second": 11.449,
      "eval_steps_per_second": 2.977,
      "step": 25
    },
    {
      "epoch": 0.3233579479207151,
      "grad_norm": 3.0982296466827393,
      "learning_rate": 5.500000000000001e-05,
      "loss": 2.3577,
      "step": 26
    },
    {
      "epoch": 0.33579479207151186,
      "grad_norm": 2.92927885055542,
      "learning_rate": 5.205685918464356e-05,
      "loss": 2.2911,
      "step": 27
    },
    {
      "epoch": 0.34823163622230857,
      "grad_norm": 2.458441972732544,
      "learning_rate": 4.912632135009769e-05,
      "loss": 2.0293,
      "step": 28
    },
    {
      "epoch": 0.36066848037310534,
      "grad_norm": 2.1433932781219482,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 2.0705,
      "step": 29
    },
    {
      "epoch": 0.37310532452390205,
      "grad_norm": 1.8144245147705078,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 1.8359,
      "step": 30
    },
    {
      "epoch": 0.3855421686746988,
      "grad_norm": 1.615573525428772,
      "learning_rate": 4.053522406135775e-05,
      "loss": 1.8279,
      "step": 31
    },
    {
      "epoch": 0.3979790128254955,
      "grad_norm": 1.7636524438858032,
      "learning_rate": 3.777924554357096e-05,
      "loss": 1.9141,
      "step": 32
    },
    {
      "epoch": 0.4104158569762923,
      "grad_norm": 1.6936835050582886,
      "learning_rate": 3.509700894014496e-05,
      "loss": 1.9035,
      "step": 33
    },
    {
      "epoch": 0.422852701127089,
      "grad_norm": 1.7628343105316162,
      "learning_rate": 3.250000000000001e-05,
      "loss": 1.5598,
      "step": 34
    },
    {
      "epoch": 0.4352895452778857,
      "grad_norm": 2.0483028888702393,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 1.6121,
      "step": 35
    },
    {
      "epoch": 0.4477263894286825,
      "grad_norm": 2.3952748775482178,
      "learning_rate": 2.760573569460757e-05,
      "loss": 1.2943,
      "step": 36
    },
    {
      "epoch": 0.4601632335794792,
      "grad_norm": 2.835444211959839,
      "learning_rate": 2.53294383204969e-05,
      "loss": 0.9927,
      "step": 37
    },
    {
      "epoch": 0.47260007773027596,
      "grad_norm": 1.7717387676239014,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 1.8088,
      "step": 38
    },
    {
      "epoch": 0.48503692188107267,
      "grad_norm": 1.5801821947097778,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 2.074,
      "step": 39
    },
    {
      "epoch": 0.49747376603186944,
      "grad_norm": 1.5096017122268677,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 1.9525,
      "step": 40
    },
    {
      "epoch": 0.5099106101826661,
      "grad_norm": 1.5900380611419678,
      "learning_rate": 1.758386744638546e-05,
      "loss": 1.7708,
      "step": 41
    },
    {
      "epoch": 0.5223474543334629,
      "grad_norm": 1.4869619607925415,
      "learning_rate": 1.602885682970026e-05,
      "loss": 1.9034,
      "step": 42
    },
    {
      "epoch": 0.5347842984842596,
      "grad_norm": 1.295594573020935,
      "learning_rate": 1.464072663102903e-05,
      "loss": 1.8161,
      "step": 43
    },
    {
      "epoch": 0.5472211426350564,
      "grad_norm": 1.4549026489257812,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 1.7517,
      "step": 44
    },
    {
      "epoch": 0.5596579867858531,
      "grad_norm": 1.751529335975647,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 1.7905,
      "step": 45
    },
    {
      "epoch": 0.5720948309366498,
      "grad_norm": 1.6213444471359253,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 1.5946,
      "step": 46
    },
    {
      "epoch": 0.5845316750874465,
      "grad_norm": 1.8312252759933472,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 1.5736,
      "step": 47
    },
    {
      "epoch": 0.5969685192382433,
      "grad_norm": 1.7476670742034912,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 1.4217,
      "step": 48
    },
    {
      "epoch": 0.6094053633890401,
      "grad_norm": 1.5351223945617676,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 0.9661,
      "step": 49
    },
    {
      "epoch": 0.6218422075398368,
      "grad_norm": 1.8565118312835693,
      "learning_rate": 1e-05,
      "loss": 0.4582,
      "step": 50
    },
    {
      "epoch": 0.6218422075398368,
      "eval_loss": 1.5995488166809082,
      "eval_runtime": 4.3433,
      "eval_samples_per_second": 11.512,
      "eval_steps_per_second": 2.993,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.968083617316864e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}