Training in progress, step 50, checkpoint
d6929d9 verified
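
The JSON below is the Trainer state saved with this checkpoint (conventionally written as trainer_state.json inside the checkpoint directory; that filename and local path are an assumption here, not shown on this page). A minimal sketch of reading its headline fields with Python's json module, once the checkpoint is downloaded:

import json

# Hypothetical local path; the Hugging Face Trainer conventionally writes
# <output_dir>/checkpoint-<step>/trainer_state.json alongside the weights.
with open("miner_id_24/checkpoint-50/trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"])            # 2.300698757171631 (best eval_loss so far)
print(state["best_model_checkpoint"])  # miner_id_24/checkpoint-50
print(state["global_step"])            # 50

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]
print(len(train_log), len(eval_log))   # 50 training logs, 3 eval logs (steps 1, 25, 50)
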
{
"best_metric": 2.300698757171631,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.018412815319462345,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0003682563063892469,
"grad_norm": 1.91280996799469,
"learning_rate": 5e-05,
"loss": 2.1982,
"step": 1
},
{
"epoch": 0.0003682563063892469,
"eval_loss": 3.6061596870422363,
"eval_runtime": 1.5161,
"eval_samples_per_second": 32.978,
"eval_steps_per_second": 8.574,
"step": 1
},
{
"epoch": 0.0007365126127784938,
"grad_norm": 2.0957469940185547,
"learning_rate": 0.0001,
"loss": 2.4763,
"step": 2
},
{
"epoch": 0.0011047689191677407,
"grad_norm": 1.9613713026046753,
"learning_rate": 9.990365154573717e-05,
"loss": 2.5186,
"step": 3
},
{
"epoch": 0.0014730252255569876,
"grad_norm": 1.4553881883621216,
"learning_rate": 9.961501876182148e-05,
"loss": 2.4343,
"step": 4
},
{
"epoch": 0.0018412815319462346,
"grad_norm": 1.5710747241973877,
"learning_rate": 9.913533761814537e-05,
"loss": 2.3533,
"step": 5
},
{
"epoch": 0.0022095378383354815,
"grad_norm": 1.426024317741394,
"learning_rate": 9.846666218300807e-05,
"loss": 2.2975,
"step": 6
},
{
"epoch": 0.0025777941447247283,
"grad_norm": 1.3779783248901367,
"learning_rate": 9.761185582727977e-05,
"loss": 2.4956,
"step": 7
},
{
"epoch": 0.002946050451113975,
"grad_norm": 1.282586693763733,
"learning_rate": 9.657457896300791e-05,
"loss": 2.5275,
"step": 8
},
{
"epoch": 0.0033143067575032224,
"grad_norm": 1.0497735738754272,
"learning_rate": 9.535927336897098e-05,
"loss": 2.6698,
"step": 9
},
{
"epoch": 0.0036825630638924692,
"grad_norm": 1.044584035873413,
"learning_rate": 9.397114317029975e-05,
"loss": 2.7479,
"step": 10
},
{
"epoch": 0.004050819370281716,
"grad_norm": 1.3881807327270508,
"learning_rate": 9.241613255361455e-05,
"loss": 2.9648,
"step": 11
},
{
"epoch": 0.004419075676670963,
"grad_norm": 2.0393691062927246,
"learning_rate": 9.070090031310558e-05,
"loss": 3.2128,
"step": 12
},
{
"epoch": 0.00478733198306021,
"grad_norm": 0.870771050453186,
"learning_rate": 8.883279133655399e-05,
"loss": 1.7127,
"step": 13
},
{
"epoch": 0.005155588289449457,
"grad_norm": 0.8859942555427551,
"learning_rate": 8.681980515339464e-05,
"loss": 1.8588,
"step": 14
},
{
"epoch": 0.005523844595838704,
"grad_norm": 0.8486651182174683,
"learning_rate": 8.467056167950311e-05,
"loss": 2.0634,
"step": 15
},
{
"epoch": 0.00589210090222795,
"grad_norm": 0.7893238663673401,
"learning_rate": 8.239426430539243e-05,
"loss": 2.0783,
"step": 16
},
{
"epoch": 0.0062603572086171975,
"grad_norm": 0.6707751154899597,
"learning_rate": 8.000066048588211e-05,
"loss": 2.106,
"step": 17
},
{
"epoch": 0.006628613515006445,
"grad_norm": 0.6826125979423523,
"learning_rate": 7.75e-05,
"loss": 2.2365,
"step": 18
},
{
"epoch": 0.006996869821395691,
"grad_norm": 0.6989638209342957,
"learning_rate": 7.490299105985507e-05,
"loss": 2.1631,
"step": 19
},
{
"epoch": 0.0073651261277849385,
"grad_norm": 0.7944750785827637,
"learning_rate": 7.222075445642904e-05,
"loss": 2.2346,
"step": 20
},
{
"epoch": 0.007733382434174185,
"grad_norm": 0.7866911292076111,
"learning_rate": 6.946477593864228e-05,
"loss": 2.1823,
"step": 21
},
{
"epoch": 0.008101638740563431,
"grad_norm": 0.8714109659194946,
"learning_rate": 6.664685702961344e-05,
"loss": 2.3636,
"step": 22
},
{
"epoch": 0.00846989504695268,
"grad_norm": 1.1833001375198364,
"learning_rate": 6.377906449072578e-05,
"loss": 2.7353,
"step": 23
},
{
"epoch": 0.008838151353341926,
"grad_norm": 1.4409525394439697,
"learning_rate": 6.087367864990233e-05,
"loss": 3.0607,
"step": 24
},
{
"epoch": 0.009206407659731172,
"grad_norm": 2.9500479698181152,
"learning_rate": 5.794314081535644e-05,
"loss": 3.1293,
"step": 25
},
{
"epoch": 0.009206407659731172,
"eval_loss": 2.3962597846984863,
"eval_runtime": 1.475,
"eval_samples_per_second": 33.898,
"eval_steps_per_second": 8.813,
"step": 25
},
{
"epoch": 0.00957466396612042,
"grad_norm": 0.6010366678237915,
"learning_rate": 5.500000000000001e-05,
"loss": 1.6247,
"step": 26
},
{
"epoch": 0.009942920272509667,
"grad_norm": 0.8184590935707092,
"learning_rate": 5.205685918464356e-05,
"loss": 1.8737,
"step": 27
},
{
"epoch": 0.010311176578898913,
"grad_norm": 0.7789363265037537,
"learning_rate": 4.912632135009769e-05,
"loss": 2.0136,
"step": 28
},
{
"epoch": 0.010679432885288161,
"grad_norm": 0.6929258108139038,
"learning_rate": 4.6220935509274235e-05,
"loss": 2.0049,
"step": 29
},
{
"epoch": 0.011047689191677408,
"grad_norm": 0.6341331601142883,
"learning_rate": 4.3353142970386564e-05,
"loss": 2.0244,
"step": 30
},
{
"epoch": 0.011415945498066654,
"grad_norm": 0.6360584497451782,
"learning_rate": 4.053522406135775e-05,
"loss": 2.0533,
"step": 31
},
{
"epoch": 0.0117842018044559,
"grad_norm": 0.6871073246002197,
"learning_rate": 3.777924554357096e-05,
"loss": 2.193,
"step": 32
},
{
"epoch": 0.012152458110845149,
"grad_norm": 0.7307798862457275,
"learning_rate": 3.509700894014496e-05,
"loss": 2.1686,
"step": 33
},
{
"epoch": 0.012520714417234395,
"grad_norm": 0.8923740386962891,
"learning_rate": 3.250000000000001e-05,
"loss": 2.2811,
"step": 34
},
{
"epoch": 0.012888970723623642,
"grad_norm": 1.1610867977142334,
"learning_rate": 2.9999339514117912e-05,
"loss": 2.533,
"step": 35
},
{
"epoch": 0.01325722703001289,
"grad_norm": 1.322665810585022,
"learning_rate": 2.760573569460757e-05,
"loss": 2.8074,
"step": 36
},
{
"epoch": 0.013625483336402136,
"grad_norm": 1.953356385231018,
"learning_rate": 2.53294383204969e-05,
"loss": 2.9117,
"step": 37
},
{
"epoch": 0.013993739642791382,
"grad_norm": 0.6058061122894287,
"learning_rate": 2.3180194846605367e-05,
"loss": 1.6222,
"step": 38
},
{
"epoch": 0.014361995949180629,
"grad_norm": 0.48876795172691345,
"learning_rate": 2.1167208663446025e-05,
"loss": 1.7171,
"step": 39
},
{
"epoch": 0.014730252255569877,
"grad_norm": 0.5596562623977661,
"learning_rate": 1.9299099686894423e-05,
"loss": 1.8393,
"step": 40
},
{
"epoch": 0.015098508561959123,
"grad_norm": 0.540923535823822,
"learning_rate": 1.758386744638546e-05,
"loss": 1.9241,
"step": 41
},
{
"epoch": 0.01546676486834837,
"grad_norm": 0.6124505996704102,
"learning_rate": 1.602885682970026e-05,
"loss": 1.9227,
"step": 42
},
{
"epoch": 0.015835021174737618,
"grad_norm": 0.6569370031356812,
"learning_rate": 1.464072663102903e-05,
"loss": 1.9246,
"step": 43
},
{
"epoch": 0.016203277481126863,
"grad_norm": 0.6232062578201294,
"learning_rate": 1.3425421036992098e-05,
"loss": 2.0976,
"step": 44
},
{
"epoch": 0.01657153378751611,
"grad_norm": 0.7638946175575256,
"learning_rate": 1.2388144172720251e-05,
"loss": 2.0661,
"step": 45
},
{
"epoch": 0.01693979009390536,
"grad_norm": 0.7543624639511108,
"learning_rate": 1.1533337816991932e-05,
"loss": 2.2258,
"step": 46
},
{
"epoch": 0.017308046400294604,
"grad_norm": 0.8359889984130859,
"learning_rate": 1.0864662381854632e-05,
"loss": 2.2464,
"step": 47
},
{
"epoch": 0.01767630270668385,
"grad_norm": 0.983206033706665,
"learning_rate": 1.0384981238178534e-05,
"loss": 2.4463,
"step": 48
},
{
"epoch": 0.0180445590130731,
"grad_norm": 1.324151873588562,
"learning_rate": 1.0096348454262845e-05,
"loss": 2.7316,
"step": 49
},
{
"epoch": 0.018412815319462345,
"grad_norm": 3.246047258377075,
"learning_rate": 1e-05,
"loss": 3.102,
"step": 50
},
{
"epoch": 0.018412815319462345,
"eval_loss": 2.300698757171631,
"eval_runtime": 1.4858,
"eval_samples_per_second": 33.653,
"eval_steps_per_second": 8.75,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.5899604877626573e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
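
The logged learning rates are consistent with two linear warmup steps followed by a cosine decay from 1e-4 down to a 1e-5 floor over the remaining 48 steps; that is an inference from the numbers above, since the scheduler name itself is not recorded in the state. Training stops at step 50 because global_step has reached max_steps, with the EarlyStoppingCallback (patience 1, counter 0) never triggered. A minimal consistency check of the inferred schedule against the log, under those assumptions:

import json
import math

MAX_LR, MIN_LR = 1e-4, 1e-5       # inferred peak and floor learning rates
WARMUP_STEPS, TOTAL_STEPS = 2, 50

def expected_lr(step: int) -> float:
    """Learning rate the inferred warmup + cosine schedule gives at a 1-based step."""
    if step <= WARMUP_STEPS:
        return MAX_LR * step / WARMUP_STEPS                    # linear warmup
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return MIN_LR + (MAX_LR - MIN_LR) * 0.5 * (1.0 + math.cos(math.pi * progress))

# Hypothetical path, as above.
with open("miner_id_24/checkpoint-50/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "learning_rate" in entry:                               # skip eval-only entries
        assert math.isclose(entry["learning_rate"], expected_lr(entry["step"]),
                            rel_tol=1e-6), entry["step"]
print("all 50 logged learning rates match the inferred schedule")
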