{
"best_metric": 1.4766654968261719,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.07929428089999009,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0015858856179998017,
"grad_norm": 17.070512771606445,
"learning_rate": 5e-05,
"loss": 3.6777,
"step": 1
},
{
"epoch": 0.0015858856179998017,
"eval_loss": 5.90218448638916,
"eval_runtime": 3.3307,
"eval_samples_per_second": 15.012,
"eval_steps_per_second": 3.903,
"step": 1
},
{
"epoch": 0.0031717712359996034,
"grad_norm": 19.348953247070312,
"learning_rate": 0.0001,
"loss": 4.7538,
"step": 2
},
{
"epoch": 0.004757656853999405,
"grad_norm": 13.42625617980957,
"learning_rate": 9.990365154573717e-05,
"loss": 3.2714,
"step": 3
},
{
"epoch": 0.006343542471999207,
"grad_norm": 2.2228305339813232,
"learning_rate": 9.961501876182148e-05,
"loss": 2.0772,
"step": 4
},
{
"epoch": 0.007929428089999009,
"grad_norm": 1.0048638582229614,
"learning_rate": 9.913533761814537e-05,
"loss": 1.954,
"step": 5
},
{
"epoch": 0.00951531370799881,
"grad_norm": 1.0425435304641724,
"learning_rate": 9.846666218300807e-05,
"loss": 1.876,
"step": 6
},
{
"epoch": 0.011101199325998613,
"grad_norm": 1.0215988159179688,
"learning_rate": 9.761185582727977e-05,
"loss": 1.8797,
"step": 7
},
{
"epoch": 0.012687084943998414,
"grad_norm": 0.8580153584480286,
"learning_rate": 9.657457896300791e-05,
"loss": 1.8136,
"step": 8
},
{
"epoch": 0.014272970561998216,
"grad_norm": 0.6667729616165161,
"learning_rate": 9.535927336897098e-05,
"loss": 1.7673,
"step": 9
},
{
"epoch": 0.015858856179998018,
"grad_norm": 0.701425313949585,
"learning_rate": 9.397114317029975e-05,
"loss": 1.7358,
"step": 10
},
{
"epoch": 0.01744474179799782,
"grad_norm": 0.7387133836746216,
"learning_rate": 9.241613255361455e-05,
"loss": 1.6905,
"step": 11
},
{
"epoch": 0.01903062741599762,
"grad_norm": 1.0632332563400269,
"learning_rate": 9.070090031310558e-05,
"loss": 1.7093,
"step": 12
},
{
"epoch": 0.020616513033997422,
"grad_norm": 0.7202914953231812,
"learning_rate": 8.883279133655399e-05,
"loss": 1.6132,
"step": 13
},
{
"epoch": 0.022202398651997225,
"grad_norm": 0.7657666206359863,
"learning_rate": 8.681980515339464e-05,
"loss": 1.6596,
"step": 14
},
{
"epoch": 0.023788284269997028,
"grad_norm": 0.6034996509552002,
"learning_rate": 8.467056167950311e-05,
"loss": 1.668,
"step": 15
},
{
"epoch": 0.025374169887996827,
"grad_norm": 0.5026582479476929,
"learning_rate": 8.239426430539243e-05,
"loss": 1.7384,
"step": 16
},
{
"epoch": 0.02696005550599663,
"grad_norm": 0.42584991455078125,
"learning_rate": 8.000066048588211e-05,
"loss": 1.6253,
"step": 17
},
{
"epoch": 0.028545941123996433,
"grad_norm": 0.4252493381500244,
"learning_rate": 7.75e-05,
"loss": 1.6889,
"step": 18
},
{
"epoch": 0.030131826741996232,
"grad_norm": 0.4458921551704407,
"learning_rate": 7.490299105985507e-05,
"loss": 1.6895,
"step": 19
},
{
"epoch": 0.031717712359996035,
"grad_norm": 0.4872418940067291,
"learning_rate": 7.222075445642904e-05,
"loss": 1.6197,
"step": 20
},
{
"epoch": 0.033303597977995834,
"grad_norm": 0.4587940573692322,
"learning_rate": 6.946477593864228e-05,
"loss": 1.622,
"step": 21
},
{
"epoch": 0.03488948359599564,
"grad_norm": 0.48748770356178284,
"learning_rate": 6.664685702961344e-05,
"loss": 1.6673,
"step": 22
},
{
"epoch": 0.03647536921399544,
"grad_norm": 0.5389514565467834,
"learning_rate": 6.377906449072578e-05,
"loss": 1.6634,
"step": 23
},
{
"epoch": 0.03806125483199524,
"grad_norm": 0.6838032603263855,
"learning_rate": 6.087367864990233e-05,
"loss": 1.6041,
"step": 24
},
{
"epoch": 0.039647140449995046,
"grad_norm": 1.1133662462234497,
"learning_rate": 5.794314081535644e-05,
"loss": 1.5255,
"step": 25
},
{
"epoch": 0.039647140449995046,
"eval_loss": 1.4964512586593628,
"eval_runtime": 3.4121,
"eval_samples_per_second": 14.654,
"eval_steps_per_second": 3.81,
"step": 25
},
{
"epoch": 0.041233026067994845,
"grad_norm": 0.40284931659698486,
"learning_rate": 5.500000000000001e-05,
"loss": 1.5774,
"step": 26
},
{
"epoch": 0.042818911685994644,
"grad_norm": 0.38245460391044617,
"learning_rate": 5.205685918464356e-05,
"loss": 1.6014,
"step": 27
},
{
"epoch": 0.04440479730399445,
"grad_norm": 0.4388914108276367,
"learning_rate": 4.912632135009769e-05,
"loss": 1.597,
"step": 28
},
{
"epoch": 0.04599068292199425,
"grad_norm": 0.42553648352622986,
"learning_rate": 4.6220935509274235e-05,
"loss": 1.5929,
"step": 29
},
{
"epoch": 0.047576568539994056,
"grad_norm": 0.4502233862876892,
"learning_rate": 4.3353142970386564e-05,
"loss": 1.6029,
"step": 30
},
{
"epoch": 0.049162454157993855,
"grad_norm": 0.4477775990962982,
"learning_rate": 4.053522406135775e-05,
"loss": 1.6697,
"step": 31
},
{
"epoch": 0.050748339775993655,
"grad_norm": 0.47372493147850037,
"learning_rate": 3.777924554357096e-05,
"loss": 1.6562,
"step": 32
},
{
"epoch": 0.05233422539399346,
"grad_norm": 0.46918997168540955,
"learning_rate": 3.509700894014496e-05,
"loss": 1.6332,
"step": 33
},
{
"epoch": 0.05392011101199326,
"grad_norm": 0.4643358290195465,
"learning_rate": 3.250000000000001e-05,
"loss": 1.6086,
"step": 34
},
{
"epoch": 0.05550599662999306,
"grad_norm": 0.4862658679485321,
"learning_rate": 2.9999339514117912e-05,
"loss": 1.6013,
"step": 35
},
{
"epoch": 0.057091882247992866,
"grad_norm": 0.506260871887207,
"learning_rate": 2.760573569460757e-05,
"loss": 1.6465,
"step": 36
},
{
"epoch": 0.058677767865992665,
"grad_norm": 0.594947099685669,
"learning_rate": 2.53294383204969e-05,
"loss": 1.5712,
"step": 37
},
{
"epoch": 0.060263653483992465,
"grad_norm": 0.5527778267860413,
"learning_rate": 2.3180194846605367e-05,
"loss": 1.4738,
"step": 38
},
{
"epoch": 0.06184953910199227,
"grad_norm": 0.33043065667152405,
"learning_rate": 2.1167208663446025e-05,
"loss": 1.5579,
"step": 39
},
{
"epoch": 0.06343542471999207,
"grad_norm": 0.374056875705719,
"learning_rate": 1.9299099686894423e-05,
"loss": 1.5702,
"step": 40
},
{
"epoch": 0.06502131033799187,
"grad_norm": 0.36577755212783813,
"learning_rate": 1.758386744638546e-05,
"loss": 1.6545,
"step": 41
},
{
"epoch": 0.06660719595599167,
"grad_norm": 0.33573096990585327,
"learning_rate": 1.602885682970026e-05,
"loss": 1.6232,
"step": 42
},
{
"epoch": 0.06819308157399148,
"grad_norm": 0.364795058965683,
"learning_rate": 1.464072663102903e-05,
"loss": 1.5976,
"step": 43
},
{
"epoch": 0.06977896719199128,
"grad_norm": 0.40120968222618103,
"learning_rate": 1.3425421036992098e-05,
"loss": 1.6431,
"step": 44
},
{
"epoch": 0.07136485280999108,
"grad_norm": 0.4092036783695221,
"learning_rate": 1.2388144172720251e-05,
"loss": 1.6469,
"step": 45
},
{
"epoch": 0.07295073842799088,
"grad_norm": 0.420834481716156,
"learning_rate": 1.1533337816991932e-05,
"loss": 1.6295,
"step": 46
},
{
"epoch": 0.07453662404599068,
"grad_norm": 0.4343208372592926,
"learning_rate": 1.0864662381854632e-05,
"loss": 1.6405,
"step": 47
},
{
"epoch": 0.07612250966399048,
"grad_norm": 0.4775432050228119,
"learning_rate": 1.0384981238178534e-05,
"loss": 1.6059,
"step": 48
},
{
"epoch": 0.07770839528199029,
"grad_norm": 0.5238398313522339,
"learning_rate": 1.0096348454262845e-05,
"loss": 1.5927,
"step": 49
},
{
"epoch": 0.07929428089999009,
"grad_norm": 0.7994441390037537,
"learning_rate": 1e-05,
"loss": 1.4393,
"step": 50
},
{
"epoch": 0.07929428089999009,
"eval_loss": 1.4766654968261719,
"eval_runtime": 3.4247,
"eval_samples_per_second": 14.6,
"eval_steps_per_second": 3.796,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.631381723512832e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}