{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0016931790282845556,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.6931790282845557e-05,
"grad_norm": NaN,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.0,
"step": 1
},
{
"epoch": 1.6931790282845557e-05,
"eval_loss": NaN,
"eval_runtime": 1940.2475,
"eval_samples_per_second": 25.633,
"eval_steps_per_second": 3.204,
"step": 1
},
{
"epoch": 3.3863580565691114e-05,
"grad_norm": NaN,
"learning_rate": 4.000000000000001e-06,
"loss": 0.0,
"step": 2
},
{
"epoch": 5.079537084853667e-05,
"grad_norm": NaN,
"learning_rate": 6e-06,
"loss": 0.0,
"step": 3
},
{
"epoch": 6.772716113138223e-05,
"grad_norm": NaN,
"learning_rate": 8.000000000000001e-06,
"loss": 0.0,
"step": 4
},
{
"epoch": 8.465895141422778e-05,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 5
},
{
"epoch": 0.00010159074169707334,
"grad_norm": NaN,
"learning_rate": 1.2e-05,
"loss": 0.0,
"step": 6
},
{
"epoch": 0.0001185225319799189,
"grad_norm": NaN,
"learning_rate": 1.4e-05,
"loss": 0.0,
"step": 7
},
{
"epoch": 0.00013545432226276446,
"grad_norm": NaN,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.0,
"step": 8
},
{
"epoch": 0.00015238611254561,
"grad_norm": NaN,
"learning_rate": 1.8e-05,
"loss": 0.0,
"step": 9
},
{
"epoch": 0.00015238611254561,
"eval_loss": NaN,
"eval_runtime": 1939.6486,
"eval_samples_per_second": 25.641,
"eval_steps_per_second": 3.205,
"step": 9
},
{
"epoch": 0.00016931790282845557,
"grad_norm": NaN,
"learning_rate": 2e-05,
"loss": 0.0,
"step": 10
},
{
"epoch": 0.0001862496931113011,
"grad_norm": NaN,
"learning_rate": 1.999390827019096e-05,
"loss": 0.0,
"step": 11
},
{
"epoch": 0.00020318148339414668,
"grad_norm": NaN,
"learning_rate": 1.9975640502598243e-05,
"loss": 0.0,
"step": 12
},
{
"epoch": 0.00022011327367699223,
"grad_norm": NaN,
"learning_rate": 1.9945218953682736e-05,
"loss": 0.0,
"step": 13
},
{
"epoch": 0.0002370450639598378,
"grad_norm": NaN,
"learning_rate": 1.9902680687415704e-05,
"loss": 0.0,
"step": 14
},
{
"epoch": 0.00025397685424268337,
"grad_norm": NaN,
"learning_rate": 1.9848077530122083e-05,
"loss": 0.0,
"step": 15
},
{
"epoch": 0.0002709086445255289,
"grad_norm": NaN,
"learning_rate": 1.9781476007338058e-05,
"loss": 0.0,
"step": 16
},
{
"epoch": 0.00028784043480837445,
"grad_norm": NaN,
"learning_rate": 1.9702957262759964e-05,
"loss": 0.0,
"step": 17
},
{
"epoch": 0.00030477222509122,
"grad_norm": NaN,
"learning_rate": 1.961261695938319e-05,
"loss": 0.0,
"step": 18
},
{
"epoch": 0.00030477222509122,
"eval_loss": NaN,
"eval_runtime": 1956.0542,
"eval_samples_per_second": 25.426,
"eval_steps_per_second": 3.178,
"step": 18
},
{
"epoch": 0.0003217040153740656,
"grad_norm": NaN,
"learning_rate": 1.9510565162951538e-05,
"loss": 0.0,
"step": 19
},
{
"epoch": 0.00033863580565691114,
"grad_norm": NaN,
"learning_rate": 1.9396926207859085e-05,
"loss": 0.0,
"step": 20
},
{
"epoch": 0.0003555675959397567,
"grad_norm": NaN,
"learning_rate": 1.9271838545667876e-05,
"loss": 0.0,
"step": 21
},
{
"epoch": 0.0003724993862226022,
"grad_norm": NaN,
"learning_rate": 1.913545457642601e-05,
"loss": 0.0,
"step": 22
},
{
"epoch": 0.0003894311765054478,
"grad_norm": NaN,
"learning_rate": 1.8987940462991673e-05,
"loss": 0.0,
"step": 23
},
{
"epoch": 0.00040636296678829337,
"grad_norm": NaN,
"learning_rate": 1.8829475928589272e-05,
"loss": 0.0,
"step": 24
},
{
"epoch": 0.0004232947570711389,
"grad_norm": NaN,
"learning_rate": 1.866025403784439e-05,
"loss": 0.0,
"step": 25
},
{
"epoch": 0.00044022654735398445,
"grad_norm": NaN,
"learning_rate": 1.848048096156426e-05,
"loss": 0.0,
"step": 26
},
{
"epoch": 0.00045715833763683005,
"grad_norm": NaN,
"learning_rate": 1.8290375725550417e-05,
"loss": 0.0,
"step": 27
},
{
"epoch": 0.00045715833763683005,
"eval_loss": NaN,
"eval_runtime": 1939.2011,
"eval_samples_per_second": 25.647,
"eval_steps_per_second": 3.206,
"step": 27
},
{
"epoch": 0.0004740901279196756,
"grad_norm": NaN,
"learning_rate": 1.8090169943749477e-05,
"loss": 0.0,
"step": 28
},
{
"epoch": 0.0004910219182025212,
"grad_norm": NaN,
"learning_rate": 1.788010753606722e-05,
"loss": 0.0,
"step": 29
},
{
"epoch": 0.0005079537084853667,
"grad_norm": NaN,
"learning_rate": 1.766044443118978e-05,
"loss": 0.0,
"step": 30
},
{
"epoch": 0.0005248854987682123,
"grad_norm": NaN,
"learning_rate": 1.7431448254773943e-05,
"loss": 0.0,
"step": 31
},
{
"epoch": 0.0005418172890510578,
"grad_norm": NaN,
"learning_rate": 1.7193398003386514e-05,
"loss": 0.0,
"step": 32
},
{
"epoch": 0.0005587490793339034,
"grad_norm": NaN,
"learning_rate": 1.6946583704589973e-05,
"loss": 0.0,
"step": 33
},
{
"epoch": 0.0005756808696167489,
"grad_norm": NaN,
"learning_rate": 1.6691306063588583e-05,
"loss": 0.0,
"step": 34
},
{
"epoch": 0.0005926126598995945,
"grad_norm": NaN,
"learning_rate": 1.6427876096865394e-05,
"loss": 0.0,
"step": 35
},
{
"epoch": 0.00060954445018244,
"grad_norm": NaN,
"learning_rate": 1.6156614753256583e-05,
"loss": 0.0,
"step": 36
},
{
"epoch": 0.00060954445018244,
"eval_loss": NaN,
"eval_runtime": 1939.6183,
"eval_samples_per_second": 25.642,
"eval_steps_per_second": 3.205,
"step": 36
},
{
"epoch": 0.0006264762404652856,
"grad_norm": NaN,
"learning_rate": 1.5877852522924733e-05,
"loss": 0.0,
"step": 37
},
{
"epoch": 0.0006434080307481312,
"grad_norm": NaN,
"learning_rate": 1.5591929034707468e-05,
"loss": 0.0,
"step": 38
},
{
"epoch": 0.0006603398210309767,
"grad_norm": NaN,
"learning_rate": 1.529919264233205e-05,
"loss": 0.0,
"step": 39
},
{
"epoch": 0.0006772716113138223,
"grad_norm": NaN,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.0,
"step": 40
},
{
"epoch": 0.0006942034015966678,
"grad_norm": NaN,
"learning_rate": 1.469471562785891e-05,
"loss": 0.0,
"step": 41
},
{
"epoch": 0.0007111351918795134,
"grad_norm": NaN,
"learning_rate": 1.4383711467890776e-05,
"loss": 0.0,
"step": 42
},
{
"epoch": 0.0007280669821623589,
"grad_norm": NaN,
"learning_rate": 1.4067366430758004e-05,
"loss": 0.0,
"step": 43
},
{
"epoch": 0.0007449987724452045,
"grad_norm": NaN,
"learning_rate": 1.3746065934159123e-05,
"loss": 0.0,
"step": 44
},
{
"epoch": 0.0007619305627280501,
"grad_norm": NaN,
"learning_rate": 1.342020143325669e-05,
"loss": 0.0,
"step": 45
},
{
"epoch": 0.0007619305627280501,
"eval_loss": NaN,
"eval_runtime": 1938.6726,
"eval_samples_per_second": 25.654,
"eval_steps_per_second": 3.207,
"step": 45
},
{
"epoch": 0.0007788623530108956,
"grad_norm": NaN,
"learning_rate": 1.3090169943749475e-05,
"loss": 0.0,
"step": 46
},
{
"epoch": 0.0007957941432937412,
"grad_norm": NaN,
"learning_rate": 1.2756373558169992e-05,
"loss": 0.0,
"step": 47
},
{
"epoch": 0.0008127259335765867,
"grad_norm": NaN,
"learning_rate": 1.2419218955996677e-05,
"loss": 0.0,
"step": 48
},
{
"epoch": 0.0008296577238594323,
"grad_norm": NaN,
"learning_rate": 1.2079116908177592e-05,
"loss": 0.0,
"step": 49
},
{
"epoch": 0.0008465895141422778,
"grad_norm": NaN,
"learning_rate": 1.1736481776669307e-05,
"loss": 0.0,
"step": 50
},
{
"epoch": 0.0008635213044251234,
"grad_norm": NaN,
"learning_rate": 1.1391731009600655e-05,
"loss": 0.0,
"step": 51
},
{
"epoch": 0.0008804530947079689,
"grad_norm": NaN,
"learning_rate": 1.1045284632676535e-05,
"loss": 0.0,
"step": 52
},
{
"epoch": 0.0008973848849908146,
"grad_norm": NaN,
"learning_rate": 1.0697564737441254e-05,
"loss": 0.0,
"step": 53
},
{
"epoch": 0.0009143166752736601,
"grad_norm": NaN,
"learning_rate": 1.0348994967025012e-05,
"loss": 0.0,
"step": 54
},
{
"epoch": 0.0009143166752736601,
"eval_loss": NaN,
"eval_runtime": 1939.1845,
"eval_samples_per_second": 25.647,
"eval_steps_per_second": 3.206,
"step": 54
},
{
"epoch": 0.0009312484655565056,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 55
},
{
"epoch": 0.0009481802558393512,
"grad_norm": NaN,
"learning_rate": 9.651005032974994e-06,
"loss": 0.0,
"step": 56
},
{
"epoch": 0.0009651120461221967,
"grad_norm": NaN,
"learning_rate": 9.302435262558748e-06,
"loss": 0.0,
"step": 57
},
{
"epoch": 0.0009820438364050424,
"grad_norm": NaN,
"learning_rate": 8.954715367323468e-06,
"loss": 0.0,
"step": 58
},
{
"epoch": 0.0009989756266878878,
"grad_norm": NaN,
"learning_rate": 8.60826899039935e-06,
"loss": 0.0,
"step": 59
},
{
"epoch": 0.0010159074169707335,
"grad_norm": NaN,
"learning_rate": 8.263518223330698e-06,
"loss": 0.0,
"step": 60
},
{
"epoch": 0.001032839207253579,
"grad_norm": NaN,
"learning_rate": 7.92088309182241e-06,
"loss": 0.0,
"step": 61
},
{
"epoch": 0.0010497709975364246,
"grad_norm": NaN,
"learning_rate": 7.580781044003324e-06,
"loss": 0.0,
"step": 62
},
{
"epoch": 0.00106670278781927,
"grad_norm": NaN,
"learning_rate": 7.243626441830009e-06,
"loss": 0.0,
"step": 63
},
{
"epoch": 0.00106670278781927,
"eval_loss": NaN,
"eval_runtime": 1939.9054,
"eval_samples_per_second": 25.638,
"eval_steps_per_second": 3.205,
"step": 63
},
{
"epoch": 0.0010836345781021156,
"grad_norm": NaN,
"learning_rate": 6.909830056250527e-06,
"loss": 0.0,
"step": 64
},
{
"epoch": 0.001100566368384961,
"grad_norm": NaN,
"learning_rate": 6.579798566743314e-06,
"loss": 0.0,
"step": 65
},
{
"epoch": 0.0011174981586678067,
"grad_norm": NaN,
"learning_rate": 6.25393406584088e-06,
"loss": 0.0,
"step": 66
},
{
"epoch": 0.0011344299489506524,
"grad_norm": NaN,
"learning_rate": 5.932633569242e-06,
"loss": 0.0,
"step": 67
},
{
"epoch": 0.0011513617392334978,
"grad_norm": NaN,
"learning_rate": 5.616288532109225e-06,
"loss": 0.0,
"step": 68
},
{
"epoch": 0.0011682935295163435,
"grad_norm": NaN,
"learning_rate": 5.305284372141095e-06,
"loss": 0.0,
"step": 69
},
{
"epoch": 0.001185225319799189,
"grad_norm": NaN,
"learning_rate": 5.000000000000003e-06,
"loss": 0.0,
"step": 70
},
{
"epoch": 0.0012021571100820346,
"grad_norm": NaN,
"learning_rate": 4.700807357667953e-06,
"loss": 0.0,
"step": 71
},
{
"epoch": 0.00121908890036488,
"grad_norm": NaN,
"learning_rate": 4.408070965292534e-06,
"loss": 0.0,
"step": 72
},
{
"epoch": 0.00121908890036488,
"eval_loss": NaN,
"eval_runtime": 1939.3736,
"eval_samples_per_second": 25.645,
"eval_steps_per_second": 3.206,
"step": 72
},
{
"epoch": 0.0012360206906477256,
"grad_norm": NaN,
"learning_rate": 4.12214747707527e-06,
"loss": 0.0,
"step": 73
},
{
"epoch": 0.0012529524809305713,
"grad_norm": NaN,
"learning_rate": 3.8433852467434175e-06,
"loss": 0.0,
"step": 74
},
{
"epoch": 0.0012698842712134167,
"grad_norm": NaN,
"learning_rate": 3.5721239031346067e-06,
"loss": 0.0,
"step": 75
},
{
"epoch": 0.0012868160614962624,
"grad_norm": NaN,
"learning_rate": 3.308693936411421e-06,
"loss": 0.0,
"step": 76
},
{
"epoch": 0.0013037478517791078,
"grad_norm": NaN,
"learning_rate": 3.0534162954100264e-06,
"loss": 0.0,
"step": 77
},
{
"epoch": 0.0013206796420619535,
"grad_norm": NaN,
"learning_rate": 2.8066019966134907e-06,
"loss": 0.0,
"step": 78
},
{
"epoch": 0.001337611432344799,
"grad_norm": NaN,
"learning_rate": 2.5685517452260566e-06,
"loss": 0.0,
"step": 79
},
{
"epoch": 0.0013545432226276446,
"grad_norm": NaN,
"learning_rate": 2.339555568810221e-06,
"loss": 0.0,
"step": 80
},
{
"epoch": 0.00137147501291049,
"grad_norm": NaN,
"learning_rate": 2.119892463932781e-06,
"loss": 0.0,
"step": 81
},
{
"epoch": 0.00137147501291049,
"eval_loss": NaN,
"eval_runtime": 1938.6889,
"eval_samples_per_second": 25.654,
"eval_steps_per_second": 3.207,
"step": 81
},
{
"epoch": 0.0013884068031933356,
"grad_norm": NaN,
"learning_rate": 1.9098300562505266e-06,
"loss": 0.0,
"step": 82
},
{
"epoch": 0.0014053385934761813,
"grad_norm": NaN,
"learning_rate": 1.709624274449584e-06,
"loss": 0.0,
"step": 83
},
{
"epoch": 0.0014222703837590267,
"grad_norm": NaN,
"learning_rate": 1.5195190384357405e-06,
"loss": 0.0,
"step": 84
},
{
"epoch": 0.0014392021740418724,
"grad_norm": NaN,
"learning_rate": 1.339745962155613e-06,
"loss": 0.0,
"step": 85
},
{
"epoch": 0.0014561339643247178,
"grad_norm": NaN,
"learning_rate": 1.1705240714107301e-06,
"loss": 0.0,
"step": 86
},
{
"epoch": 0.0014730657546075635,
"grad_norm": NaN,
"learning_rate": 1.012059537008332e-06,
"loss": 0.0,
"step": 87
},
{
"epoch": 0.001489997544890409,
"grad_norm": NaN,
"learning_rate": 8.645454235739903e-07,
"loss": 0.0,
"step": 88
},
{
"epoch": 0.0015069293351732546,
"grad_norm": NaN,
"learning_rate": 7.281614543321269e-07,
"loss": 0.0,
"step": 89
},
{
"epoch": 0.0015238611254561002,
"grad_norm": NaN,
"learning_rate": 6.030737921409169e-07,
"loss": 0.0,
"step": 90
},
{
"epoch": 0.0015238611254561002,
"eval_loss": NaN,
"eval_runtime": 1940.5626,
"eval_samples_per_second": 25.629,
"eval_steps_per_second": 3.204,
"step": 90
},
{
"epoch": 0.0015407929157389456,
"grad_norm": NaN,
"learning_rate": 4.894348370484648e-07,
"loss": 0.0,
"step": 91
},
{
"epoch": 0.0015577247060217913,
"grad_norm": NaN,
"learning_rate": 3.8738304061681107e-07,
"loss": 0.0,
"step": 92
},
{
"epoch": 0.0015746564963046367,
"grad_norm": NaN,
"learning_rate": 2.970427372400353e-07,
"loss": 0.0,
"step": 93
},
{
"epoch": 0.0015915882865874824,
"grad_norm": NaN,
"learning_rate": 2.1852399266194312e-07,
"loss": 0.0,
"step": 94
},
{
"epoch": 0.0016085200768703278,
"grad_norm": NaN,
"learning_rate": 1.519224698779198e-07,
"loss": 0.0,
"step": 95
},
{
"epoch": 0.0016254518671531735,
"grad_norm": NaN,
"learning_rate": 9.731931258429638e-08,
"loss": 0.0,
"step": 96
},
{
"epoch": 0.001642383657436019,
"grad_norm": NaN,
"learning_rate": 5.4781046317267103e-08,
"loss": 0.0,
"step": 97
},
{
"epoch": 0.0016593154477188646,
"grad_norm": NaN,
"learning_rate": 2.4359497401758026e-08,
"loss": 0.0,
"step": 98
},
{
"epoch": 0.0016762472380017102,
"grad_norm": NaN,
"learning_rate": 6.091729809042379e-09,
"loss": 0.0,
"step": 99
},
{
"epoch": 0.0016762472380017102,
"eval_loss": NaN,
"eval_runtime": 1940.5881,
"eval_samples_per_second": 25.629,
"eval_steps_per_second": 3.204,
"step": 99
},
{
"epoch": 0.0016931790282845556,
"grad_norm": NaN,
"learning_rate": 0.0,
"loss": 0.0,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.30626883682304e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
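
The JSON above is the Trainer's state log for a 100-step run: training metrics are logged every step, evaluation runs every 9 steps, and checkpoints are saved every 25 steps. The learning-rate column is consistent with roughly 10 linear warmup steps followed by cosine decay from 2e-05 to zero. Every logged grad_norm and eval_loss is NaN and the reported training loss is 0.0, which typically indicates the run overflowed or diverged early rather than trained. Below is a minimal inspection sketch, assuming the file is saved locally under its standard name trainer_state.json; the path and the summary printed are illustrative, not part of the original checkpoint.

import json
import math

# Load the trainer state; Python's json module accepts the bare NaN literals
# that the Trainer writes for grad_norm and eval_loss.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training-step entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Collect the steps whose gradient norm or eval loss came back as NaN.
bad_grads = [e["step"] for e in train_logs
             if isinstance(e.get("grad_norm"), float) and math.isnan(e["grad_norm"])]
bad_evals = [e["step"] for e in eval_logs
             if isinstance(e.get("eval_loss"), float) and math.isnan(e["eval_loss"])]

print(f"train steps logged: {len(train_logs)}, eval points: {len(eval_logs)}")
print(f"steps with NaN grad_norm: {len(bad_grads)} of {len(train_logs)}")
print(f"eval points with NaN eval_loss: {len(bad_evals)} of {len(eval_logs)}")

Run against this particular file, the sketch would report 100 training steps and 12 evaluation points, all of them flagged as NaN.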