{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.005264081417792595,
"eval_steps": 8,
"global_step": 30,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00017546938059308652,
"grad_norm": 0.021896077319979668,
"learning_rate": 2e-05,
"loss": 10.3889,
"step": 1
},
{
"epoch": 0.00017546938059308652,
"eval_loss": 10.378170013427734,
"eval_runtime": 20.1811,
"eval_samples_per_second": 118.923,
"eval_steps_per_second": 59.462,
"step": 1
},
{
"epoch": 0.00035093876118617303,
"grad_norm": 0.02895743027329445,
"learning_rate": 4e-05,
"loss": 10.3799,
"step": 2
},
{
"epoch": 0.0005264081417792595,
"grad_norm": 0.03139834105968475,
"learning_rate": 6e-05,
"loss": 10.3833,
"step": 3
},
{
"epoch": 0.0007018775223723461,
"grad_norm": 0.03180989623069763,
"learning_rate": 8e-05,
"loss": 10.3725,
"step": 4
},
{
"epoch": 0.0008773469029654326,
"grad_norm": 0.04147100821137428,
"learning_rate": 0.0001,
"loss": 10.3725,
"step": 5
},
{
"epoch": 0.001052816283558519,
"grad_norm": 0.034542471170425415,
"learning_rate": 0.00012,
"loss": 10.3629,
"step": 6
},
{
"epoch": 0.0012282856641516056,
"grad_norm": 0.030410971492528915,
"learning_rate": 0.00014,
"loss": 10.3853,
"step": 7
},
{
"epoch": 0.0014037550447446921,
"grad_norm": 0.026322724297642708,
"learning_rate": 0.00016,
"loss": 10.3725,
"step": 8
},
{
"epoch": 0.0014037550447446921,
"eval_loss": 10.377918243408203,
"eval_runtime": 20.1636,
"eval_samples_per_second": 119.026,
"eval_steps_per_second": 59.513,
"step": 8
},
{
"epoch": 0.0015792244253377786,
"grad_norm": 0.023104362189769745,
"learning_rate": 0.00018,
"loss": 10.3882,
"step": 9
},
{
"epoch": 0.001754693805930865,
"grad_norm": 0.0319962278008461,
"learning_rate": 0.0002,
"loss": 10.3865,
"step": 10
},
{
"epoch": 0.0019301631865239516,
"grad_norm": 0.031027458608150482,
"learning_rate": 0.00019876883405951377,
"loss": 10.3821,
"step": 11
},
{
"epoch": 0.002105632567117038,
"grad_norm": 0.04138890653848648,
"learning_rate": 0.00019510565162951537,
"loss": 10.3692,
"step": 12
},
{
"epoch": 0.0022811019477101246,
"grad_norm": 0.02614734135568142,
"learning_rate": 0.0001891006524188368,
"loss": 10.3891,
"step": 13
},
{
"epoch": 0.0024565713283032113,
"grad_norm": 0.02401694655418396,
"learning_rate": 0.00018090169943749476,
"loss": 10.3821,
"step": 14
},
{
"epoch": 0.0026320407088962976,
"grad_norm": 0.03232363983988762,
"learning_rate": 0.00017071067811865476,
"loss": 10.3736,
"step": 15
},
{
"epoch": 0.0028075100894893843,
"grad_norm": 0.02802196890115738,
"learning_rate": 0.00015877852522924732,
"loss": 10.3833,
"step": 16
},
{
"epoch": 0.0028075100894893843,
"eval_loss": 10.37726879119873,
"eval_runtime": 20.1987,
"eval_samples_per_second": 118.819,
"eval_steps_per_second": 59.41,
"step": 16
},
{
"epoch": 0.0029829794700824705,
"grad_norm": 0.032058119773864746,
"learning_rate": 0.00014539904997395468,
"loss": 10.3759,
"step": 17
},
{
"epoch": 0.0031584488506755572,
"grad_norm": 0.03021497279405594,
"learning_rate": 0.00013090169943749476,
"loss": 10.3804,
"step": 18
},
{
"epoch": 0.0033339182312686435,
"grad_norm": 0.02468908578157425,
"learning_rate": 0.0001156434465040231,
"loss": 10.3832,
"step": 19
},
{
"epoch": 0.00350938761186173,
"grad_norm": 0.03662308305501938,
"learning_rate": 0.0001,
"loss": 10.3869,
"step": 20
},
{
"epoch": 0.0036848569924548165,
"grad_norm": 0.031086675822734833,
"learning_rate": 8.435655349597689e-05,
"loss": 10.3756,
"step": 21
},
{
"epoch": 0.003860326373047903,
"grad_norm": 0.030318187549710274,
"learning_rate": 6.909830056250527e-05,
"loss": 10.372,
"step": 22
},
{
"epoch": 0.00403579575364099,
"grad_norm": 0.03376461938023567,
"learning_rate": 5.4600950026045326e-05,
"loss": 10.377,
"step": 23
},
{
"epoch": 0.004211265134234076,
"grad_norm": 0.02785138599574566,
"learning_rate": 4.12214747707527e-05,
"loss": 10.3689,
"step": 24
},
{
"epoch": 0.004211265134234076,
"eval_loss": 10.376875877380371,
"eval_runtime": 20.1626,
"eval_samples_per_second": 119.032,
"eval_steps_per_second": 59.516,
"step": 24
},
{
"epoch": 0.0043867345148271624,
"grad_norm": 0.02769087813794613,
"learning_rate": 2.9289321881345254e-05,
"loss": 10.374,
"step": 25
},
{
"epoch": 0.004562203895420249,
"grad_norm": 0.03563356772065163,
"learning_rate": 1.9098300562505266e-05,
"loss": 10.38,
"step": 26
},
{
"epoch": 0.004737673276013336,
"grad_norm": 0.0283515527844429,
"learning_rate": 1.0899347581163221e-05,
"loss": 10.379,
"step": 27
},
{
"epoch": 0.0049131426566064226,
"grad_norm": 0.029156768694519997,
"learning_rate": 4.8943483704846475e-06,
"loss": 10.3778,
"step": 28
},
{
"epoch": 0.005088612037199508,
"grad_norm": 0.02999081462621689,
"learning_rate": 1.231165940486234e-06,
"loss": 10.377,
"step": 29
},
{
"epoch": 0.005264081417792595,
"grad_norm": 0.031297340989112854,
"learning_rate": 0.0,
"loss": 10.3777,
"step": 30
}
],
"logging_steps": 1,
"max_steps": 30,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 70,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1551708979200.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
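
A minimal sketch (not part of the checkpoint itself) of how the trainer_state.json above can be inspected with Python's standard library; the checkpoint path used here is an assumption for illustration.

    # Illustrative only: load the state shown above and summarize its log_history,
    # which mixes training records (with "loss") and evaluation records (with "eval_loss").
    import json

    # Hypothetical path; adjust to wherever this checkpoint is stored.
    with open("checkpoint-30/trainer_state.json") as f:
        state = json.load(f)

    eval_records = [r for r in state["log_history"] if "eval_loss" in r]

    print(f"global_step={state['global_step']}, epoch={state['epoch']:.6f}")
    for r in eval_records:
        print(f"step {r['step']:>2}: eval_loss={r['eval_loss']:.4f}")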
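A second illustrative sketch, inferred from the logged values rather than taken from the training script: the learning_rate entries are consistent with a linear warmup to 2e-4 over the first 10 steps, followed by cosine decay to zero at step 30 (the "max_steps" in this file).

    # Assumed schedule, reconstructed from the logged learning rates; not the actual training code.
    import math

    PEAK_LR = 2e-4       # value logged at step 10
    WARMUP_STEPS = 10    # inferred from the warmup ramp in log_history
    MAX_STEPS = 30       # matches "max_steps" above

    def lr_at(step: int) -> float:
        # Linear warmup, then cosine decay to zero.
        if step <= WARMUP_STEPS:
            return PEAK_LR * step / WARMUP_STEPS
        progress = (step - WARMUP_STEPS) / (MAX_STEPS - WARMUP_STEPS)
        return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

    # e.g. lr_at(11) ≈ 0.00019876883, lr_at(20) ≈ 0.0001, lr_at(30) == 0.0,
    # matching the corresponding "learning_rate" entries in log_history.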