{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9615384615384617,
  "eval_steps": 7,
  "global_step": 77,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.038461538461538464,
      "grad_norm": 1.3442820310592651,
      "learning_rate": 1e-05,
      "loss": 1.8401,
      "step": 1
    },
    {
      "epoch": 0.038461538461538464,
      "eval_loss": NaN,
      "eval_runtime": 5.3672,
      "eval_samples_per_second": 4.099,
      "eval_steps_per_second": 0.559,
      "step": 1
    },
    {
      "epoch": 0.07692307692307693,
      "grad_norm": 0.9724370837211609,
      "learning_rate": 2e-05,
      "loss": 1.8328,
      "step": 2
    },
    {
      "epoch": 0.11538461538461539,
      "grad_norm": 1.101107120513916,
      "learning_rate": 3e-05,
      "loss": 1.678,
      "step": 3
    },
    {
      "epoch": 0.15384615384615385,
      "grad_norm": 1.3484959602355957,
      "learning_rate": 4e-05,
      "loss": 1.912,
      "step": 4
    },
    {
      "epoch": 0.19230769230769232,
      "grad_norm": NaN,
      "learning_rate": 5e-05,
      "loss": 1.9033,
      "step": 5
    },
    {
      "epoch": 0.23076923076923078,
      "grad_norm": NaN,
      "learning_rate": 6e-05,
      "loss": 0.0,
      "step": 6
    },
    {
      "epoch": 0.2692307692307692,
      "grad_norm": NaN,
      "learning_rate": 7e-05,
      "loss": 0.0,
      "step": 7
    },
    {
      "epoch": 0.2692307692307692,
      "eval_loss": NaN,
      "eval_runtime": 4.8644,
      "eval_samples_per_second": 4.523,
      "eval_steps_per_second": 0.617,
      "step": 7
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": NaN,
      "learning_rate": 8e-05,
      "loss": 0.0,
      "step": 8
    },
    {
      "epoch": 0.34615384615384615,
      "grad_norm": NaN,
      "learning_rate": 9e-05,
      "loss": 0.0,
      "step": 9
    },
    {
      "epoch": 0.38461538461538464,
      "grad_norm": NaN,
      "learning_rate": 0.0001,
      "loss": 0.0,
      "step": 10
    },
    {
      "epoch": 0.4230769230769231,
      "grad_norm": NaN,
      "learning_rate": 9.994504457428558e-05,
      "loss": 0.0,
      "step": 11
    },
    {
      "epoch": 0.46153846153846156,
      "grad_norm": NaN,
      "learning_rate": 9.978029910109491e-05,
      "loss": 0.0,
      "step": 12
    },
    {
      "epoch": 0.5,
      "grad_norm": NaN,
      "learning_rate": 9.950612572673255e-05,
      "loss": 0.0,
      "step": 13
    },
    {
      "epoch": 0.5384615384615384,
      "grad_norm": NaN,
      "learning_rate": 9.91231271437788e-05,
      "loss": 0.0,
      "step": 14
    },
    {
      "epoch": 0.5384615384615384,
      "eval_loss": NaN,
      "eval_runtime": 4.8639,
      "eval_samples_per_second": 4.523,
      "eval_steps_per_second": 0.617,
      "step": 14
    },
    {
      "epoch": 0.5769230769230769,
      "grad_norm": NaN,
      "learning_rate": 9.863214526624065e-05,
      "loss": 0.0,
      "step": 15
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": NaN,
      "learning_rate": 9.8034259378842e-05,
      "loss": 0.0,
      "step": 16
    },
    {
      "epoch": 0.6538461538461539,
      "grad_norm": NaN,
      "learning_rate": 9.733078376452171e-05,
      "loss": 0.0,
      "step": 17
    },
    {
      "epoch": 0.6923076923076923,
      "grad_norm": NaN,
      "learning_rate": 9.652326481535435e-05,
      "loss": 0.0,
      "step": 18
    },
    {
      "epoch": 0.7307692307692307,
      "grad_norm": NaN,
      "learning_rate": 9.561347763324484e-05,
      "loss": 0.0,
      "step": 19
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": NaN,
      "learning_rate": 9.460342212786932e-05,
      "loss": 0.0,
      "step": 20
    },
    {
      "epoch": 0.8076923076923077,
      "grad_norm": NaN,
      "learning_rate": 9.349531862043952e-05,
      "loss": 0.0,
      "step": 21
    },
    {
      "epoch": 0.8076923076923077,
      "eval_loss": NaN,
      "eval_runtime": 4.865,
      "eval_samples_per_second": 4.522,
      "eval_steps_per_second": 0.617,
      "step": 21
    },
    {
      "epoch": 0.8461538461538461,
      "grad_norm": NaN,
      "learning_rate": 9.229160296295488e-05,
      "loss": 0.0,
      "step": 22
    },
    {
      "epoch": 0.8846153846153846,
      "grad_norm": NaN,
      "learning_rate": 9.099492118367123e-05,
      "loss": 0.0,
      "step": 23
    },
    {
      "epoch": 0.9230769230769231,
      "grad_norm": NaN,
      "learning_rate": 8.960812367055646e-05,
      "loss": 0.0,
      "step": 24
    },
    {
      "epoch": 0.9615384615384616,
      "grad_norm": NaN,
      "learning_rate": 8.81342589055191e-05,
      "loss": 0.0,
      "step": 25
    },
    {
      "epoch": 1.0,
      "grad_norm": NaN,
      "learning_rate": 8.657656676318346e-05,
      "loss": 0.0,
      "step": 26
    },
    {
      "epoch": 1.0384615384615385,
      "grad_norm": NaN,
      "learning_rate": 8.493847138894209e-05,
      "loss": 0.0,
      "step": 27
    },
    {
      "epoch": 1.0769230769230769,
      "grad_norm": NaN,
      "learning_rate": 8.322357367194109e-05,
      "loss": 0.0,
      "step": 28
    },
    {
      "epoch": 1.0769230769230769,
      "eval_loss": NaN,
      "eval_runtime": 4.8674,
      "eval_samples_per_second": 4.52,
      "eval_steps_per_second": 0.616,
      "step": 28
    },
    {
      "epoch": 1.1153846153846154,
      "grad_norm": NaN,
      "learning_rate": 8.143564332954425e-05,
      "loss": 0.0,
      "step": 29
    },
    {
      "epoch": 1.1538461538461537,
      "grad_norm": NaN,
      "learning_rate": 7.957861062067614e-05,
      "loss": 0.0,
      "step": 30
    },
    {
      "epoch": 1.1923076923076923,
      "grad_norm": NaN,
      "learning_rate": 7.765655770625997e-05,
      "loss": 0.0,
      "step": 31
    },
    {
      "epoch": 1.2307692307692308,
      "grad_norm": NaN,
      "learning_rate": 7.56737096757421e-05,
      "loss": 0.0,
      "step": 32
    },
    {
      "epoch": 1.2692307692307692,
      "grad_norm": NaN,
      "learning_rate": 7.363442525942826e-05,
      "loss": 0.0,
      "step": 33
    },
    {
      "epoch": 1.3076923076923077,
      "grad_norm": NaN,
      "learning_rate": 7.154318724704853e-05,
      "loss": 0.0,
      "step": 34
    },
    {
      "epoch": 1.3461538461538463,
      "grad_norm": NaN,
      "learning_rate": 6.940459263361249e-05,
      "loss": 0.0,
      "step": 35
    },
    {
      "epoch": 1.3461538461538463,
      "eval_loss": NaN,
      "eval_runtime": 4.8653,
      "eval_samples_per_second": 4.522,
      "eval_steps_per_second": 0.617,
      "step": 35
    },
    {
      "epoch": 1.3846153846153846,
      "grad_norm": NaN,
      "learning_rate": 6.722334251421665e-05,
      "loss": 0.0,
      "step": 36
    },
    {
      "epoch": 1.4230769230769231,
      "grad_norm": NaN,
      "learning_rate": 6.500423175001705e-05,
      "loss": 0.0,
      "step": 37
    },
    {
      "epoch": 1.4615384615384617,
      "grad_norm": NaN,
      "learning_rate": 6.275213842808383e-05,
      "loss": 0.0,
      "step": 38
    },
    {
      "epoch": 1.5,
      "grad_norm": NaN,
      "learning_rate": 6.0472013138307235e-05,
      "loss": 0.0,
      "step": 39
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": NaN,
      "learning_rate": 5.816886809092651e-05,
      "loss": 0.0,
      "step": 40
    },
    {
      "epoch": 1.5769230769230769,
      "grad_norm": NaN,
      "learning_rate": 5.584776609860414e-05,
      "loss": 0.0,
      "step": 41
    },
    {
      "epoch": 1.6153846153846154,
      "grad_norm": NaN,
      "learning_rate": 5.351380944726465e-05,
      "loss": 0.0,
      "step": 42
    },
    {
      "epoch": 1.6153846153846154,
      "eval_loss": NaN,
      "eval_runtime": 4.8664,
      "eval_samples_per_second": 4.521,
      "eval_steps_per_second": 0.616,
      "step": 42
    },
    {
      "epoch": 1.6538461538461537,
      "grad_norm": NaN,
      "learning_rate": 5.117212868016303e-05,
      "loss": 0.0,
      "step": 43
    },
    {
      "epoch": 1.6923076923076923,
      "grad_norm": NaN,
      "learning_rate": 4.882787131983698e-05,
      "loss": 0.0,
      "step": 44
    },
    {
      "epoch": 1.7307692307692308,
      "grad_norm": NaN,
      "learning_rate": 4.648619055273537e-05,
      "loss": 0.0,
      "step": 45
    },
    {
      "epoch": 1.7692307692307692,
      "grad_norm": NaN,
      "learning_rate": 4.415223390139588e-05,
      "loss": 0.0,
      "step": 46
    },
    {
      "epoch": 1.8076923076923077,
      "grad_norm": NaN,
      "learning_rate": 4.183113190907349e-05,
      "loss": 0.0,
      "step": 47
    },
    {
      "epoch": 1.8461538461538463,
      "grad_norm": NaN,
      "learning_rate": 3.952798686169279e-05,
      "loss": 0.0,
      "step": 48
    },
    {
      "epoch": 1.8846153846153846,
      "grad_norm": NaN,
      "learning_rate": 3.7247861571916185e-05,
      "loss": 0.0,
      "step": 49
    },
    {
      "epoch": 1.8846153846153846,
      "eval_loss": NaN,
      "eval_runtime": 4.8653,
      "eval_samples_per_second": 4.522,
      "eval_steps_per_second": 0.617,
      "step": 49
    },
    {
      "epoch": 1.9230769230769231,
      "grad_norm": NaN,
      "learning_rate": 3.499576824998298e-05,
      "loss": 0.0,
      "step": 50
    },
    {
      "epoch": 1.9615384615384617,
      "grad_norm": NaN,
      "learning_rate": 3.277665748578336e-05,
      "loss": 0.0,
      "step": 51
    },
    {
      "epoch": 2.0,
      "grad_norm": NaN,
      "learning_rate": 3.0595407366387504e-05,
      "loss": 0.0,
      "step": 52
    },
    {
      "epoch": 2.0384615384615383,
      "grad_norm": NaN,
      "learning_rate": 2.8456812752951485e-05,
      "loss": 0.0,
      "step": 53
    },
    {
      "epoch": 2.076923076923077,
      "grad_norm": NaN,
      "learning_rate": 2.636557474057173e-05,
      "loss": 0.0,
      "step": 54
    },
    {
      "epoch": 2.1153846153846154,
      "grad_norm": NaN,
      "learning_rate": 2.4326290324257894e-05,
      "loss": 0.0,
      "step": 55
    },
    {
      "epoch": 2.1538461538461537,
      "grad_norm": NaN,
      "learning_rate": 2.234344229374003e-05,
      "loss": 0.0,
      "step": 56
    },
    {
      "epoch": 2.1538461538461537,
      "eval_loss": NaN,
      "eval_runtime": 4.8673,
      "eval_samples_per_second": 4.52,
      "eval_steps_per_second": 0.616,
      "step": 56
    },
    {
      "epoch": 2.1923076923076925,
      "grad_norm": NaN,
      "learning_rate": 2.042138937932388e-05,
      "loss": 0.0,
      "step": 57
    },
    {
      "epoch": 2.230769230769231,
      "grad_norm": NaN,
      "learning_rate": 1.8564356670455767e-05,
      "loss": 0.0,
      "step": 58
    },
    {
      "epoch": 2.269230769230769,
      "grad_norm": NaN,
      "learning_rate": 1.677642632805892e-05,
      "loss": 0.0,
      "step": 59
    },
    {
      "epoch": 2.3076923076923075,
      "grad_norm": NaN,
      "learning_rate": 1.5061528611057918e-05,
      "loss": 0.0,
      "step": 60
    },
    {
      "epoch": 2.3461538461538463,
      "grad_norm": NaN,
      "learning_rate": 1.3423433236816563e-05,
      "loss": 0.0,
      "step": 61
    },
    {
      "epoch": 2.3846153846153846,
      "grad_norm": NaN,
      "learning_rate": 1.1865741094480909e-05,
      "loss": 0.0,
      "step": 62
    },
    {
      "epoch": 2.423076923076923,
      "grad_norm": NaN,
      "learning_rate": 1.0391876329443533e-05,
      "loss": 0.0,
      "step": 63
    },
    {
      "epoch": 2.423076923076923,
      "eval_loss": NaN,
      "eval_runtime": 4.8685,
      "eval_samples_per_second": 4.519,
      "eval_steps_per_second": 0.616,
      "step": 63
    },
    {
      "epoch": 2.4615384615384617,
      "grad_norm": NaN,
      "learning_rate": 9.005078816328771e-06,
      "loss": 0.0,
      "step": 64
    },
    {
      "epoch": 2.5,
      "grad_norm": NaN,
      "learning_rate": 7.708397037045129e-06,
      "loss": 0.0,
      "step": 65
    },
    {
      "epoch": 2.5384615384615383,
      "grad_norm": NaN,
      "learning_rate": 6.50468137956049e-06,
      "loss": 0.0,
      "step": 66
    },
    {
      "epoch": 2.5769230769230766,
      "grad_norm": NaN,
      "learning_rate": 5.3965778721306755e-06,
      "loss": 0.0,
      "step": 67
    },
    {
      "epoch": 2.6153846153846154,
      "grad_norm": NaN,
      "learning_rate": 4.386522366755169e-06,
      "loss": 0.0,
      "step": 68
    },
    {
      "epoch": 2.6538461538461537,
      "grad_norm": NaN,
      "learning_rate": 3.476735184645674e-06,
      "loss": 0.0,
      "step": 69
    },
    {
      "epoch": 2.6923076923076925,
      "grad_norm": NaN,
      "learning_rate": 2.6692162354782944e-06,
      "loss": 0.0,
      "step": 70
    },
    {
      "epoch": 2.6923076923076925,
      "eval_loss": NaN,
      "eval_runtime": 4.868,
      "eval_samples_per_second": 4.519,
      "eval_steps_per_second": 0.616,
      "step": 70
    },
    {
      "epoch": 2.730769230769231,
      "grad_norm": NaN,
      "learning_rate": 1.9657406211579966e-06,
      "loss": 0.0,
      "step": 71
    },
    {
      "epoch": 2.769230769230769,
      "grad_norm": NaN,
      "learning_rate": 1.3678547337593494e-06,
      "loss": 0.0,
      "step": 72
    },
    {
      "epoch": 2.8076923076923075,
      "grad_norm": NaN,
      "learning_rate": 8.768728562211947e-07,
      "loss": 0.0,
      "step": 73
    },
    {
      "epoch": 2.8461538461538463,
      "grad_norm": NaN,
      "learning_rate": 4.938742732674529e-07,
      "loss": 0.0,
      "step": 74
    },
    {
      "epoch": 2.8846153846153846,
      "grad_norm": NaN,
      "learning_rate": 2.1970089890509527e-07,
      "loss": 0.0,
      "step": 75
    },
    {
      "epoch": 2.9230769230769234,
      "grad_norm": NaN,
      "learning_rate": 5.4955425714431353e-08,
      "loss": 0.0,
      "step": 76
    },
    {
      "epoch": 2.9615384615384617,
      "grad_norm": NaN,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 77
    },
    {
      "epoch": 2.9615384615384617,
      "eval_loss": NaN,
      "eval_runtime": 4.8662,
      "eval_samples_per_second": 4.521,
      "eval_steps_per_second": 0.616,
      "step": 77
    }
  ],
  "logging_steps": 1,
  "max_steps": 77,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1313781074296832e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}