{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.5474452554744526,
"eval_steps": 9,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0072992700729927005,
"grad_norm": 0.5759531855583191,
"learning_rate": 1e-05,
"loss": 2.5578,
"step": 1
},
{
"epoch": 0.0072992700729927005,
"eval_loss": 2.5493626594543457,
"eval_runtime": 3.9453,
"eval_samples_per_second": 29.402,
"eval_steps_per_second": 3.802,
"step": 1
},
{
"epoch": 0.014598540145985401,
"grad_norm": 0.4575677216053009,
"learning_rate": 2e-05,
"loss": 2.4742,
"step": 2
},
{
"epoch": 0.021897810218978103,
"grad_norm": 0.4650042653083801,
"learning_rate": 3e-05,
"loss": 2.5112,
"step": 3
},
{
"epoch": 0.029197080291970802,
"grad_norm": 0.5823487043380737,
"learning_rate": 4e-05,
"loss": 2.5448,
"step": 4
},
{
"epoch": 0.0364963503649635,
"grad_norm": 0.4799887239933014,
"learning_rate": 5e-05,
"loss": 2.5753,
"step": 5
},
{
"epoch": 0.043795620437956206,
"grad_norm": 0.576637864112854,
"learning_rate": 6e-05,
"loss": 2.5572,
"step": 6
},
{
"epoch": 0.051094890510948905,
"grad_norm": 0.500969648361206,
"learning_rate": 7e-05,
"loss": 2.4998,
"step": 7
},
{
"epoch": 0.058394160583941604,
"grad_norm": 0.48866644501686096,
"learning_rate": 8e-05,
"loss": 2.4528,
"step": 8
},
{
"epoch": 0.06569343065693431,
"grad_norm": 0.45695871114730835,
"learning_rate": 9e-05,
"loss": 2.4748,
"step": 9
},
{
"epoch": 0.06569343065693431,
"eval_loss": 2.4808189868927,
"eval_runtime": 3.3274,
"eval_samples_per_second": 34.862,
"eval_steps_per_second": 4.508,
"step": 9
},
{
"epoch": 0.072992700729927,
"grad_norm": 0.5332048535346985,
"learning_rate": 0.0001,
"loss": 2.2931,
"step": 10
},
{
"epoch": 0.08029197080291971,
"grad_norm": 0.5480175614356995,
"learning_rate": 9.99695413509548e-05,
"loss": 2.4459,
"step": 11
},
{
"epoch": 0.08759124087591241,
"grad_norm": 0.5366429090499878,
"learning_rate": 9.987820251299122e-05,
"loss": 2.3188,
"step": 12
},
{
"epoch": 0.0948905109489051,
"grad_norm": 0.5494810938835144,
"learning_rate": 9.972609476841367e-05,
"loss": 2.3401,
"step": 13
},
{
"epoch": 0.10218978102189781,
"grad_norm": 0.536588728427887,
"learning_rate": 9.951340343707852e-05,
"loss": 2.2276,
"step": 14
},
{
"epoch": 0.10948905109489052,
"grad_norm": 0.578070342540741,
"learning_rate": 9.924038765061042e-05,
"loss": 2.2032,
"step": 15
},
{
"epoch": 0.11678832116788321,
"grad_norm": 0.5455062985420227,
"learning_rate": 9.890738003669029e-05,
"loss": 2.1968,
"step": 16
},
{
"epoch": 0.12408759124087591,
"grad_norm": 0.6033762097358704,
"learning_rate": 9.851478631379982e-05,
"loss": 2.0457,
"step": 17
},
{
"epoch": 0.13138686131386862,
"grad_norm": 0.5091367363929749,
"learning_rate": 9.806308479691595e-05,
"loss": 2.0457,
"step": 18
},
{
"epoch": 0.13138686131386862,
"eval_loss": 2.0244650840759277,
"eval_runtime": 3.3259,
"eval_samples_per_second": 34.878,
"eval_steps_per_second": 4.51,
"step": 18
},
{
"epoch": 0.1386861313868613,
"grad_norm": 0.6819591522216797,
"learning_rate": 9.755282581475769e-05,
"loss": 2.0456,
"step": 19
},
{
"epoch": 0.145985401459854,
"grad_norm": 0.5838029980659485,
"learning_rate": 9.698463103929542e-05,
"loss": 2.1287,
"step": 20
},
{
"epoch": 0.15328467153284672,
"grad_norm": 0.5925353765487671,
"learning_rate": 9.635919272833938e-05,
"loss": 1.8451,
"step": 21
},
{
"epoch": 0.16058394160583941,
"grad_norm": 0.5294898152351379,
"learning_rate": 9.567727288213005e-05,
"loss": 1.7995,
"step": 22
},
{
"epoch": 0.1678832116788321,
"grad_norm": 0.5663394331932068,
"learning_rate": 9.493970231495835e-05,
"loss": 1.7454,
"step": 23
},
{
"epoch": 0.17518248175182483,
"grad_norm": 0.5861549973487854,
"learning_rate": 9.414737964294636e-05,
"loss": 1.8476,
"step": 24
},
{
"epoch": 0.18248175182481752,
"grad_norm": 0.5432003736495972,
"learning_rate": 9.330127018922194e-05,
"loss": 1.7331,
"step": 25
},
{
"epoch": 0.1897810218978102,
"grad_norm": 0.5292729735374451,
"learning_rate": 9.24024048078213e-05,
"loss": 1.6665,
"step": 26
},
{
"epoch": 0.19708029197080293,
"grad_norm": 0.604349672794342,
"learning_rate": 9.145187862775209e-05,
"loss": 1.5397,
"step": 27
},
{
"epoch": 0.19708029197080293,
"eval_loss": 1.55046546459198,
"eval_runtime": 3.3307,
"eval_samples_per_second": 34.828,
"eval_steps_per_second": 4.504,
"step": 27
},
{
"epoch": 0.20437956204379562,
"grad_norm": 0.612851619720459,
"learning_rate": 9.045084971874738e-05,
"loss": 1.5245,
"step": 28
},
{
"epoch": 0.2116788321167883,
"grad_norm": 0.5950889587402344,
"learning_rate": 8.940053768033609e-05,
"loss": 1.3849,
"step": 29
},
{
"epoch": 0.21897810218978103,
"grad_norm": 0.5285123586654663,
"learning_rate": 8.83022221559489e-05,
"loss": 1.4316,
"step": 30
},
{
"epoch": 0.22627737226277372,
"grad_norm": 0.5811218023300171,
"learning_rate": 8.715724127386972e-05,
"loss": 1.3643,
"step": 31
},
{
"epoch": 0.23357664233576642,
"grad_norm": 0.6191416382789612,
"learning_rate": 8.596699001693255e-05,
"loss": 1.3797,
"step": 32
},
{
"epoch": 0.24087591240875914,
"grad_norm": 0.6147345304489136,
"learning_rate": 8.473291852294987e-05,
"loss": 1.3354,
"step": 33
},
{
"epoch": 0.24817518248175183,
"grad_norm": 0.6098886132240295,
"learning_rate": 8.345653031794292e-05,
"loss": 1.2501,
"step": 34
},
{
"epoch": 0.25547445255474455,
"grad_norm": 0.6387662887573242,
"learning_rate": 8.213938048432697e-05,
"loss": 1.2498,
"step": 35
},
{
"epoch": 0.26277372262773724,
"grad_norm": 0.6446758508682251,
"learning_rate": 8.07830737662829e-05,
"loss": 1.1279,
"step": 36
},
{
"epoch": 0.26277372262773724,
"eval_loss": 1.0792505741119385,
"eval_runtime": 3.3273,
"eval_samples_per_second": 34.863,
"eval_steps_per_second": 4.508,
"step": 36
},
{
"epoch": 0.27007299270072993,
"grad_norm": 0.6810407638549805,
"learning_rate": 7.938926261462366e-05,
"loss": 1.0696,
"step": 37
},
{
"epoch": 0.2773722627737226,
"grad_norm": 0.7299973964691162,
"learning_rate": 7.795964517353735e-05,
"loss": 1.0278,
"step": 38
},
{
"epoch": 0.2846715328467153,
"grad_norm": 0.9009426236152649,
"learning_rate": 7.649596321166024e-05,
"loss": 0.9941,
"step": 39
},
{
"epoch": 0.291970802919708,
"grad_norm": 0.881585419178009,
"learning_rate": 7.500000000000001e-05,
"loss": 0.8963,
"step": 40
},
{
"epoch": 0.29927007299270075,
"grad_norm": 0.7808435559272766,
"learning_rate": 7.347357813929454e-05,
"loss": 0.8841,
"step": 41
},
{
"epoch": 0.30656934306569344,
"grad_norm": 0.7895486950874329,
"learning_rate": 7.191855733945387e-05,
"loss": 0.8795,
"step": 42
},
{
"epoch": 0.31386861313868614,
"grad_norm": 0.7057706117630005,
"learning_rate": 7.033683215379002e-05,
"loss": 0.7984,
"step": 43
},
{
"epoch": 0.32116788321167883,
"grad_norm": 0.8553966879844666,
"learning_rate": 6.873032967079561e-05,
"loss": 0.7712,
"step": 44
},
{
"epoch": 0.3284671532846715,
"grad_norm": 0.7784386277198792,
"learning_rate": 6.710100716628344e-05,
"loss": 0.6806,
"step": 45
},
{
"epoch": 0.3284671532846715,
"eval_loss": 0.6408138871192932,
"eval_runtime": 3.3252,
"eval_samples_per_second": 34.885,
"eval_steps_per_second": 4.511,
"step": 45
},
{
"epoch": 0.3357664233576642,
"grad_norm": 0.8031398057937622,
"learning_rate": 6.545084971874738e-05,
"loss": 0.6404,
"step": 46
},
{
"epoch": 0.34306569343065696,
"grad_norm": 0.7656677961349487,
"learning_rate": 6.378186779084995e-05,
"loss": 0.5158,
"step": 47
},
{
"epoch": 0.35036496350364965,
"grad_norm": 0.750630259513855,
"learning_rate": 6.209609477998338e-05,
"loss": 0.5667,
"step": 48
},
{
"epoch": 0.35766423357664234,
"grad_norm": 0.832373321056366,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.5518,
"step": 49
},
{
"epoch": 0.36496350364963503,
"grad_norm": 1.1414823532104492,
"learning_rate": 5.868240888334653e-05,
"loss": 0.5022,
"step": 50
},
{
"epoch": 0.3722627737226277,
"grad_norm": 0.8492874503135681,
"learning_rate": 5.695865504800327e-05,
"loss": 0.4418,
"step": 51
},
{
"epoch": 0.3795620437956204,
"grad_norm": 0.7709046602249146,
"learning_rate": 5.522642316338268e-05,
"loss": 0.3382,
"step": 52
},
{
"epoch": 0.38686131386861317,
"grad_norm": 0.7917667627334595,
"learning_rate": 5.348782368720626e-05,
"loss": 0.3708,
"step": 53
},
{
"epoch": 0.39416058394160586,
"grad_norm": 0.6456604599952698,
"learning_rate": 5.174497483512506e-05,
"loss": 0.3194,
"step": 54
},
{
"epoch": 0.39416058394160586,
"eval_loss": 0.2983303666114807,
"eval_runtime": 3.3312,
"eval_samples_per_second": 34.822,
"eval_steps_per_second": 4.503,
"step": 54
},
{
"epoch": 0.40145985401459855,
"grad_norm": 0.6997843980789185,
"learning_rate": 5e-05,
"loss": 0.3086,
"step": 55
},
{
"epoch": 0.40875912408759124,
"grad_norm": 0.7327139973640442,
"learning_rate": 4.825502516487497e-05,
"loss": 0.2994,
"step": 56
},
{
"epoch": 0.41605839416058393,
"grad_norm": 0.5769990086555481,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.2329,
"step": 57
},
{
"epoch": 0.4233576642335766,
"grad_norm": 0.6165249347686768,
"learning_rate": 4.477357683661734e-05,
"loss": 0.2371,
"step": 58
},
{
"epoch": 0.4306569343065693,
"grad_norm": 0.6056733131408691,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.2217,
"step": 59
},
{
"epoch": 0.43795620437956206,
"grad_norm": 0.6416125297546387,
"learning_rate": 4.131759111665349e-05,
"loss": 0.1895,
"step": 60
},
{
"epoch": 0.44525547445255476,
"grad_norm": 0.6404258012771606,
"learning_rate": 3.960441545911204e-05,
"loss": 0.1652,
"step": 61
},
{
"epoch": 0.45255474452554745,
"grad_norm": 0.5605882406234741,
"learning_rate": 3.790390522001662e-05,
"loss": 0.1647,
"step": 62
},
{
"epoch": 0.45985401459854014,
"grad_norm": 0.5275187492370605,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.129,
"step": 63
},
{
"epoch": 0.45985401459854014,
"eval_loss": 0.13693049550056458,
"eval_runtime": 3.3295,
"eval_samples_per_second": 34.84,
"eval_steps_per_second": 4.505,
"step": 63
},
{
"epoch": 0.46715328467153283,
"grad_norm": 0.49548661708831787,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.129,
"step": 64
},
{
"epoch": 0.4744525547445255,
"grad_norm": 0.3914906680583954,
"learning_rate": 3.289899283371657e-05,
"loss": 0.1175,
"step": 65
},
{
"epoch": 0.48175182481751827,
"grad_norm": 0.41363123059272766,
"learning_rate": 3.12696703292044e-05,
"loss": 0.1176,
"step": 66
},
{
"epoch": 0.48905109489051096,
"grad_norm": 0.4557228088378906,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.1208,
"step": 67
},
{
"epoch": 0.49635036496350365,
"grad_norm": 0.4289744198322296,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.1057,
"step": 68
},
{
"epoch": 0.5036496350364964,
"grad_norm": 0.39465251564979553,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.1006,
"step": 69
},
{
"epoch": 0.5109489051094891,
"grad_norm": 0.34635651111602783,
"learning_rate": 2.500000000000001e-05,
"loss": 0.0931,
"step": 70
},
{
"epoch": 0.5182481751824818,
"grad_norm": 0.27340167760849,
"learning_rate": 2.350403678833976e-05,
"loss": 0.0751,
"step": 71
},
{
"epoch": 0.5255474452554745,
"grad_norm": 0.4173787236213684,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.0944,
"step": 72
},
{
"epoch": 0.5255474452554745,
"eval_loss": 0.08399030566215515,
"eval_runtime": 3.3264,
"eval_samples_per_second": 34.872,
"eval_steps_per_second": 4.509,
"step": 72
},
{
"epoch": 0.5328467153284672,
"grad_norm": 0.36685168743133545,
"learning_rate": 2.061073738537635e-05,
"loss": 0.0896,
"step": 73
},
{
"epoch": 0.5401459854014599,
"grad_norm": 0.38893231749534607,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.0866,
"step": 74
},
{
"epoch": 0.5474452554744526,
"grad_norm": 0.7083960771560669,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.1027,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7720265790259200.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}