{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.704225352112676,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007042253521126761,
      "grad_norm": 36.489925384521484,
      "learning_rate": 1e-05,
      "loss": 16.7418,
      "step": 1
    },
    {
      "epoch": 0.007042253521126761,
      "eval_loss": 8.365168571472168,
      "eval_runtime": 5.2379,
      "eval_samples_per_second": 22.91,
      "eval_steps_per_second": 2.864,
      "step": 1
    },
    {
      "epoch": 0.014084507042253521,
      "grad_norm": 54.84891128540039,
      "learning_rate": 2e-05,
      "loss": 15.7412,
      "step": 2
    },
    {
      "epoch": 0.02112676056338028,
      "grad_norm": 64.47398376464844,
      "learning_rate": 3e-05,
      "loss": 16.7694,
      "step": 3
    },
    {
      "epoch": 0.028169014084507043,
      "grad_norm": 16.057498931884766,
      "learning_rate": 4e-05,
      "loss": 16.2653,
      "step": 4
    },
    {
      "epoch": 0.035211267605633804,
      "grad_norm": 38.04544448852539,
      "learning_rate": 5e-05,
      "loss": 16.5994,
      "step": 5
    },
    {
      "epoch": 0.04225352112676056,
      "grad_norm": 32.003196716308594,
      "learning_rate": 6e-05,
      "loss": 16.4649,
      "step": 6
    },
    {
      "epoch": 0.04929577464788732,
      "grad_norm": 43.975486755371094,
      "learning_rate": 7e-05,
      "loss": 15.2708,
      "step": 7
    },
    {
      "epoch": 0.056338028169014086,
      "grad_norm": 23.013490676879883,
      "learning_rate": 8e-05,
      "loss": 15.9144,
      "step": 8
    },
    {
      "epoch": 0.06338028169014084,
      "grad_norm": 21.642526626586914,
      "learning_rate": 9e-05,
      "loss": 13.8646,
      "step": 9
    },
    {
      "epoch": 0.06338028169014084,
      "eval_loss": 5.608087539672852,
      "eval_runtime": 5.2413,
      "eval_samples_per_second": 22.895,
      "eval_steps_per_second": 2.862,
      "step": 9
    },
    {
      "epoch": 0.07042253521126761,
      "grad_norm": 20.63976287841797,
      "learning_rate": 0.0001,
      "loss": 11.0946,
      "step": 10
    },
    {
      "epoch": 0.07746478873239436,
      "grad_norm": 22.017879486083984,
      "learning_rate": 9.99695413509548e-05,
      "loss": 9.8704,
      "step": 11
    },
    {
      "epoch": 0.08450704225352113,
      "grad_norm": 25.062084197998047,
      "learning_rate": 9.987820251299122e-05,
      "loss": 6.6076,
      "step": 12
    },
    {
      "epoch": 0.09154929577464789,
      "grad_norm": 17.164138793945312,
      "learning_rate": 9.972609476841367e-05,
      "loss": 6.0168,
      "step": 13
    },
    {
      "epoch": 0.09859154929577464,
      "grad_norm": 11.223540306091309,
      "learning_rate": 9.951340343707852e-05,
      "loss": 3.8373,
      "step": 14
    },
    {
      "epoch": 0.1056338028169014,
      "grad_norm": 7.377206325531006,
      "learning_rate": 9.924038765061042e-05,
      "loss": 2.6019,
      "step": 15
    },
    {
      "epoch": 0.11267605633802817,
      "grad_norm": 7.338124752044678,
      "learning_rate": 9.890738003669029e-05,
      "loss": 4.2857,
      "step": 16
    },
    {
      "epoch": 0.11971830985915492,
      "grad_norm": 6.844852447509766,
      "learning_rate": 9.851478631379982e-05,
      "loss": 2.2959,
      "step": 17
    },
    {
      "epoch": 0.1267605633802817,
      "grad_norm": 6.174362659454346,
      "learning_rate": 9.806308479691595e-05,
      "loss": 2.6541,
      "step": 18
    },
    {
      "epoch": 0.1267605633802817,
      "eval_loss": 1.0343117713928223,
      "eval_runtime": 5.2408,
      "eval_samples_per_second": 22.897,
      "eval_steps_per_second": 2.862,
      "step": 18
    },
    {
      "epoch": 0.13380281690140844,
      "grad_norm": 11.295928001403809,
      "learning_rate": 9.755282581475769e-05,
      "loss": 2.0969,
      "step": 19
    },
    {
      "epoch": 0.14084507042253522,
      "grad_norm": 5.165931224822998,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.1286,
      "step": 20
    },
    {
      "epoch": 0.14788732394366197,
      "grad_norm": 15.428300857543945,
      "learning_rate": 9.635919272833938e-05,
      "loss": 2.5216,
      "step": 21
    },
    {
      "epoch": 0.15492957746478872,
      "grad_norm": 15.729449272155762,
      "learning_rate": 9.567727288213005e-05,
      "loss": 3.2321,
      "step": 22
    },
    {
      "epoch": 0.1619718309859155,
      "grad_norm": 6.993392467498779,
      "learning_rate": 9.493970231495835e-05,
      "loss": 2.019,
      "step": 23
    },
    {
      "epoch": 0.16901408450704225,
      "grad_norm": 3.678471326828003,
      "learning_rate": 9.414737964294636e-05,
      "loss": 1.4714,
      "step": 24
    },
    {
      "epoch": 0.176056338028169,
      "grad_norm": 4.6879801750183105,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.449,
      "step": 25
    },
    {
      "epoch": 0.18309859154929578,
      "grad_norm": 6.838872909545898,
      "learning_rate": 9.24024048078213e-05,
      "loss": 1.8043,
      "step": 26
    },
    {
      "epoch": 0.19014084507042253,
      "grad_norm": 7.476458549499512,
      "learning_rate": 9.145187862775209e-05,
      "loss": 2.0743,
      "step": 27
    },
    {
      "epoch": 0.19014084507042253,
      "eval_loss": 0.8306647539138794,
      "eval_runtime": 5.3176,
      "eval_samples_per_second": 22.567,
      "eval_steps_per_second": 2.821,
      "step": 27
    },
    {
      "epoch": 0.19718309859154928,
      "grad_norm": 7.009374141693115,
      "learning_rate": 9.045084971874738e-05,
      "loss": 2.2197,
      "step": 28
    },
    {
      "epoch": 0.20422535211267606,
      "grad_norm": 4.203879356384277,
      "learning_rate": 8.940053768033609e-05,
      "loss": 1.8692,
      "step": 29
    },
    {
      "epoch": 0.2112676056338028,
      "grad_norm": 2.9395430088043213,
      "learning_rate": 8.83022221559489e-05,
      "loss": 1.6414,
      "step": 30
    },
    {
      "epoch": 0.21830985915492956,
      "grad_norm": 4.54150915145874,
      "learning_rate": 8.715724127386972e-05,
      "loss": 1.6064,
      "step": 31
    },
    {
      "epoch": 0.22535211267605634,
      "grad_norm": 3.4813199043273926,
      "learning_rate": 8.596699001693255e-05,
      "loss": 1.5703,
      "step": 32
    },
    {
      "epoch": 0.2323943661971831,
      "grad_norm": 5.205654144287109,
      "learning_rate": 8.473291852294987e-05,
      "loss": 1.6502,
      "step": 33
    },
    {
      "epoch": 0.23943661971830985,
      "grad_norm": 6.046175956726074,
      "learning_rate": 8.345653031794292e-05,
      "loss": 1.8305,
      "step": 34
    },
    {
      "epoch": 0.24647887323943662,
      "grad_norm": 2.525583505630493,
      "learning_rate": 8.213938048432697e-05,
      "loss": 1.3965,
      "step": 35
    },
    {
      "epoch": 0.2535211267605634,
      "grad_norm": 7.969776630401611,
      "learning_rate": 8.07830737662829e-05,
      "loss": 2.1048,
      "step": 36
    },
    {
      "epoch": 0.2535211267605634,
      "eval_loss": 0.7210001349449158,
      "eval_runtime": 5.2587,
      "eval_samples_per_second": 22.819,
      "eval_steps_per_second": 2.852,
      "step": 36
    },
    {
      "epoch": 0.2605633802816901,
      "grad_norm": 2.2472925186157227,
      "learning_rate": 7.938926261462366e-05,
      "loss": 1.3476,
      "step": 37
    },
    {
      "epoch": 0.2676056338028169,
      "grad_norm": 3.452265739440918,
      "learning_rate": 7.795964517353735e-05,
      "loss": 1.7696,
      "step": 38
    },
    {
      "epoch": 0.2746478873239437,
      "grad_norm": 2.057129144668579,
      "learning_rate": 7.649596321166024e-05,
      "loss": 1.4384,
      "step": 39
    },
    {
      "epoch": 0.28169014084507044,
      "grad_norm": 2.932300090789795,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.3321,
      "step": 40
    },
    {
      "epoch": 0.2887323943661972,
      "grad_norm": 1.8168283700942993,
      "learning_rate": 7.347357813929454e-05,
      "loss": 1.4552,
      "step": 41
    },
    {
      "epoch": 0.29577464788732394,
      "grad_norm": 1.3311522006988525,
      "learning_rate": 7.191855733945387e-05,
      "loss": 1.4102,
      "step": 42
    },
    {
      "epoch": 0.3028169014084507,
      "grad_norm": 3.1506192684173584,
      "learning_rate": 7.033683215379002e-05,
      "loss": 1.5,
      "step": 43
    },
    {
      "epoch": 0.30985915492957744,
      "grad_norm": 4.003086090087891,
      "learning_rate": 6.873032967079561e-05,
      "loss": 1.4761,
      "step": 44
    },
    {
      "epoch": 0.31690140845070425,
      "grad_norm": 3.40531063079834,
      "learning_rate": 6.710100716628344e-05,
      "loss": 1.5122,
      "step": 45
    },
    {
      "epoch": 0.31690140845070425,
      "eval_loss": 0.7483626008033752,
      "eval_runtime": 5.2389,
      "eval_samples_per_second": 22.905,
      "eval_steps_per_second": 2.863,
      "step": 45
    },
    {
      "epoch": 0.323943661971831,
      "grad_norm": 2.6020545959472656,
      "learning_rate": 6.545084971874738e-05,
      "loss": 1.3895,
      "step": 46
    },
    {
      "epoch": 0.33098591549295775,
      "grad_norm": 2.6481332778930664,
      "learning_rate": 6.378186779084995e-05,
      "loss": 1.4633,
      "step": 47
    },
    {
      "epoch": 0.3380281690140845,
      "grad_norm": 2.3728225231170654,
      "learning_rate": 6.209609477998338e-05,
      "loss": 1.7675,
      "step": 48
    },
    {
      "epoch": 0.34507042253521125,
      "grad_norm": 2.0442349910736084,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 1.3868,
      "step": 49
    },
    {
      "epoch": 0.352112676056338,
      "grad_norm": 1.7789822816848755,
      "learning_rate": 5.868240888334653e-05,
      "loss": 1.4195,
      "step": 50
    },
    {
      "epoch": 0.3591549295774648,
      "grad_norm": 2.0427401065826416,
      "learning_rate": 5.695865504800327e-05,
      "loss": 1.6343,
      "step": 51
    },
    {
      "epoch": 0.36619718309859156,
      "grad_norm": 3.6951797008514404,
      "learning_rate": 5.522642316338268e-05,
      "loss": 1.7408,
      "step": 52
    },
    {
      "epoch": 0.3732394366197183,
      "grad_norm": 3.672791004180908,
      "learning_rate": 5.348782368720626e-05,
      "loss": 1.524,
      "step": 53
    },
    {
      "epoch": 0.38028169014084506,
      "grad_norm": 4.061415672302246,
      "learning_rate": 5.174497483512506e-05,
      "loss": 1.5609,
      "step": 54
    },
    {
      "epoch": 0.38028169014084506,
      "eval_loss": 0.7512367963790894,
      "eval_runtime": 5.2814,
      "eval_samples_per_second": 22.721,
      "eval_steps_per_second": 2.84,
      "step": 54
    },
    {
      "epoch": 0.3873239436619718,
      "grad_norm": 2.5772194862365723,
      "learning_rate": 5e-05,
      "loss": 1.4108,
      "step": 55
    },
    {
      "epoch": 0.39436619718309857,
      "grad_norm": 1.6014587879180908,
      "learning_rate": 4.825502516487497e-05,
      "loss": 1.365,
      "step": 56
    },
    {
      "epoch": 0.4014084507042254,
      "grad_norm": 2.9118096828460693,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 1.7301,
      "step": 57
    },
    {
      "epoch": 0.4084507042253521,
      "grad_norm": 4.28907585144043,
      "learning_rate": 4.477357683661734e-05,
      "loss": 1.7773,
      "step": 58
    },
    {
      "epoch": 0.4154929577464789,
      "grad_norm": 3.053123712539673,
      "learning_rate": 4.3041344951996746e-05,
      "loss": 1.6367,
      "step": 59
    },
    {
      "epoch": 0.4225352112676056,
      "grad_norm": 3.9312641620635986,
      "learning_rate": 4.131759111665349e-05,
      "loss": 1.5563,
      "step": 60
    },
    {
      "epoch": 0.4295774647887324,
      "grad_norm": 4.716941833496094,
      "learning_rate": 3.960441545911204e-05,
      "loss": 1.5059,
      "step": 61
    },
    {
      "epoch": 0.43661971830985913,
      "grad_norm": 1.2284787893295288,
      "learning_rate": 3.790390522001662e-05,
      "loss": 1.3866,
      "step": 62
    },
    {
      "epoch": 0.44366197183098594,
      "grad_norm": 1.6609184741973877,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 1.3671,
      "step": 63
    },
    {
      "epoch": 0.44366197183098594,
      "eval_loss": 0.7309496402740479,
      "eval_runtime": 5.2546,
      "eval_samples_per_second": 22.837,
      "eval_steps_per_second": 2.855,
      "step": 63
    },
    {
      "epoch": 0.4507042253521127,
      "grad_norm": 2.3474254608154297,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 1.6455,
      "step": 64
    },
    {
      "epoch": 0.45774647887323944,
      "grad_norm": 3.384629726409912,
      "learning_rate": 3.289899283371657e-05,
      "loss": 1.5113,
      "step": 65
    },
    {
      "epoch": 0.4647887323943662,
      "grad_norm": 2.769561290740967,
      "learning_rate": 3.12696703292044e-05,
      "loss": 2.0072,
      "step": 66
    },
    {
      "epoch": 0.47183098591549294,
      "grad_norm": 1.6377378702163696,
      "learning_rate": 2.9663167846209998e-05,
      "loss": 1.378,
      "step": 67
    },
    {
      "epoch": 0.4788732394366197,
      "grad_norm": 2.9717202186584473,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 1.9511,
      "step": 68
    },
    {
      "epoch": 0.4859154929577465,
      "grad_norm": 1.5930095911026,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 1.6474,
      "step": 69
    },
    {
      "epoch": 0.49295774647887325,
      "grad_norm": 2.147733688354492,
      "learning_rate": 2.500000000000001e-05,
      "loss": 1.5024,
      "step": 70
    },
    {
      "epoch": 0.5,
      "grad_norm": 2.3971445560455322,
      "learning_rate": 2.350403678833976e-05,
      "loss": 1.4185,
      "step": 71
    },
    {
      "epoch": 0.5070422535211268,
      "grad_norm": 2.334181308746338,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 1.4944,
      "step": 72
    },
    {
      "epoch": 0.5070422535211268,
      "eval_loss": 0.7280327677726746,
      "eval_runtime": 5.2593,
      "eval_samples_per_second": 22.817,
      "eval_steps_per_second": 2.852,
      "step": 72
    },
    {
      "epoch": 0.5140845070422535,
      "grad_norm": 2.140132427215576,
      "learning_rate": 2.061073738537635e-05,
      "loss": 1.4209,
      "step": 73
    },
    {
      "epoch": 0.5211267605633803,
      "grad_norm": 1.595037817955017,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 1.6229,
      "step": 74
    },
    {
      "epoch": 0.528169014084507,
      "grad_norm": 3.3290388584136963,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 1.4053,
      "step": 75
    },
    {
      "epoch": 0.5352112676056338,
      "grad_norm": 1.6777218580245972,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 1.5162,
      "step": 76
    },
    {
      "epoch": 0.5422535211267606,
      "grad_norm": 2.207165479660034,
      "learning_rate": 1.526708147705013e-05,
      "loss": 1.4367,
      "step": 77
    },
    {
      "epoch": 0.5492957746478874,
      "grad_norm": 2.791753053665161,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 1.5284,
      "step": 78
    },
    {
      "epoch": 0.5563380281690141,
      "grad_norm": 2.0597660541534424,
      "learning_rate": 1.2842758726130283e-05,
      "loss": 1.4983,
      "step": 79
    },
    {
      "epoch": 0.5633802816901409,
      "grad_norm": 2.1963610649108887,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 1.469,
      "step": 80
    },
    {
      "epoch": 0.5704225352112676,
      "grad_norm": 1.8355844020843506,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 1.6337,
      "step": 81
    },
    {
      "epoch": 0.5704225352112676,
      "eval_loss": 0.7295297980308533,
      "eval_runtime": 5.2947,
      "eval_samples_per_second": 22.664,
      "eval_steps_per_second": 2.833,
      "step": 81
    },
    {
      "epoch": 0.5774647887323944,
      "grad_norm": 2.3852171897888184,
      "learning_rate": 9.549150281252633e-06,
      "loss": 1.3641,
      "step": 82
    },
    {
      "epoch": 0.5845070422535211,
      "grad_norm": 2.421605110168457,
      "learning_rate": 8.548121372247918e-06,
      "loss": 1.4797,
      "step": 83
    },
    {
      "epoch": 0.5915492957746479,
      "grad_norm": 1.8228073120117188,
      "learning_rate": 7.597595192178702e-06,
      "loss": 1.5467,
      "step": 84
    },
    {
      "epoch": 0.5985915492957746,
      "grad_norm": 3.43153977394104,
      "learning_rate": 6.698729810778065e-06,
      "loss": 1.4331,
      "step": 85
    },
    {
      "epoch": 0.6056338028169014,
      "grad_norm": 1.7456953525543213,
      "learning_rate": 5.852620357053651e-06,
      "loss": 1.341,
      "step": 86
    },
    {
      "epoch": 0.6126760563380281,
      "grad_norm": 1.962058663368225,
      "learning_rate": 5.060297685041659e-06,
      "loss": 1.5015,
      "step": 87
    },
    {
      "epoch": 0.6197183098591549,
      "grad_norm": 4.884045600891113,
      "learning_rate": 4.322727117869951e-06,
      "loss": 1.6106,
      "step": 88
    },
    {
      "epoch": 0.6267605633802817,
      "grad_norm": 1.6915897130966187,
      "learning_rate": 3.6408072716606346e-06,
      "loss": 1.4477,
      "step": 89
    },
    {
      "epoch": 0.6338028169014085,
      "grad_norm": 1.18873131275177,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 1.3877,
      "step": 90
    },
    {
      "epoch": 0.6338028169014085,
      "eval_loss": 0.7244928479194641,
      "eval_runtime": 5.2601,
      "eval_samples_per_second": 22.813,
      "eval_steps_per_second": 2.852,
      "step": 90
    },
    {
      "epoch": 0.6408450704225352,
      "grad_norm": 2.745110273361206,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 1.3234,
      "step": 91
    },
    {
      "epoch": 0.647887323943662,
      "grad_norm": 1.5612205266952515,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 1.3812,
      "step": 92
    },
    {
      "epoch": 0.6549295774647887,
      "grad_norm": 1.651207447052002,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 1.6955,
      "step": 93
    },
    {
      "epoch": 0.6619718309859155,
      "grad_norm": 1.4542431831359863,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 1.4568,
      "step": 94
    },
    {
      "epoch": 0.6690140845070423,
      "grad_norm": 2.2859489917755127,
      "learning_rate": 7.596123493895991e-07,
      "loss": 1.4368,
      "step": 95
    },
    {
      "epoch": 0.676056338028169,
      "grad_norm": 2.264481544494629,
      "learning_rate": 4.865965629214819e-07,
      "loss": 1.7259,
      "step": 96
    },
    {
      "epoch": 0.6830985915492958,
      "grad_norm": 3.839521646499634,
      "learning_rate": 2.7390523158633554e-07,
      "loss": 1.663,
      "step": 97
    },
    {
      "epoch": 0.6901408450704225,
      "grad_norm": 1.972511649131775,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 1.4722,
      "step": 98
    },
    {
      "epoch": 0.6971830985915493,
      "grad_norm": 1.8594896793365479,
      "learning_rate": 3.04586490452119e-08,
      "loss": 1.6764,
      "step": 99
    },
    {
      "epoch": 0.6971830985915493,
      "eval_loss": 0.7270611524581909,
      "eval_runtime": 5.2604,
      "eval_samples_per_second": 22.812,
      "eval_steps_per_second": 2.851,
      "step": 99
    },
    {
      "epoch": 0.704225352112676,
      "grad_norm": 1.3822779655456543,
      "learning_rate": 0.0,
      "loss": 1.4299,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.53464644354048e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}