{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.003283640901031063,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 3.2836409010310635e-05,
      "grad_norm": 0.039061568677425385,
      "learning_rate": 1e-05,
      "loss": 10.374,
      "step": 1
    },
    {
      "epoch": 3.2836409010310635e-05,
      "eval_loss": 10.37774658203125,
      "eval_runtime": 169.5857,
      "eval_samples_per_second": 151.227,
      "eval_steps_per_second": 18.905,
      "step": 1
    },
    {
      "epoch": 6.567281802062127e-05,
      "grad_norm": 0.04970359802246094,
      "learning_rate": 2e-05,
      "loss": 10.3808,
      "step": 2
    },
    {
      "epoch": 9.85092270309319e-05,
      "grad_norm": 0.051377296447753906,
      "learning_rate": 3e-05,
      "loss": 10.3791,
      "step": 3
    },
    {
      "epoch": 0.00013134563604124254,
      "grad_norm": 0.040986333042383194,
      "learning_rate": 4e-05,
      "loss": 10.3831,
      "step": 4
    },
    {
      "epoch": 0.00016418204505155317,
      "grad_norm": 0.04276856407523155,
      "learning_rate": 5e-05,
      "loss": 10.3766,
      "step": 5
    },
    {
      "epoch": 0.0001970184540618638,
      "grad_norm": 0.03917916491627693,
      "learning_rate": 6e-05,
      "loss": 10.3777,
      "step": 6
    },
    {
      "epoch": 0.00022985486307217442,
      "grad_norm": 0.054591104388237,
      "learning_rate": 7e-05,
      "loss": 10.3798,
      "step": 7
    },
    {
      "epoch": 0.0002626912720824851,
      "grad_norm": 0.04181118682026863,
      "learning_rate": 8e-05,
      "loss": 10.3828,
      "step": 8
    },
    {
      "epoch": 0.0002955276810927957,
      "grad_norm": 0.039910946041345596,
      "learning_rate": 9e-05,
      "loss": 10.3794,
      "step": 9
    },
    {
      "epoch": 0.0002955276810927957,
      "eval_loss": 10.377296447753906,
      "eval_runtime": 170.6195,
      "eval_samples_per_second": 150.311,
      "eval_steps_per_second": 18.79,
      "step": 9
    },
    {
      "epoch": 0.00032836409010310633,
      "grad_norm": 0.057242438197135925,
      "learning_rate": 0.0001,
      "loss": 10.3841,
      "step": 10
    },
    {
      "epoch": 0.00036120049911341694,
      "grad_norm": 0.06191801652312279,
      "learning_rate": 9.99695413509548e-05,
      "loss": 10.3803,
      "step": 11
    },
    {
      "epoch": 0.0003940369081237276,
      "grad_norm": 0.06025468185544014,
      "learning_rate": 9.987820251299122e-05,
      "loss": 10.3891,
      "step": 12
    },
    {
      "epoch": 0.00042687331713403825,
      "grad_norm": 0.04083796963095665,
      "learning_rate": 9.972609476841367e-05,
      "loss": 10.3679,
      "step": 13
    },
    {
      "epoch": 0.00045970972614434885,
      "grad_norm": 0.04752117395401001,
      "learning_rate": 9.951340343707852e-05,
      "loss": 10.3791,
      "step": 14
    },
    {
      "epoch": 0.0004925461351546594,
      "grad_norm": 0.063045933842659,
      "learning_rate": 9.924038765061042e-05,
      "loss": 10.383,
      "step": 15
    },
    {
      "epoch": 0.0005253825441649702,
      "grad_norm": 0.03456421196460724,
      "learning_rate": 9.890738003669029e-05,
      "loss": 10.3785,
      "step": 16
    },
    {
      "epoch": 0.0005582189531752808,
      "grad_norm": 0.04961790144443512,
      "learning_rate": 9.851478631379982e-05,
      "loss": 10.3833,
      "step": 17
    },
    {
      "epoch": 0.0005910553621855914,
      "grad_norm": 0.05567217618227005,
      "learning_rate": 9.806308479691595e-05,
      "loss": 10.3881,
      "step": 18
    },
    {
      "epoch": 0.0005910553621855914,
      "eval_loss": 10.37619400024414,
      "eval_runtime": 170.4028,
      "eval_samples_per_second": 150.502,
      "eval_steps_per_second": 18.814,
      "step": 18
    },
    {
      "epoch": 0.0006238917711959021,
      "grad_norm": 0.038412440568208694,
      "learning_rate": 9.755282581475769e-05,
      "loss": 10.3748,
      "step": 19
    },
    {
      "epoch": 0.0006567281802062127,
      "grad_norm": 0.04399815946817398,
      "learning_rate": 9.698463103929542e-05,
      "loss": 10.3757,
      "step": 20
    },
    {
      "epoch": 0.0006895645892165233,
      "grad_norm": 0.03764853626489639,
      "learning_rate": 9.635919272833938e-05,
      "loss": 10.3762,
      "step": 21
    },
    {
      "epoch": 0.0007224009982268339,
      "grad_norm": 0.052127059549093246,
      "learning_rate": 9.567727288213005e-05,
      "loss": 10.3771,
      "step": 22
    },
    {
      "epoch": 0.0007552374072371446,
      "grad_norm": 0.03697521612048149,
      "learning_rate": 9.493970231495835e-05,
      "loss": 10.374,
      "step": 23
    },
    {
      "epoch": 0.0007880738162474552,
      "grad_norm": 0.04965825751423836,
      "learning_rate": 9.414737964294636e-05,
      "loss": 10.3714,
      "step": 24
    },
    {
      "epoch": 0.0008209102252577658,
      "grad_norm": 0.038044273853302,
      "learning_rate": 9.330127018922194e-05,
      "loss": 10.3757,
      "step": 25
    },
    {
      "epoch": 0.0008537466342680765,
      "grad_norm": 0.04307650402188301,
      "learning_rate": 9.24024048078213e-05,
      "loss": 10.3715,
      "step": 26
    },
    {
      "epoch": 0.0008865830432783871,
      "grad_norm": 0.041188716888427734,
      "learning_rate": 9.145187862775209e-05,
      "loss": 10.3712,
      "step": 27
    },
    {
      "epoch": 0.0008865830432783871,
      "eval_loss": 10.375112533569336,
      "eval_runtime": 170.1694,
      "eval_samples_per_second": 150.709,
      "eval_steps_per_second": 18.84,
      "step": 27
    },
    {
      "epoch": 0.0009194194522886977,
      "grad_norm": 0.03614542633295059,
      "learning_rate": 9.045084971874738e-05,
      "loss": 10.3765,
      "step": 28
    },
    {
      "epoch": 0.0009522558612990083,
      "grad_norm": 0.0354689322412014,
      "learning_rate": 8.940053768033609e-05,
      "loss": 10.3743,
      "step": 29
    },
    {
      "epoch": 0.000985092270309319,
      "grad_norm": 0.0464780293405056,
      "learning_rate": 8.83022221559489e-05,
      "loss": 10.3809,
      "step": 30
    },
    {
      "epoch": 0.0010179286793196296,
      "grad_norm": 0.05983458459377289,
      "learning_rate": 8.715724127386972e-05,
      "loss": 10.3758,
      "step": 31
    },
    {
      "epoch": 0.0010507650883299403,
      "grad_norm": 0.04344520345330238,
      "learning_rate": 8.596699001693255e-05,
      "loss": 10.3735,
      "step": 32
    },
    {
      "epoch": 0.0010836014973402508,
      "grad_norm": 0.041173264384269714,
      "learning_rate": 8.473291852294987e-05,
      "loss": 10.3722,
      "step": 33
    },
    {
      "epoch": 0.0011164379063505615,
      "grad_norm": 0.041133031249046326,
      "learning_rate": 8.345653031794292e-05,
      "loss": 10.3769,
      "step": 34
    },
    {
      "epoch": 0.0011492743153608722,
      "grad_norm": 0.06315415352582932,
      "learning_rate": 8.213938048432697e-05,
      "loss": 10.378,
      "step": 35
    },
    {
      "epoch": 0.0011821107243711827,
      "grad_norm": 0.04024801030755043,
      "learning_rate": 8.07830737662829e-05,
      "loss": 10.373,
      "step": 36
    },
    {
      "epoch": 0.0011821107243711827,
      "eval_loss": 10.374061584472656,
      "eval_runtime": 170.5991,
      "eval_samples_per_second": 150.329,
      "eval_steps_per_second": 18.793,
      "step": 36
    },
    {
      "epoch": 0.0012149471333814934,
      "grad_norm": 0.042859919369220734,
      "learning_rate": 7.938926261462366e-05,
      "loss": 10.3751,
      "step": 37
    },
    {
      "epoch": 0.0012477835423918041,
      "grad_norm": 0.04361676052212715,
      "learning_rate": 7.795964517353735e-05,
      "loss": 10.3766,
      "step": 38
    },
    {
      "epoch": 0.0012806199514021146,
      "grad_norm": 0.053741976618766785,
      "learning_rate": 7.649596321166024e-05,
      "loss": 10.3753,
      "step": 39
    },
    {
      "epoch": 0.0013134563604124253,
      "grad_norm": 0.0702785924077034,
      "learning_rate": 7.500000000000001e-05,
      "loss": 10.3751,
      "step": 40
    },
    {
      "epoch": 0.0013462927694227358,
      "grad_norm": 0.06600352376699448,
      "learning_rate": 7.347357813929454e-05,
      "loss": 10.3788,
      "step": 41
    },
    {
      "epoch": 0.0013791291784330465,
      "grad_norm": 0.037580136209726334,
      "learning_rate": 7.191855733945387e-05,
      "loss": 10.3778,
      "step": 42
    },
    {
      "epoch": 0.0014119655874433572,
      "grad_norm": 0.05780002847313881,
      "learning_rate": 7.033683215379002e-05,
      "loss": 10.375,
      "step": 43
    },
    {
      "epoch": 0.0014448019964536677,
      "grad_norm": 0.04174739867448807,
      "learning_rate": 6.873032967079561e-05,
      "loss": 10.3758,
      "step": 44
    },
    {
      "epoch": 0.0014776384054639784,
      "grad_norm": 0.053912002593278885,
      "learning_rate": 6.710100716628344e-05,
      "loss": 10.3738,
      "step": 45
    },
    {
      "epoch": 0.0014776384054639784,
      "eval_loss": 10.373043060302734,
      "eval_runtime": 170.5618,
      "eval_samples_per_second": 150.362,
      "eval_steps_per_second": 18.797,
      "step": 45
    },
    {
      "epoch": 0.0015104748144742892,
      "grad_norm": 0.044272515922784805,
      "learning_rate": 6.545084971874738e-05,
      "loss": 10.3758,
      "step": 46
    },
    {
      "epoch": 0.0015433112234845997,
      "grad_norm": 0.04709697142243385,
      "learning_rate": 6.378186779084995e-05,
      "loss": 10.3749,
      "step": 47
    },
    {
      "epoch": 0.0015761476324949104,
      "grad_norm": 0.04305019602179527,
      "learning_rate": 6.209609477998338e-05,
      "loss": 10.364,
      "step": 48
    },
    {
      "epoch": 0.001608984041505221,
      "grad_norm": 0.049739986658096313,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 10.3708,
      "step": 49
    },
    {
      "epoch": 0.0016418204505155316,
      "grad_norm": 0.04268916696310043,
      "learning_rate": 5.868240888334653e-05,
      "loss": 10.3697,
      "step": 50
    },
    {
      "epoch": 0.0016746568595258423,
      "grad_norm": 0.042693495750427246,
      "learning_rate": 5.695865504800327e-05,
      "loss": 10.3793,
      "step": 51
    },
    {
      "epoch": 0.001707493268536153,
      "grad_norm": 0.045006535947322845,
      "learning_rate": 5.522642316338268e-05,
      "loss": 10.3675,
      "step": 52
    },
    {
      "epoch": 0.0017403296775464635,
      "grad_norm": 0.04501199349761009,
      "learning_rate": 5.348782368720626e-05,
      "loss": 10.3753,
      "step": 53
    },
    {
      "epoch": 0.0017731660865567742,
      "grad_norm": 0.04330871254205704,
      "learning_rate": 5.174497483512506e-05,
      "loss": 10.374,
      "step": 54
    },
    {
      "epoch": 0.0017731660865567742,
      "eval_loss": 10.37214469909668,
      "eval_runtime": 170.1851,
      "eval_samples_per_second": 150.695,
      "eval_steps_per_second": 18.838,
      "step": 54
    },
    {
      "epoch": 0.0018060024955670847,
      "grad_norm": 0.05513521283864975,
      "learning_rate": 5e-05,
      "loss": 10.367,
      "step": 55
    },
    {
      "epoch": 0.0018388389045773954,
      "grad_norm": 0.048482492566108704,
      "learning_rate": 4.825502516487497e-05,
      "loss": 10.3717,
      "step": 56
    },
    {
      "epoch": 0.001871675313587706,
      "grad_norm": 0.0455101802945137,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 10.3688,
      "step": 57
    },
    {
      "epoch": 0.0019045117225980166,
      "grad_norm": 0.04642629623413086,
      "learning_rate": 4.477357683661734e-05,
      "loss": 10.3778,
      "step": 58
    },
    {
      "epoch": 0.0019373481316083273,
      "grad_norm": 0.0608275830745697,
      "learning_rate": 4.3041344951996746e-05,
      "loss": 10.3782,
      "step": 59
    },
    {
      "epoch": 0.001970184540618638,
      "grad_norm": 0.049797091633081436,
      "learning_rate": 4.131759111665349e-05,
      "loss": 10.3706,
      "step": 60
    },
    {
      "epoch": 0.0020030209496289487,
      "grad_norm": 0.0518428236246109,
      "learning_rate": 3.960441545911204e-05,
      "loss": 10.3705,
      "step": 61
    },
    {
      "epoch": 0.002035857358639259,
      "grad_norm": 0.04151507094502449,
      "learning_rate": 3.790390522001662e-05,
      "loss": 10.3754,
      "step": 62
    },
    {
      "epoch": 0.0020686937676495697,
      "grad_norm": 0.047984637320041656,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 10.3595,
      "step": 63
    },
    {
      "epoch": 0.0020686937676495697,
      "eval_loss": 10.37141227722168,
      "eval_runtime": 170.6032,
      "eval_samples_per_second": 150.325,
      "eval_steps_per_second": 18.792,
      "step": 63
    },
    {
      "epoch": 0.0021015301766598806,
      "grad_norm": 0.049031127244234085,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 10.3674,
      "step": 64
    },
    {
      "epoch": 0.002134366585670191,
      "grad_norm": 0.051563944667577744,
      "learning_rate": 3.289899283371657e-05,
      "loss": 10.376,
      "step": 65
    },
    {
      "epoch": 0.0021672029946805016,
      "grad_norm": 0.04762021079659462,
      "learning_rate": 3.12696703292044e-05,
      "loss": 10.3799,
      "step": 66
    },
    {
      "epoch": 0.0022000394036908125,
      "grad_norm": 0.06098484992980957,
      "learning_rate": 2.9663167846209998e-05,
      "loss": 10.3745,
      "step": 67
    },
    {
      "epoch": 0.002232875812701123,
      "grad_norm": 0.04760457202792168,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 10.3699,
      "step": 68
    },
    {
      "epoch": 0.0022657122217114335,
      "grad_norm": 0.05251610651612282,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 10.3793,
      "step": 69
    },
    {
      "epoch": 0.0022985486307217444,
      "grad_norm": 0.05606340244412422,
      "learning_rate": 2.500000000000001e-05,
      "loss": 10.3652,
      "step": 70
    },
    {
      "epoch": 0.002331385039732055,
      "grad_norm": 0.04817664250731468,
      "learning_rate": 2.350403678833976e-05,
      "loss": 10.374,
      "step": 71
    },
    {
      "epoch": 0.0023642214487423654,
      "grad_norm": 0.047362156212329865,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 10.3679,
      "step": 72
    },
    {
      "epoch": 0.0023642214487423654,
      "eval_loss": 10.370889663696289,
      "eval_runtime": 170.6022,
      "eval_samples_per_second": 150.326,
      "eval_steps_per_second": 18.792,
      "step": 72
    },
    {
      "epoch": 0.0023970578577526764,
      "grad_norm": 0.05063464865088463,
      "learning_rate": 2.061073738537635e-05,
      "loss": 10.3752,
      "step": 73
    },
    {
      "epoch": 0.002429894266762987,
      "grad_norm": 0.05633007362484932,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 10.3643,
      "step": 74
    },
    {
      "epoch": 0.0024627306757732973,
      "grad_norm": 0.06308276206254959,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 10.3707,
      "step": 75
    },
    {
      "epoch": 0.0024955670847836083,
      "grad_norm": 0.05032104626297951,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 10.3751,
      "step": 76
    },
    {
      "epoch": 0.0025284034937939188,
      "grad_norm": 0.06600265204906464,
      "learning_rate": 1.526708147705013e-05,
      "loss": 10.3799,
      "step": 77
    },
    {
      "epoch": 0.0025612399028042293,
      "grad_norm": 0.05626071244478226,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 10.3706,
      "step": 78
    },
    {
      "epoch": 0.0025940763118145397,
      "grad_norm": 0.04748469963669777,
      "learning_rate": 1.2842758726130283e-05,
      "loss": 10.369,
      "step": 79
    },
    {
      "epoch": 0.0026269127208248507,
      "grad_norm": 0.04868736118078232,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 10.3713,
      "step": 80
    },
    {
      "epoch": 0.002659749129835161,
      "grad_norm": 0.04755145311355591,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 10.3695,
      "step": 81
    },
    {
      "epoch": 0.002659749129835161,
      "eval_loss": 10.370580673217773,
      "eval_runtime": 170.173,
      "eval_samples_per_second": 150.705,
      "eval_steps_per_second": 18.84,
      "step": 81
    },
    {
      "epoch": 0.0026925855388454717,
      "grad_norm": 0.04993366450071335,
      "learning_rate": 9.549150281252633e-06,
      "loss": 10.3709,
      "step": 82
    },
    {
      "epoch": 0.0027254219478557826,
      "grad_norm": 0.047633569687604904,
      "learning_rate": 8.548121372247918e-06,
      "loss": 10.3658,
      "step": 83
    },
    {
      "epoch": 0.002758258356866093,
      "grad_norm": 0.049595627933740616,
      "learning_rate": 7.597595192178702e-06,
      "loss": 10.3725,
      "step": 84
    },
    {
      "epoch": 0.0027910947658764036,
      "grad_norm": 0.04706868901848793,
      "learning_rate": 6.698729810778065e-06,
      "loss": 10.3715,
      "step": 85
    },
    {
      "epoch": 0.0028239311748867145,
      "grad_norm": 0.047884490340948105,
      "learning_rate": 5.852620357053651e-06,
      "loss": 10.364,
      "step": 86
    },
    {
      "epoch": 0.002856767583897025,
      "grad_norm": 0.04975724220275879,
      "learning_rate": 5.060297685041659e-06,
      "loss": 10.3678,
      "step": 87
    },
    {
      "epoch": 0.0028896039929073355,
      "grad_norm": 0.052582498639822006,
      "learning_rate": 4.322727117869951e-06,
      "loss": 10.3702,
      "step": 88
    },
    {
      "epoch": 0.0029224404019176464,
      "grad_norm": 0.05560288950800896,
      "learning_rate": 3.6408072716606346e-06,
      "loss": 10.3735,
      "step": 89
    },
    {
      "epoch": 0.002955276810927957,
      "grad_norm": 0.061860982328653336,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 10.3729,
      "step": 90
    },
    {
      "epoch": 0.002955276810927957,
      "eval_loss": 10.370454788208008,
      "eval_runtime": 170.5443,
      "eval_samples_per_second": 150.377,
      "eval_steps_per_second": 18.799,
      "step": 90
    },
    {
      "epoch": 0.0029881132199382674,
      "grad_norm": 0.0577683299779892,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 10.3711,
      "step": 91
    },
    {
      "epoch": 0.0030209496289485783,
      "grad_norm": 0.05124616622924805,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 10.3703,
      "step": 92
    },
    {
      "epoch": 0.003053786037958889,
      "grad_norm": 0.052736300975084305,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 10.3715,
      "step": 93
    },
    {
      "epoch": 0.0030866224469691993,
      "grad_norm": 0.05762464180588722,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 10.3688,
      "step": 94
    },
    {
      "epoch": 0.0031194588559795102,
      "grad_norm": 0.0635187178850174,
      "learning_rate": 7.596123493895991e-07,
      "loss": 10.3695,
      "step": 95
    },
    {
      "epoch": 0.0031522952649898207,
      "grad_norm": 0.05708334967494011,
      "learning_rate": 4.865965629214819e-07,
      "loss": 10.366,
      "step": 96
    },
    {
      "epoch": 0.003185131674000131,
      "grad_norm": 0.05262709781527519,
      "learning_rate": 2.7390523158633554e-07,
      "loss": 10.3671,
      "step": 97
    },
    {
      "epoch": 0.003217968083010442,
      "grad_norm": 0.06074366718530655,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 10.3769,
      "step": 98
    },
    {
      "epoch": 0.0032508044920207526,
      "grad_norm": 0.04959198087453842,
      "learning_rate": 3.04586490452119e-08,
      "loss": 10.3706,
      "step": 99
    },
    {
      "epoch": 0.0032508044920207526,
      "eval_loss": 10.370431900024414,
      "eval_runtime": 170.5901,
      "eval_samples_per_second": 150.337,
      "eval_steps_per_second": 18.794,
      "step": 99
    },
    {
      "epoch": 0.003283640901031063,
      "grad_norm": 0.050457920879125595,
      "learning_rate": 0.0,
      "loss": 10.375,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5230244659200.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}