{
  "best_metric": 0.8940727710723877,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 0.028328611898016998,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00028328611898016995,
      "grad_norm": 2.4837124347686768,
      "learning_rate": 5e-06,
      "loss": 1.0328,
      "step": 1
    },
    {
      "epoch": 0.00028328611898016995,
      "eval_loss": 1.364790678024292,
      "eval_runtime": 482.3304,
      "eval_samples_per_second": 12.328,
      "eval_steps_per_second": 6.164,
      "step": 1
    },
    {
      "epoch": 0.0005665722379603399,
      "grad_norm": 0.7393561005592346,
      "learning_rate": 1e-05,
      "loss": 1.044,
      "step": 2
    },
    {
      "epoch": 0.0008498583569405099,
      "grad_norm": 0.5064038038253784,
      "learning_rate": 1.5e-05,
      "loss": 1.0228,
      "step": 3
    },
    {
      "epoch": 0.0011331444759206798,
      "grad_norm": 1.1812818050384521,
      "learning_rate": 2e-05,
      "loss": 1.0907,
      "step": 4
    },
    {
      "epoch": 0.00141643059490085,
      "grad_norm": 0.7924461364746094,
      "learning_rate": 2.5e-05,
      "loss": 1.1227,
      "step": 5
    },
    {
      "epoch": 0.0016997167138810198,
      "grad_norm": 0.5778879523277283,
      "learning_rate": 3e-05,
      "loss": 1.0562,
      "step": 6
    },
    {
      "epoch": 0.00198300283286119,
      "grad_norm": 0.7456988096237183,
      "learning_rate": 3.5e-05,
      "loss": 1.0785,
      "step": 7
    },
    {
      "epoch": 0.0022662889518413596,
      "grad_norm": 0.5129983425140381,
      "learning_rate": 4e-05,
      "loss": 1.0225,
      "step": 8
    },
    {
      "epoch": 0.0025495750708215297,
      "grad_norm": 0.4113961458206177,
      "learning_rate": 4.5e-05,
      "loss": 1.0419,
      "step": 9
    },
    {
      "epoch": 0.0028328611898017,
      "grad_norm": 0.37529513239860535,
      "learning_rate": 5e-05,
      "loss": 0.9771,
      "step": 10
    },
    {
      "epoch": 0.0031161473087818695,
      "grad_norm": 0.4042710065841675,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.9975,
      "step": 11
    },
    {
      "epoch": 0.0033994334277620396,
      "grad_norm": 0.3994333744049072,
      "learning_rate": 6e-05,
      "loss": 1.0203,
      "step": 12
    },
    {
      "epoch": 0.0036827195467422098,
      "grad_norm": 0.430587500333786,
      "learning_rate": 6.500000000000001e-05,
      "loss": 0.981,
      "step": 13
    },
    {
      "epoch": 0.00396600566572238,
      "grad_norm": 0.4299090504646301,
      "learning_rate": 7e-05,
      "loss": 0.9638,
      "step": 14
    },
    {
      "epoch": 0.00424929178470255,
      "grad_norm": 0.41372954845428467,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.9036,
      "step": 15
    },
    {
      "epoch": 0.004532577903682719,
      "grad_norm": 0.4030589163303375,
      "learning_rate": 8e-05,
      "loss": 0.8851,
      "step": 16
    },
    {
      "epoch": 0.004815864022662889,
      "grad_norm": 0.43109366297721863,
      "learning_rate": 8.5e-05,
      "loss": 0.9949,
      "step": 17
    },
    {
      "epoch": 0.0050991501416430595,
      "grad_norm": 0.4088292419910431,
      "learning_rate": 9e-05,
      "loss": 0.9794,
      "step": 18
    },
    {
      "epoch": 0.00538243626062323,
      "grad_norm": 0.3350220322608948,
      "learning_rate": 9.5e-05,
      "loss": 0.8792,
      "step": 19
    },
    {
      "epoch": 0.0056657223796034,
      "grad_norm": 0.30330002307891846,
      "learning_rate": 0.0001,
      "loss": 0.878,
      "step": 20
    },
    {
      "epoch": 0.00594900849858357,
      "grad_norm": 0.3451264500617981,
      "learning_rate": 9.999238475781957e-05,
      "loss": 0.8818,
      "step": 21
    },
    {
      "epoch": 0.006232294617563739,
      "grad_norm": 0.3362950384616852,
      "learning_rate": 9.99695413509548e-05,
      "loss": 0.8614,
      "step": 22
    },
    {
      "epoch": 0.006515580736543909,
      "grad_norm": 0.36854445934295654,
      "learning_rate": 9.99314767377287e-05,
      "loss": 0.8847,
      "step": 23
    },
    {
      "epoch": 0.006798866855524079,
      "grad_norm": 0.3834778666496277,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.9134,
      "step": 24
    },
    {
      "epoch": 0.007082152974504249,
      "grad_norm": 0.4009423851966858,
      "learning_rate": 9.980973490458728e-05,
      "loss": 0.8813,
      "step": 25
    },
    {
      "epoch": 0.0073654390934844195,
      "grad_norm": 0.32073336839675903,
      "learning_rate": 9.972609476841367e-05,
      "loss": 0.8641,
      "step": 26
    },
    {
      "epoch": 0.00764872521246459,
      "grad_norm": 0.3810870051383972,
      "learning_rate": 9.962730758206611e-05,
      "loss": 0.9017,
      "step": 27
    },
    {
      "epoch": 0.00793201133144476,
      "grad_norm": 0.40584418177604675,
      "learning_rate": 9.951340343707852e-05,
      "loss": 0.9204,
      "step": 28
    },
    {
      "epoch": 0.00821529745042493,
      "grad_norm": 0.29945215582847595,
      "learning_rate": 9.938441702975689e-05,
      "loss": 0.8645,
      "step": 29
    },
    {
      "epoch": 0.0084985835694051,
      "grad_norm": 0.4067940413951874,
      "learning_rate": 9.924038765061042e-05,
      "loss": 0.9061,
      "step": 30
    },
    {
      "epoch": 0.008781869688385268,
      "grad_norm": 0.4845235049724579,
      "learning_rate": 9.908135917238321e-05,
      "loss": 0.9485,
      "step": 31
    },
    {
      "epoch": 0.009065155807365438,
      "grad_norm": 0.4056897461414337,
      "learning_rate": 9.890738003669029e-05,
      "loss": 0.9093,
      "step": 32
    },
    {
      "epoch": 0.009348441926345609,
      "grad_norm": 0.43699613213539124,
      "learning_rate": 9.871850323926177e-05,
      "loss": 0.9523,
      "step": 33
    },
    {
      "epoch": 0.009631728045325779,
      "grad_norm": 0.5528873205184937,
      "learning_rate": 9.851478631379982e-05,
      "loss": 0.8597,
      "step": 34
    },
    {
      "epoch": 0.009915014164305949,
      "grad_norm": 0.5298404097557068,
      "learning_rate": 9.829629131445342e-05,
      "loss": 1.0194,
      "step": 35
    },
    {
      "epoch": 0.010198300283286119,
      "grad_norm": 0.5607902407646179,
      "learning_rate": 9.806308479691595e-05,
      "loss": 0.9661,
      "step": 36
    },
    {
      "epoch": 0.010481586402266289,
      "grad_norm": 0.6369351744651794,
      "learning_rate": 9.781523779815179e-05,
      "loss": 1.0808,
      "step": 37
    },
    {
      "epoch": 0.01076487252124646,
      "grad_norm": 0.5015552639961243,
      "learning_rate": 9.755282581475769e-05,
      "loss": 0.9148,
      "step": 38
    },
    {
      "epoch": 0.01104815864022663,
      "grad_norm": 0.5619277954101562,
      "learning_rate": 9.727592877996585e-05,
      "loss": 1.0133,
      "step": 39
    },
    {
      "epoch": 0.0113314447592068,
      "grad_norm": 0.7182181477546692,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.994,
      "step": 40
    },
    {
      "epoch": 0.01161473087818697,
      "grad_norm": 0.7005313634872437,
      "learning_rate": 9.667902132486009e-05,
      "loss": 1.0247,
      "step": 41
    },
    {
      "epoch": 0.01189801699716714,
      "grad_norm": 0.771409273147583,
      "learning_rate": 9.635919272833938e-05,
      "loss": 1.1388,
      "step": 42
    },
    {
      "epoch": 0.012181303116147308,
      "grad_norm": 0.6452864408493042,
      "learning_rate": 9.602524267262203e-05,
      "loss": 1.0388,
      "step": 43
    },
    {
      "epoch": 0.012464589235127478,
      "grad_norm": 0.7697048783302307,
      "learning_rate": 9.567727288213005e-05,
      "loss": 1.0937,
      "step": 44
    },
    {
      "epoch": 0.012747875354107648,
      "grad_norm": 0.8071401119232178,
      "learning_rate": 9.53153893518325e-05,
      "loss": 0.938,
      "step": 45
    },
    {
      "epoch": 0.013031161473087818,
      "grad_norm": 0.9409806728363037,
      "learning_rate": 9.493970231495835e-05,
      "loss": 1.23,
      "step": 46
    },
    {
      "epoch": 0.013314447592067988,
      "grad_norm": 0.9912319779396057,
      "learning_rate": 9.45503262094184e-05,
      "loss": 1.0184,
      "step": 47
    },
    {
      "epoch": 0.013597733711048159,
      "grad_norm": 1.4439435005187988,
      "learning_rate": 9.414737964294636e-05,
      "loss": 1.1788,
      "step": 48
    },
    {
      "epoch": 0.013881019830028329,
      "grad_norm": 1.5036474466323853,
      "learning_rate": 9.373098535696979e-05,
      "loss": 1.0626,
      "step": 49
    },
    {
      "epoch": 0.014164305949008499,
      "grad_norm": 2.3197476863861084,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.0557,
      "step": 50
    },
    {
      "epoch": 0.014164305949008499,
      "eval_loss": 0.9729824066162109,
      "eval_runtime": 485.1181,
      "eval_samples_per_second": 12.257,
      "eval_steps_per_second": 6.128,
      "step": 50
    },
    {
      "epoch": 0.014447592067988669,
      "grad_norm": 0.3290400803089142,
      "learning_rate": 9.285836503510562e-05,
      "loss": 0.7674,
      "step": 51
    },
    {
      "epoch": 0.014730878186968839,
      "grad_norm": 0.422162264585495,
      "learning_rate": 9.24024048078213e-05,
      "loss": 0.8912,
      "step": 52
    },
    {
      "epoch": 0.01501416430594901,
      "grad_norm": 0.38885658979415894,
      "learning_rate": 9.193352839727121e-05,
      "loss": 0.8074,
      "step": 53
    },
    {
      "epoch": 0.01529745042492918,
      "grad_norm": 0.32865843176841736,
      "learning_rate": 9.145187862775209e-05,
      "loss": 0.8353,
      "step": 54
    },
    {
      "epoch": 0.015580736543909348,
      "grad_norm": 0.29051336646080017,
      "learning_rate": 9.09576022144496e-05,
      "loss": 0.8639,
      "step": 55
    },
    {
      "epoch": 0.01586402266288952,
      "grad_norm": 0.2875722646713257,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.8683,
      "step": 56
    },
    {
      "epoch": 0.01614730878186969,
      "grad_norm": 0.26893699169158936,
      "learning_rate": 8.993177550236464e-05,
      "loss": 0.8548,
      "step": 57
    },
    {
      "epoch": 0.01643059490084986,
      "grad_norm": 0.2714105248451233,
      "learning_rate": 8.940053768033609e-05,
      "loss": 0.8185,
      "step": 58
    },
    {
      "epoch": 0.01671388101983003,
      "grad_norm": 0.2685096561908722,
      "learning_rate": 8.885729807284856e-05,
      "loss": 0.8188,
      "step": 59
    },
    {
      "epoch": 0.0169971671388102,
      "grad_norm": 0.27536776661872864,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.806,
      "step": 60
    },
    {
      "epoch": 0.017280453257790367,
      "grad_norm": 0.26395905017852783,
      "learning_rate": 8.773547901113862e-05,
      "loss": 0.7769,
      "step": 61
    },
    {
      "epoch": 0.017563739376770537,
      "grad_norm": 0.28983741998672485,
      "learning_rate": 8.715724127386972e-05,
      "loss": 0.856,
      "step": 62
    },
    {
      "epoch": 0.017847025495750707,
      "grad_norm": 0.24493105709552765,
      "learning_rate": 8.656768508095853e-05,
      "loss": 0.7598,
      "step": 63
    },
    {
      "epoch": 0.018130311614730877,
      "grad_norm": 0.25833049416542053,
      "learning_rate": 8.596699001693255e-05,
      "loss": 0.8238,
      "step": 64
    },
    {
      "epoch": 0.018413597733711047,
      "grad_norm": 0.23423363268375397,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.838,
      "step": 65
    },
    {
      "epoch": 0.018696883852691217,
      "grad_norm": 0.24402977526187897,
      "learning_rate": 8.473291852294987e-05,
      "loss": 0.7818,
      "step": 66
    },
    {
      "epoch": 0.018980169971671387,
      "grad_norm": 0.2572822868824005,
      "learning_rate": 8.409991800312493e-05,
      "loss": 0.8178,
      "step": 67
    },
    {
      "epoch": 0.019263456090651557,
      "grad_norm": 0.24587130546569824,
      "learning_rate": 8.345653031794292e-05,
      "loss": 0.8298,
      "step": 68
    },
    {
      "epoch": 0.019546742209631728,
      "grad_norm": 0.2567668557167053,
      "learning_rate": 8.280295144952536e-05,
      "loss": 0.82,
      "step": 69
    },
    {
      "epoch": 0.019830028328611898,
      "grad_norm": 0.25297489762306213,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.7948,
      "step": 70
    },
    {
      "epoch": 0.020113314447592068,
      "grad_norm": 0.24557141959667206,
      "learning_rate": 8.146601955249188e-05,
      "loss": 0.8097,
      "step": 71
    },
    {
      "epoch": 0.020396600566572238,
      "grad_norm": 0.28915223479270935,
      "learning_rate": 8.07830737662829e-05,
      "loss": 0.7822,
      "step": 72
    },
    {
      "epoch": 0.020679886685552408,
      "grad_norm": 0.2591084837913513,
      "learning_rate": 8.009075115760243e-05,
      "loss": 0.7799,
      "step": 73
    },
    {
      "epoch": 0.020963172804532578,
      "grad_norm": 0.28063005208969116,
      "learning_rate": 7.938926261462366e-05,
      "loss": 0.85,
      "step": 74
    },
    {
      "epoch": 0.021246458923512748,
      "grad_norm": 0.3160620927810669,
      "learning_rate": 7.86788218175523e-05,
      "loss": 0.8925,
      "step": 75
    },
    {
      "epoch": 0.02152974504249292,
      "grad_norm": 0.3204711079597473,
      "learning_rate": 7.795964517353735e-05,
      "loss": 0.7994,
      "step": 76
    },
    {
      "epoch": 0.02181303116147309,
      "grad_norm": 0.31728625297546387,
      "learning_rate": 7.723195175075136e-05,
      "loss": 0.865,
      "step": 77
    },
    {
      "epoch": 0.02209631728045326,
      "grad_norm": 0.33611783385276794,
      "learning_rate": 7.649596321166024e-05,
      "loss": 0.8099,
      "step": 78
    },
    {
      "epoch": 0.02237960339943343,
      "grad_norm": 0.3129088580608368,
      "learning_rate": 7.575190374550272e-05,
      "loss": 0.8857,
      "step": 79
    },
    {
      "epoch": 0.0226628895184136,
      "grad_norm": 0.3404497802257538,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.8408,
      "step": 80
    },
    {
      "epoch": 0.02294617563739377,
      "grad_norm": 0.31922274827957153,
      "learning_rate": 7.424048101231686e-05,
      "loss": 0.8155,
      "step": 81
    },
    {
      "epoch": 0.02322946175637394,
      "grad_norm": 0.4240337610244751,
      "learning_rate": 7.347357813929454e-05,
      "loss": 0.9036,
      "step": 82
    },
    {
      "epoch": 0.02351274787535411,
      "grad_norm": 0.402992308139801,
      "learning_rate": 7.269952498697734e-05,
      "loss": 0.9222,
      "step": 83
    },
    {
      "epoch": 0.02379603399433428,
      "grad_norm": 0.4444620907306671,
      "learning_rate": 7.191855733945387e-05,
      "loss": 0.9426,
      "step": 84
    },
    {
      "epoch": 0.024079320113314446,
      "grad_norm": 0.5036823153495789,
      "learning_rate": 7.113091308703498e-05,
      "loss": 0.8989,
      "step": 85
    },
    {
      "epoch": 0.024362606232294616,
      "grad_norm": 0.5430697202682495,
      "learning_rate": 7.033683215379002e-05,
      "loss": 1.0644,
      "step": 86
    },
    {
      "epoch": 0.024645892351274786,
      "grad_norm": 0.47117090225219727,
      "learning_rate": 6.953655642446368e-05,
      "loss": 0.8856,
      "step": 87
    },
    {
      "epoch": 0.024929178470254956,
      "grad_norm": 0.6156890392303467,
      "learning_rate": 6.873032967079561e-05,
      "loss": 0.9403,
      "step": 88
    },
    {
      "epoch": 0.025212464589235126,
      "grad_norm": 0.5698954463005066,
      "learning_rate": 6.7918397477265e-05,
      "loss": 0.8913,
      "step": 89
    },
    {
      "epoch": 0.025495750708215296,
      "grad_norm": 0.5107197165489197,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.9912,
      "step": 90
    },
    {
      "epoch": 0.025779036827195467,
      "grad_norm": 0.6079100966453552,
      "learning_rate": 6.627840772285784e-05,
      "loss": 0.9322,
      "step": 91
    },
    {
      "epoch": 0.026062322946175637,
      "grad_norm": 0.6184947490692139,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.9983,
      "step": 92
    },
    {
      "epoch": 0.026345609065155807,
      "grad_norm": 0.6620588302612305,
      "learning_rate": 6.461858523613684e-05,
      "loss": 0.9366,
      "step": 93
    },
    {
      "epoch": 0.026628895184135977,
      "grad_norm": 0.7128224968910217,
      "learning_rate": 6.378186779084995e-05,
      "loss": 1.0045,
      "step": 94
    },
    {
      "epoch": 0.026912181303116147,
      "grad_norm": 0.7751644253730774,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.8427,
      "step": 95
    },
    {
      "epoch": 0.027195467422096317,
      "grad_norm": 0.8441152572631836,
      "learning_rate": 6.209609477998338e-05,
      "loss": 0.9694,
      "step": 96
    },
    {
      "epoch": 0.027478753541076487,
      "grad_norm": 0.9727038145065308,
      "learning_rate": 6.124755271719325e-05,
      "loss": 1.0076,
      "step": 97
    },
    {
      "epoch": 0.027762039660056657,
      "grad_norm": 0.9640235900878906,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 0.9572,
      "step": 98
    },
    {
      "epoch": 0.028045325779036828,
      "grad_norm": 1.6535217761993408,
      "learning_rate": 5.9540449768827246e-05,
      "loss": 1.0303,
      "step": 99
    },
    {
      "epoch": 0.028328611898016998,
      "grad_norm": 3.554504632949829,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.9742,
      "step": 100
    },
    {
      "epoch": 0.028328611898016998,
      "eval_loss": 0.8940727710723877,
      "eval_runtime": 484.5241,
      "eval_samples_per_second": 12.272,
      "eval_steps_per_second": 6.136,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.37604737531904e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}