|
{
  "best_metric": 0.874006450176239,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 0.0855431993156544,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.000855431993156544,
      "grad_norm": 2.352600574493408,
      "learning_rate": 1e-05,
      "loss": 4.8284,
      "step": 1
    },
    {
      "epoch": 0.000855431993156544,
      "eval_loss": 1.5003745555877686,
      "eval_runtime": 8.1601,
      "eval_samples_per_second": 120.709,
      "eval_steps_per_second": 30.269,
      "step": 1
    },
    {
      "epoch": 0.001710863986313088,
      "grad_norm": 2.2152650356292725,
      "learning_rate": 2e-05,
      "loss": 4.6729,
      "step": 2
    },
    {
      "epoch": 0.0025662959794696323,
      "grad_norm": 3.0072169303894043,
      "learning_rate": 3e-05,
      "loss": 4.1083,
      "step": 3
    },
    {
      "epoch": 0.003421727972626176,
      "grad_norm": 4.585644245147705,
      "learning_rate": 4e-05,
      "loss": 2.6223,
      "step": 4
    },
    {
      "epoch": 0.00427715996578272,
      "grad_norm": 4.366663932800293,
      "learning_rate": 5e-05,
      "loss": 1.0921,
      "step": 5
    },
    {
      "epoch": 0.0051325919589392645,
      "grad_norm": 2.732001304626465,
      "learning_rate": 6e-05,
      "loss": 0.8305,
      "step": 6
    },
    {
      "epoch": 0.005988023952095809,
      "grad_norm": 2.6135103702545166,
      "learning_rate": 7e-05,
      "loss": 0.7626,
      "step": 7
    },
    {
      "epoch": 0.006843455945252352,
      "grad_norm": 2.3796298503875732,
      "learning_rate": 8e-05,
      "loss": 0.8145,
      "step": 8
    },
    {
      "epoch": 0.007698887938408896,
      "grad_norm": 2.055875301361084,
      "learning_rate": 9e-05,
      "loss": 0.6454,
      "step": 9
    },
    {
      "epoch": 0.00855431993156544,
      "grad_norm": 1.9896808862686157,
      "learning_rate": 0.0001,
      "loss": 0.768,
      "step": 10
    },
    {
      "epoch": 0.009409751924721984,
      "grad_norm": 1.5508729219436646,
      "learning_rate": 9.99695413509548e-05,
      "loss": 0.6364,
      "step": 11
    },
    {
      "epoch": 0.010265183917878529,
      "grad_norm": 1.433290958404541,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.6402,
      "step": 12
    },
    {
      "epoch": 0.011120615911035072,
      "grad_norm": 1.0565850734710693,
      "learning_rate": 9.972609476841367e-05,
      "loss": 0.5274,
      "step": 13
    },
    {
      "epoch": 0.011976047904191617,
      "grad_norm": 1.059982419013977,
      "learning_rate": 9.951340343707852e-05,
      "loss": 0.4195,
      "step": 14
    },
    {
      "epoch": 0.01283147989734816,
      "grad_norm": 2.2911834716796875,
      "learning_rate": 9.924038765061042e-05,
      "loss": 0.5623,
      "step": 15
    },
    {
      "epoch": 0.013686911890504704,
      "grad_norm": 1.2333793640136719,
      "learning_rate": 9.890738003669029e-05,
      "loss": 0.5258,
      "step": 16
    },
    {
      "epoch": 0.01454234388366125,
      "grad_norm": 1.084723711013794,
      "learning_rate": 9.851478631379982e-05,
      "loss": 0.6243,
      "step": 17
    },
    {
      "epoch": 0.015397775876817793,
      "grad_norm": 0.7262724041938782,
      "learning_rate": 9.806308479691595e-05,
      "loss": 0.3692,
      "step": 18
    },
    {
      "epoch": 0.016253207869974338,
      "grad_norm": 1.5828852653503418,
      "learning_rate": 9.755282581475769e-05,
      "loss": 0.3979,
      "step": 19
    },
    {
      "epoch": 0.01710863986313088,
      "grad_norm": 1.155763030052185,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.3792,
      "step": 20
    },
    {
      "epoch": 0.017964071856287425,
      "grad_norm": 0.7279295921325684,
      "learning_rate": 9.635919272833938e-05,
      "loss": 0.3955,
      "step": 21
    },
    {
      "epoch": 0.018819503849443968,
      "grad_norm": 0.7820014953613281,
      "learning_rate": 9.567727288213005e-05,
      "loss": 0.404,
      "step": 22
    },
    {
      "epoch": 0.019674935842600515,
      "grad_norm": 0.7350875735282898,
      "learning_rate": 9.493970231495835e-05,
      "loss": 0.3896,
      "step": 23
    },
    {
      "epoch": 0.020530367835757058,
      "grad_norm": 0.780421257019043,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.432,
      "step": 24
    },
    {
      "epoch": 0.0213857998289136,
      "grad_norm": 1.194251298904419,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.4828,
      "step": 25
    },
    {
      "epoch": 0.022241231822070145,
      "grad_norm": 0.9555559158325195,
      "learning_rate": 9.24024048078213e-05,
      "loss": 0.4872,
      "step": 26
    },
    {
      "epoch": 0.023096663815226688,
      "grad_norm": 0.5422655344009399,
      "learning_rate": 9.145187862775209e-05,
      "loss": 0.4849,
      "step": 27
    },
    {
      "epoch": 0.023952095808383235,
      "grad_norm": 0.7013706564903259,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.4789,
      "step": 28
    },
    {
      "epoch": 0.02480752780153978,
      "grad_norm": 0.6927315592765808,
      "learning_rate": 8.940053768033609e-05,
      "loss": 0.5087,
      "step": 29
    },
    {
      "epoch": 0.02566295979469632,
      "grad_norm": 0.7441868185997009,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.571,
      "step": 30
    },
    {
      "epoch": 0.026518391787852865,
      "grad_norm": 0.9051419496536255,
      "learning_rate": 8.715724127386972e-05,
      "loss": 0.5053,
      "step": 31
    },
    {
      "epoch": 0.02737382378100941,
      "grad_norm": 0.9479249715805054,
      "learning_rate": 8.596699001693255e-05,
      "loss": 0.6193,
      "step": 32
    },
    {
      "epoch": 0.028229255774165955,
      "grad_norm": 0.5198885202407837,
      "learning_rate": 8.473291852294987e-05,
      "loss": 0.5146,
      "step": 33
    },
    {
      "epoch": 0.0290846877673225,
      "grad_norm": 0.5799432396888733,
      "learning_rate": 8.345653031794292e-05,
      "loss": 0.5284,
      "step": 34
    },
    {
      "epoch": 0.029940119760479042,
      "grad_norm": 0.7015302181243896,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.5627,
      "step": 35
    },
    {
      "epoch": 0.030795551753635585,
      "grad_norm": 0.6689801216125488,
      "learning_rate": 8.07830737662829e-05,
      "loss": 0.6305,
      "step": 36
    },
    {
      "epoch": 0.03165098374679213,
      "grad_norm": 0.7400729656219482,
      "learning_rate": 7.938926261462366e-05,
      "loss": 0.5877,
      "step": 37
    },
    {
      "epoch": 0.032506415739948676,
      "grad_norm": 0.981304407119751,
      "learning_rate": 7.795964517353735e-05,
      "loss": 0.7527,
      "step": 38
    },
    {
      "epoch": 0.033361847733105215,
      "grad_norm": 1.042090654373169,
      "learning_rate": 7.649596321166024e-05,
      "loss": 0.7431,
      "step": 39
    },
    {
      "epoch": 0.03421727972626176,
      "grad_norm": 0.9239062070846558,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.7556,
      "step": 40
    },
    {
      "epoch": 0.03507271171941831,
      "grad_norm": 0.708960771560669,
      "learning_rate": 7.347357813929454e-05,
      "loss": 0.6573,
      "step": 41
    },
    {
      "epoch": 0.03592814371257485,
      "grad_norm": 1.1409260034561157,
      "learning_rate": 7.191855733945387e-05,
      "loss": 0.7552,
      "step": 42
    },
    {
      "epoch": 0.036783575705731396,
      "grad_norm": 1.2339162826538086,
      "learning_rate": 7.033683215379002e-05,
      "loss": 0.7546,
      "step": 43
    },
    {
      "epoch": 0.037639007698887936,
      "grad_norm": 1.501574993133545,
      "learning_rate": 6.873032967079561e-05,
      "loss": 0.8573,
      "step": 44
    },
    {
      "epoch": 0.03849443969204448,
      "grad_norm": 2.9573240280151367,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.733,
      "step": 45
    },
    {
      "epoch": 0.03934987168520103,
      "grad_norm": 2.3327925205230713,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.7521,
      "step": 46
    },
    {
      "epoch": 0.04020530367835757,
      "grad_norm": 1.8159849643707275,
      "learning_rate": 6.378186779084995e-05,
      "loss": 0.8454,
      "step": 47
    },
    {
      "epoch": 0.041060735671514116,
      "grad_norm": 1.5403494834899902,
      "learning_rate": 6.209609477998338e-05,
      "loss": 0.8458,
      "step": 48
    },
    {
      "epoch": 0.041916167664670656,
      "grad_norm": 1.8811618089675903,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 1.049,
      "step": 49
    },
    {
      "epoch": 0.0427715996578272,
      "grad_norm": 3.8858306407928467,
      "learning_rate": 5.868240888334653e-05,
      "loss": 1.698,
      "step": 50
    },
    {
      "epoch": 0.0427715996578272,
      "eval_loss": 1.4491695165634155,
      "eval_runtime": 7.3642,
      "eval_samples_per_second": 133.755,
      "eval_steps_per_second": 33.541,
      "step": 50
    },
    {
      "epoch": 0.04362703165098375,
      "grad_norm": 43.02643966674805,
      "learning_rate": 5.695865504800327e-05,
      "loss": 9.5889,
      "step": 51
    },
    {
      "epoch": 0.04448246364414029,
      "grad_norm": 44.06403350830078,
      "learning_rate": 5.522642316338268e-05,
      "loss": 9.5999,
      "step": 52
    },
    {
      "epoch": 0.045337895637296836,
      "grad_norm": 31.32398223876953,
      "learning_rate": 5.348782368720626e-05,
      "loss": 7.1508,
      "step": 53
    },
    {
      "epoch": 0.046193327630453376,
      "grad_norm": 17.837383270263672,
      "learning_rate": 5.174497483512506e-05,
      "loss": 2.9384,
      "step": 54
    },
    {
      "epoch": 0.04704875962360992,
      "grad_norm": 7.213578701019287,
      "learning_rate": 5e-05,
      "loss": 0.9618,
      "step": 55
    },
    {
      "epoch": 0.04790419161676647,
      "grad_norm": 5.207120418548584,
      "learning_rate": 4.825502516487497e-05,
      "loss": 0.7529,
      "step": 56
    },
    {
      "epoch": 0.04875962360992301,
      "grad_norm": 4.69100284576416,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 0.6933,
      "step": 57
    },
    {
      "epoch": 0.04961505560307956,
      "grad_norm": 2.485616683959961,
      "learning_rate": 4.477357683661734e-05,
      "loss": 0.4986,
      "step": 58
    },
    {
      "epoch": 0.0504704875962361,
      "grad_norm": 1.7521893978118896,
      "learning_rate": 4.3041344951996746e-05,
      "loss": 0.382,
      "step": 59
    },
    {
      "epoch": 0.05132591958939264,
      "grad_norm": 1.4033925533294678,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.343,
      "step": 60
    },
    {
      "epoch": 0.05218135158254919,
      "grad_norm": 1.392710566520691,
      "learning_rate": 3.960441545911204e-05,
      "loss": 0.3722,
      "step": 61
    },
    {
      "epoch": 0.05303678357570573,
      "grad_norm": 1.1755892038345337,
      "learning_rate": 3.790390522001662e-05,
      "loss": 0.3236,
      "step": 62
    },
    {
      "epoch": 0.05389221556886228,
      "grad_norm": 0.8601241707801819,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 0.2427,
      "step": 63
    },
    {
      "epoch": 0.05474764756201882,
      "grad_norm": 0.9155849814414978,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.3333,
      "step": 64
    },
    {
      "epoch": 0.055603079555175364,
      "grad_norm": 0.8021154403686523,
      "learning_rate": 3.289899283371657e-05,
      "loss": 0.2494,
      "step": 65
    },
    {
      "epoch": 0.05645851154833191,
      "grad_norm": 0.8067910075187683,
      "learning_rate": 3.12696703292044e-05,
      "loss": 0.2427,
      "step": 66
    },
    {
      "epoch": 0.05731394354148845,
      "grad_norm": 0.6124118566513062,
      "learning_rate": 2.9663167846209998e-05,
      "loss": 0.2362,
      "step": 67
    },
    {
      "epoch": 0.058169375534645,
      "grad_norm": 0.7200761437416077,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 0.2506,
      "step": 68
    },
    {
      "epoch": 0.05902480752780154,
      "grad_norm": 0.6140468120574951,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 0.242,
      "step": 69
    },
    {
      "epoch": 0.059880239520958084,
      "grad_norm": 0.8597086071968079,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.3553,
      "step": 70
    },
    {
      "epoch": 0.06073567151411463,
      "grad_norm": 0.8153907656669617,
      "learning_rate": 2.350403678833976e-05,
      "loss": 0.2191,
      "step": 71
    },
    {
      "epoch": 0.06159110350727117,
      "grad_norm": 0.5583605766296387,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 0.2033,
      "step": 72
    },
    {
      "epoch": 0.06244653550042772,
      "grad_norm": 0.5807018876075745,
      "learning_rate": 2.061073738537635e-05,
      "loss": 0.2504,
      "step": 73
    },
    {
      "epoch": 0.06330196749358426,
      "grad_norm": 0.563032865524292,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 0.2386,
      "step": 74
    },
    {
      "epoch": 0.0641573994867408,
      "grad_norm": 0.4548931121826172,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.2231,
      "step": 75
    },
    {
      "epoch": 0.06501283147989735,
      "grad_norm": 0.545421838760376,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 0.2811,
      "step": 76
    },
    {
      "epoch": 0.0658682634730539,
      "grad_norm": 0.8195017576217651,
      "learning_rate": 1.526708147705013e-05,
      "loss": 0.2811,
      "step": 77
    },
    {
      "epoch": 0.06672369546621043,
      "grad_norm": 0.7224476337432861,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 0.3034,
      "step": 78
    },
    {
      "epoch": 0.06757912745936698,
      "grad_norm": 0.738802433013916,
      "learning_rate": 1.2842758726130283e-05,
      "loss": 0.3257,
      "step": 79
    },
    {
      "epoch": 0.06843455945252352,
      "grad_norm": 0.7129746079444885,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.3329,
      "step": 80
    },
    {
      "epoch": 0.06928999144568007,
      "grad_norm": 0.7020834684371948,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 0.307,
      "step": 81
    },
    {
      "epoch": 0.07014542343883662,
      "grad_norm": 0.5662789940834045,
      "learning_rate": 9.549150281252633e-06,
      "loss": 0.3007,
      "step": 82
    },
    {
      "epoch": 0.07100085543199315,
      "grad_norm": 0.6873999238014221,
      "learning_rate": 8.548121372247918e-06,
      "loss": 0.34,
      "step": 83
    },
    {
      "epoch": 0.0718562874251497,
      "grad_norm": 0.7224177718162537,
      "learning_rate": 7.597595192178702e-06,
      "loss": 0.3833,
      "step": 84
    },
    {
      "epoch": 0.07271171941830624,
      "grad_norm": 1.1468607187271118,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.3873,
      "step": 85
    },
    {
      "epoch": 0.07356715141146279,
      "grad_norm": 0.7172346115112305,
      "learning_rate": 5.852620357053651e-06,
      "loss": 0.3333,
      "step": 86
    },
    {
      "epoch": 0.07442258340461934,
      "grad_norm": 0.7859193086624146,
      "learning_rate": 5.060297685041659e-06,
      "loss": 0.4443,
      "step": 87
    },
    {
      "epoch": 0.07527801539777587,
      "grad_norm": 0.7493287324905396,
      "learning_rate": 4.322727117869951e-06,
      "loss": 0.3925,
      "step": 88
    },
    {
      "epoch": 0.07613344739093242,
      "grad_norm": 1.187333106994629,
      "learning_rate": 3.6408072716606346e-06,
      "loss": 0.5375,
      "step": 89
    },
    {
      "epoch": 0.07698887938408897,
      "grad_norm": 1.1516120433807373,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.5597,
      "step": 90
    },
    {
      "epoch": 0.07784431137724551,
      "grad_norm": 1.370631217956543,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 0.6242,
      "step": 91
    },
    {
      "epoch": 0.07869974337040206,
      "grad_norm": 1.2987772226333618,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 0.6291,
      "step": 92
    },
    {
      "epoch": 0.07955517536355859,
      "grad_norm": 1.182003378868103,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 0.7286,
      "step": 93
    },
    {
      "epoch": 0.08041060735671514,
      "grad_norm": 2.1859383583068848,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 0.7186,
      "step": 94
    },
    {
      "epoch": 0.08126603934987169,
      "grad_norm": 2.0418176651000977,
      "learning_rate": 7.596123493895991e-07,
      "loss": 0.7344,
      "step": 95
    },
    {
      "epoch": 0.08212147134302823,
      "grad_norm": 1.459978461265564,
      "learning_rate": 4.865965629214819e-07,
      "loss": 0.7504,
      "step": 96
    },
    {
      "epoch": 0.08297690333618478,
      "grad_norm": 2.8905394077301025,
      "learning_rate": 2.7390523158633554e-07,
      "loss": 0.9282,
      "step": 97
    },
    {
      "epoch": 0.08383233532934131,
      "grad_norm": 2.008319854736328,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 1.0452,
      "step": 98
    },
    {
      "epoch": 0.08468776732249786,
      "grad_norm": 2.0289366245269775,
      "learning_rate": 3.04586490452119e-08,
      "loss": 1.0555,
      "step": 99
    },
    {
      "epoch": 0.0855431993156544,
      "grad_norm": 5.0649800300598145,
      "learning_rate": 0.0,
      "loss": 1.9191,
      "step": 100
    },
    {
      "epoch": 0.0855431993156544,
      "eval_loss": 0.874006450176239,
      "eval_runtime": 7.3781,
      "eval_samples_per_second": 133.503,
      "eval_steps_per_second": 33.477,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1537626265878528.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}