|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 144,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 2.065,
      "step": 1
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.000000000000001e-06,
      "loss": 2.0734,
      "step": 2
    },
    {
      "epoch": 0.02,
      "learning_rate": 6e-06,
      "loss": 2.0409,
      "step": 3
    },
    {
      "epoch": 0.03,
      "learning_rate": 8.000000000000001e-06,
      "loss": 1.9565,
      "step": 4
    },
    {
      "epoch": 0.03,
      "learning_rate": 1e-05,
      "loss": 1.9267,
      "step": 5
    },
    {
      "epoch": 0.04,
      "learning_rate": 9.99872299773906e-06,
      "loss": 1.9743,
      "step": 6
    },
    {
      "epoch": 0.05,
      "learning_rate": 9.994892643250147e-06,
      "loss": 1.9656,
      "step": 7
    },
    {
      "epoch": 0.06,
      "learning_rate": 9.9885108930818e-06,
      "loss": 1.9553,
      "step": 8
    },
    {
      "epoch": 0.06,
      "learning_rate": 9.979581007037776e-06,
      "loss": 1.8928,
      "step": 9
    },
    {
      "epoch": 0.07,
      "learning_rate": 9.968107546511942e-06,
      "loss": 1.9141,
      "step": 10
    },
    {
      "epoch": 0.08,
      "learning_rate": 9.95409637215831e-06,
      "loss": 1.9039,
      "step": 11
    },
    {
      "epoch": 0.08,
      "learning_rate": 9.937554640897414e-06,
      "loss": 1.8886,
      "step": 12
    },
    {
      "epoch": 0.09,
      "learning_rate": 9.918490802260538e-06,
      "loss": 1.871,
      "step": 13
    },
    {
      "epoch": 0.1,
      "learning_rate": 9.896914594073703e-06,
      "loss": 1.8569,
      "step": 14
    },
    {
      "epoch": 0.1,
      "learning_rate": 9.87283703748356e-06,
      "loss": 1.8244,
      "step": 15
    },
    {
      "epoch": 0.11,
      "learning_rate": 9.846270431327793e-06,
      "loss": 1.797,
      "step": 16
    },
    {
      "epoch": 0.12,
      "learning_rate": 9.817228345852853e-06,
      "loss": 1.833,
      "step": 17
    },
    {
      "epoch": 0.12,
      "learning_rate": 9.785725615782262e-06,
      "loss": 1.8323,
      "step": 18
    },
    {
      "epoch": 0.13,
      "learning_rate": 9.751778332739033e-06,
      "loss": 1.7928,
      "step": 19
    },
    {
      "epoch": 0.14,
      "learning_rate": 9.715403837026046e-06,
      "loss": 1.839,
      "step": 20
    },
    {
      "epoch": 0.15,
      "learning_rate": 9.676620708768608e-06,
      "loss": 1.8033,
      "step": 21
    },
    {
      "epoch": 0.15,
      "learning_rate": 9.635448758423703e-06,
      "loss": 1.7964,
      "step": 22
    },
    {
      "epoch": 0.16,
      "learning_rate": 9.591909016660806e-06,
      "loss": 1.7938,
      "step": 23
    },
    {
      "epoch": 0.17,
      "learning_rate": 9.546023723619387e-06,
      "loss": 1.8156,
      "step": 24
    },
    {
      "epoch": 0.17,
      "learning_rate": 9.497816317548625e-06,
      "loss": 1.8056,
      "step": 25
    },
    {
      "epoch": 0.18,
      "learning_rate": 9.447311422835141e-06,
      "loss": 1.8169,
      "step": 26
    },
    {
      "epoch": 0.19,
      "learning_rate": 9.39453483742483e-06,
      "loss": 1.7668,
      "step": 27
    },
    {
      "epoch": 0.19,
      "learning_rate": 9.33951351964525e-06,
      "loss": 1.7645,
      "step": 28
    },
    {
      "epoch": 0.2,
      "learning_rate": 9.28227557443528e-06,
      "loss": 1.7752,
      "step": 29
    },
    {
      "epoch": 0.21,
      "learning_rate": 9.222850238989104e-06,
      "loss": 1.7713,
      "step": 30
    },
    {
      "epoch": 0.22,
      "learning_rate": 9.161267867821802e-06,
      "loss": 1.7592,
      "step": 31
    },
    {
      "epoch": 0.22,
      "learning_rate": 9.097559917264268e-06,
      "loss": 1.7947,
      "step": 32
    },
    {
      "epoch": 0.23,
      "learning_rate": 9.031758929395259e-06,
      "loss": 1.7783,
      "step": 33
    },
    {
      "epoch": 0.24,
      "learning_rate": 8.963898515418885e-06,
      "loss": 1.7728,
      "step": 34
    },
    {
      "epoch": 0.24,
      "learning_rate": 8.89401333849598e-06,
      "loss": 1.747,
      "step": 35
    },
    {
      "epoch": 0.25,
      "learning_rate": 8.82213909603812e-06,
      "loss": 1.7694,
      "step": 36
    },
    {
      "epoch": 0.26,
      "learning_rate": 8.748312501473351e-06,
      "loss": 1.7631,
      "step": 37
    },
    {
      "epoch": 0.26,
      "learning_rate": 8.672571265492944e-06,
      "loss": 1.7887,
      "step": 38
    },
    {
      "epoch": 0.27,
      "learning_rate": 8.594954076788736e-06,
      "loss": 1.7259,
      "step": 39
    },
    {
      "epoch": 0.28,
      "learning_rate": 8.515500582290914e-06,
      "loss": 1.7392,
      "step": 40
    },
    {
      "epoch": 0.28,
      "learning_rate": 8.434251366916323e-06,
      "loss": 1.7701,
      "step": 41
    },
    {
      "epoch": 0.29,
      "learning_rate": 8.351247932837655e-06,
      "loss": 1.7556,
      "step": 42
    },
    {
      "epoch": 0.3,
      "learning_rate": 8.266532678284103e-06,
      "loss": 1.7706,
      "step": 43
    },
    {
      "epoch": 0.31,
      "learning_rate": 8.18014887588431e-06,
      "loss": 1.744,
      "step": 44
    },
    {
      "epoch": 0.31,
      "learning_rate": 8.092140650562665e-06,
      "loss": 1.7396,
      "step": 45
    },
    {
      "epoch": 0.32,
      "learning_rate": 8.002552957000254e-06,
      "loss": 1.7493,
      "step": 46
    },
    {
      "epoch": 0.33,
      "learning_rate": 7.911431556671967e-06,
      "loss": 1.7445,
      "step": 47
    },
    {
      "epoch": 0.33,
      "learning_rate": 7.818822994471504e-06,
      "loss": 1.75,
      "step": 48
    },
    {
      "epoch": 0.34,
      "learning_rate": 7.72477457493619e-06,
      "loss": 1.7622,
      "step": 49
    },
    {
      "epoch": 0.35,
      "learning_rate": 7.629334338083774e-06,
      "loss": 1.7649,
      "step": 50
    },
    {
      "epoch": 0.35,
      "learning_rate": 7.532551034873558e-06,
      "loss": 1.7704,
      "step": 51
    },
    {
      "epoch": 0.36,
      "learning_rate": 7.43447410230435e-06,
      "loss": 1.7634,
      "step": 52
    },
    {
      "epoch": 0.37,
      "learning_rate": 7.335153638162005e-06,
      "loss": 1.7069,
      "step": 53
    },
    {
      "epoch": 0.38,
      "learning_rate": 7.234640375429427e-06,
      "loss": 1.7534,
      "step": 54
    },
    {
      "epoch": 0.38,
      "learning_rate": 7.132985656372126e-06,
      "loss": 1.7701,
      "step": 55
    },
    {
      "epoch": 0.39,
      "learning_rate": 7.030241406312528e-06,
      "loss": 1.7765,
      "step": 56
    },
    {
      "epoch": 0.4,
      "learning_rate": 6.926460107106483e-06,
      "loss": 1.7523,
      "step": 57
    },
    {
      "epoch": 0.4,
      "learning_rate": 6.8216947703354815e-06,
      "loss": 1.7728,
      "step": 58
    },
    {
      "epoch": 0.41,
      "learning_rate": 6.715998910228296e-06,
      "loss": 1.7448,
      "step": 59
    },
    {
      "epoch": 0.42,
      "learning_rate": 6.609426516325859e-06,
      "loss": 1.7383,
      "step": 60
    },
    {
      "epoch": 0.42,
      "learning_rate": 6.502032025903356e-06,
      "loss": 1.7549,
      "step": 61
    },
    {
      "epoch": 0.43,
      "learning_rate": 6.393870296163616e-06,
      "loss": 1.7524,
      "step": 62
    },
    {
      "epoch": 0.44,
      "learning_rate": 6.284996576216014e-06,
      "loss": 1.7553,
      "step": 63
    },
    {
      "epoch": 0.44,
      "learning_rate": 6.175466478855161e-06,
      "loss": 1.7088,
      "step": 64
    },
    {
      "epoch": 0.45,
      "learning_rate": 6.065335952153846e-06,
      "loss": 1.725,
      "step": 65
    },
    {
      "epoch": 0.46,
      "learning_rate": 5.954661250884704e-06,
      "loss": 1.7589,
      "step": 66
    },
    {
      "epoch": 0.47,
      "learning_rate": 5.843498907785236e-06,
      "loss": 1.7447,
      "step": 67
    },
    {
      "epoch": 0.47,
      "learning_rate": 5.731905704680834e-06,
      "loss": 1.7236,
      "step": 68
    },
    {
      "epoch": 0.48,
      "learning_rate": 5.6199386434805615e-06,
      "loss": 1.7343,
      "step": 69
    },
    {
      "epoch": 0.49,
      "learning_rate": 5.507654917060541e-06,
      "loss": 1.7354,
      "step": 70
    },
    {
      "epoch": 0.49,
      "learning_rate": 5.395111880049775e-06,
      "loss": 1.683,
      "step": 71
    },
    {
      "epoch": 0.5,
      "learning_rate": 5.28236701953335e-06,
      "loss": 1.7345,
      "step": 72
    },
    {
      "epoch": 0.51,
      "learning_rate": 5.169477925687981e-06,
      "loss": 1.6867,
      "step": 73
    },
    {
      "epoch": 0.51,
      "learning_rate": 5.0565022623649e-06,
      "loss": 1.734,
      "step": 74
    },
    {
      "epoch": 0.52,
      "learning_rate": 4.943497737635103e-06,
      "loss": 1.7232,
      "step": 75
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.830522074312019e-06,
      "loss": 1.7135,
      "step": 76
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.717632980466652e-06,
      "loss": 1.6894,
      "step": 77
    },
    {
      "epoch": 0.54,
      "learning_rate": 4.6048881199502265e-06,
      "loss": 1.704,
      "step": 78
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.49234508293946e-06,
      "loss": 1.6857,
      "step": 79
    },
    {
      "epoch": 0.56,
      "learning_rate": 4.38006135651944e-06,
      "loss": 1.7471,
      "step": 80
    },
    {
      "epoch": 0.56,
      "learning_rate": 4.268094295319167e-06,
      "loss": 1.7243,
      "step": 81
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.1565010922147644e-06,
      "loss": 1.7297,
      "step": 82
    },
    {
      "epoch": 0.58,
      "learning_rate": 4.045338749115299e-06,
      "loss": 1.7297,
      "step": 83
    },
    {
      "epoch": 0.58,
      "learning_rate": 3.934664047846157e-06,
      "loss": 1.7569,
      "step": 84
    },
    {
      "epoch": 0.59,
      "learning_rate": 3.8245335211448404e-06,
      "loss": 1.7305,
      "step": 85
    },
    {
      "epoch": 0.6,
      "learning_rate": 3.715003423783986e-06,
      "loss": 1.7345,
      "step": 86
    },
    {
      "epoch": 0.6,
      "learning_rate": 3.6061297038363853e-06,
      "loss": 1.7571,
      "step": 87
    },
    {
      "epoch": 0.61,
      "learning_rate": 3.497967974096647e-06,
      "loss": 1.7386,
      "step": 88
    },
    {
      "epoch": 0.62,
      "learning_rate": 3.3905734836741415e-06,
      "loss": 1.6932,
      "step": 89
    },
    {
      "epoch": 0.62,
      "learning_rate": 3.2840010897717045e-06,
      "loss": 1.7224,
      "step": 90
    },
    {
      "epoch": 0.63,
      "learning_rate": 3.178305229664519e-06,
      "loss": 1.7469,
      "step": 91
    },
    {
      "epoch": 0.64,
      "learning_rate": 3.073539892893519e-06,
      "loss": 1.7062,
      "step": 92
    },
    {
      "epoch": 0.65,
      "learning_rate": 2.969758593687475e-06,
      "loss": 1.7002,
      "step": 93
    },
    {
      "epoch": 0.65,
      "learning_rate": 2.8670143436278757e-06,
      "loss": 1.7253,
      "step": 94
    },
    {
      "epoch": 0.66,
      "learning_rate": 2.765359624570574e-06,
      "loss": 1.7215,
      "step": 95
    },
    {
      "epoch": 0.67,
      "learning_rate": 2.664846361837997e-06,
      "loss": 1.6811,
      "step": 96
    },
    {
      "epoch": 0.67,
      "learning_rate": 2.565525897695651e-06,
      "loss": 1.7047,
      "step": 97
    },
    {
      "epoch": 0.68,
      "learning_rate": 2.4674489651264433e-06,
      "loss": 1.7149,
      "step": 98
    },
    {
      "epoch": 0.69,
      "learning_rate": 2.3706656619162278e-06,
      "loss": 1.6934,
      "step": 99
    },
    {
      "epoch": 0.69,
      "learning_rate": 2.275225425063813e-06,
      "loss": 1.6721,
      "step": 100
    },
    {
      "epoch": 0.7,
      "learning_rate": 2.1811770055284968e-06,
      "loss": 1.7282,
      "step": 101
    },
    {
      "epoch": 0.71,
      "learning_rate": 2.0885684433280336e-06,
      "loss": 1.7068,
      "step": 102
    },
    {
      "epoch": 0.72,
      "learning_rate": 1.9974470429997482e-06,
      "loss": 1.727,
      "step": 103
    },
    {
      "epoch": 0.72,
      "learning_rate": 1.907859349437336e-06,
      "loss": 1.6693,
      "step": 104
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.8198511241156902e-06,
      "loss": 1.7075,
      "step": 105
    },
    {
      "epoch": 0.74,
      "learning_rate": 1.7334673217158976e-06,
      "loss": 1.6837,
      "step": 106
    },
    {
      "epoch": 0.74,
      "learning_rate": 1.6487520671623469e-06,
      "loss": 1.743,
      "step": 107
    },
    {
      "epoch": 0.75,
      "learning_rate": 1.5657486330836786e-06,
      "loss": 1.7136,
      "step": 108
    },
    {
      "epoch": 0.76,
      "learning_rate": 1.4844994177090871e-06,
      "loss": 1.7284,
      "step": 109
    },
    {
      "epoch": 0.76,
      "learning_rate": 1.4050459232112652e-06,
      "loss": 1.718,
      "step": 110
    },
    {
      "epoch": 0.77,
      "learning_rate": 1.3274287345070564e-06,
      "loss": 1.6898,
      "step": 111
    },
    {
      "epoch": 0.78,
      "learning_rate": 1.2516874985266508e-06,
      "loss": 1.704,
      "step": 112
    },
    {
      "epoch": 0.78,
      "learning_rate": 1.1778609039618804e-06,
      "loss": 1.7421,
      "step": 113
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.1059866615040205e-06,
      "loss": 1.6848,
      "step": 114
    },
    {
      "epoch": 0.8,
      "learning_rate": 1.036101484581117e-06,
      "loss": 1.7273,
      "step": 115
    },
    {
      "epoch": 0.81,
      "learning_rate": 9.68241070604743e-07,
      "loss": 1.7132,
      "step": 116
    },
    {
      "epoch": 0.81,
      "learning_rate": 9.024400827357344e-07,
      "loss": 1.7239,
      "step": 117
    },
    {
      "epoch": 0.82,
      "learning_rate": 8.387321321781977e-07,
      "loss": 1.6926,
      "step": 118
    },
    {
      "epoch": 0.83,
      "learning_rate": 7.771497610108981e-07,
      "loss": 1.6812,
      "step": 119
    },
    {
      "epoch": 0.83,
      "learning_rate": 7.177244255647209e-07,
      "loss": 1.7045,
      "step": 120
    },
    {
      "epoch": 0.84,
      "learning_rate": 6.604864803547511e-07,
      "loss": 1.7108,
      "step": 121
    },
    {
      "epoch": 0.85,
      "learning_rate": 6.054651625751717e-07,
      "loss": 1.6969,
      "step": 122
    },
    {
      "epoch": 0.85,
      "learning_rate": 5.526885771648599e-07,
      "loss": 1.7463,
      "step": 123
    },
    {
      "epoch": 0.86,
      "learning_rate": 5.021836824513759e-07,
      "loss": 1.7348,
      "step": 124
    },
    {
      "epoch": 0.87,
      "learning_rate": 4.5397627638061604e-07,
      "loss": 1.7276,
      "step": 125
    },
    {
      "epoch": 0.88,
      "learning_rate": 4.080909833391944e-07,
      "loss": 1.6837,
      "step": 126
    },
    {
      "epoch": 0.88,
      "learning_rate": 3.6455124157629805e-07,
      "loss": 1.7146,
      "step": 127
    },
    {
      "epoch": 0.89,
      "learning_rate": 3.233792912313943e-07,
      "loss": 1.7009,
      "step": 128
    },
    {
      "epoch": 0.9,
      "learning_rate": 2.8459616297395464e-07,
      "loss": 1.72,
      "step": 129
    },
    {
      "epoch": 0.9,
      "learning_rate": 2.482216672609677e-07,
      "loss": 1.6926,
      "step": 130
    },
    {
      "epoch": 0.91,
      "learning_rate": 2.142743842177386e-07,
      "loss": 1.7535,
      "step": 131
    },
    {
      "epoch": 0.92,
      "learning_rate": 1.8277165414714858e-07,
      "loss": 1.7184,
      "step": 132
    },
    {
      "epoch": 0.92,
      "learning_rate": 1.5372956867220678e-07,
      "loss": 1.7114,
      "step": 133
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.2716296251644e-07,
      "loss": 1.7173,
      "step": 134
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.0308540592629756e-07,
      "loss": 1.7009,
      "step": 135
    },
    {
      "epoch": 0.94,
      "learning_rate": 8.150919773946165e-08,
      "loss": 1.7014,
      "step": 136
    },
    {
      "epoch": 0.95,
      "learning_rate": 6.244535910258697e-08,
      "loss": 1.6968,
      "step": 137
    },
    {
      "epoch": 0.96,
      "learning_rate": 4.590362784169022e-08,
      "loss": 1.6818,
      "step": 138
    },
    {
      "epoch": 0.97,
      "learning_rate": 3.1892453488058803e-08,
      "loss": 1.7045,
      "step": 139
    },
    {
      "epoch": 0.97,
      "learning_rate": 2.0418992962224495e-08,
      "loss": 1.6931,
      "step": 140
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.1489106918200487e-08,
      "loss": 1.6903,
      "step": 141
    },
    {
      "epoch": 0.99,
      "learning_rate": 5.107356749853298e-09,
      "loss": 1.7394,
      "step": 142
    },
    {
      "epoch": 0.99,
      "learning_rate": 1.2770022609409628e-09,
      "loss": 1.7425,
      "step": 143
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.0,
      "loss": 1.7309,
      "step": 144
    },
    {
      "epoch": 1.0,
      "step": 144,
      "total_flos": 0.0,
      "train_loss": 1.7571394882268376,
      "train_runtime": 4539.073,
      "train_samples_per_second": 3.561,
      "train_steps_per_second": 0.032
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 144,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|