|
{ |
|
"best_metric": 1.6023043394088745, |
|
"best_model_checkpoint": "./lora-out/checkpoint-300", |
|
"epoch": 2.7993779160186625, |
|
"eval_steps": 50, |
|
"global_step": 900, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2e-05, |
|
"loss": 1.7924, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 4e-05, |
|
"loss": 1.8083, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 6e-05, |
|
"loss": 1.8177, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 8e-05, |
|
"loss": 1.7595, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0001, |
|
"loss": 1.6598, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00012, |
|
"loss": 1.6919, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00014, |
|
"loss": 1.6706, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00016, |
|
"loss": 1.6879, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00018, |
|
"loss": 1.7051, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0002, |
|
"loss": 1.7022, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.000199999456645141, |
|
"loss": 1.6809, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019999782658646859, |
|
"loss": 1.6098, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0001999951098416968, |
|
"loss": 1.7014, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00019999130644034888, |
|
"loss": 1.5885, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00019998641642375657, |
|
"loss": 1.6243, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00019998043984506027, |
|
"loss": 1.6484, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00019997337676920803, |
|
"loss": 1.6093, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00019996522727295496, |
|
"loss": 1.6173, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00019995599144486247, |
|
"loss": 1.646, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00019994566938529712, |
|
"loss": 1.6469, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00019993426120642983, |
|
"loss": 1.6564, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00019992176703223432, |
|
"loss": 1.5901, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.000199908186998486, |
|
"loss": 1.664, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00019989352125276047, |
|
"loss": 1.6275, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00019987776995443178, |
|
"loss": 1.5839, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00019986093327467076, |
|
"loss": 1.5611, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00019984301139644334, |
|
"loss": 1.669, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001998240045145083, |
|
"loss": 1.5641, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00019980391283541522, |
|
"loss": 1.6023, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00019978273657750238, |
|
"loss": 1.6309, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0001997604759708942, |
|
"loss": 1.6353, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00019973713125749884, |
|
"loss": 1.6328, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00019971270269100564, |
|
"loss": 1.5683, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00019968719053688213, |
|
"loss": 1.6217, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0001996605950723714, |
|
"loss": 1.5734, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00019963291658648896, |
|
"loss": 1.6162, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00019960415538001957, |
|
"loss": 1.5922, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0001995743117655141, |
|
"loss": 1.5806, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.000199543386067286, |
|
"loss": 1.5938, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00019951137862140778, |
|
"loss": 1.6386, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00019947828977570756, |
|
"loss": 1.6476, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00019944411988976496, |
|
"loss": 1.6557, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00019940886933490749, |
|
"loss": 1.5836, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00019937253849420635, |
|
"loss": 1.6421, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.0001993351277624723, |
|
"loss": 1.629, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00019929663754625145, |
|
"loss": 1.6392, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00019925706826382064, |
|
"loss": 1.5677, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00019921642034518317, |
|
"loss": 1.6144, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00019917469423206389, |
|
"loss": 1.6068, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00019913189037790456, |
|
"loss": 1.6421, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"eval_loss": 1.621693730354309, |
|
"eval_runtime": 233.7603, |
|
"eval_samples_per_second": 16.354, |
|
"eval_steps_per_second": 4.09, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0001990880092478588, |
|
"loss": 1.6172, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0001990430513187871, |
|
"loss": 1.6095, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00019899701707925166, |
|
"loss": 1.5967, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00019894990702951106, |
|
"loss": 1.617, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00019890172168151473, |
|
"loss": 1.5932, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0001988524615588976, |
|
"loss": 1.6548, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00019880212719697413, |
|
"loss": 1.6033, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00019875071914273278, |
|
"loss": 1.6063, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00019869823795482986, |
|
"loss": 1.6107, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00019864468420358354, |
|
"loss": 1.5758, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00019859005847096763, |
|
"loss": 1.5723, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00019853436135060527, |
|
"loss": 1.542, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00019847759344776252, |
|
"loss": 1.5611, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00019841975537934162, |
|
"loss": 1.6157, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00019836084777387458, |
|
"loss": 1.5589, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00019830087127151598, |
|
"loss": 1.6077, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00019823982652403634, |
|
"loss": 1.5473, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00019817771419481487, |
|
"loss": 1.6265, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0001981145349588323, |
|
"loss": 1.6074, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00019805028950266348, |
|
"loss": 1.6195, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00019798497852447006, |
|
"loss": 1.5876, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0001979186027339928, |
|
"loss": 1.5978, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00019785116285254381, |
|
"loss": 1.533, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00019778265961299888, |
|
"loss": 1.5888, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0001977130937597894, |
|
"loss": 1.6211, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00019764246604889415, |
|
"loss": 1.6091, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00019757077724783147, |
|
"loss": 1.6012, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0001974980281356504, |
|
"loss": 1.6401, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0001974242195029227, |
|
"loss": 1.6111, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00019734935215173392, |
|
"loss": 1.6208, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00019727342689567482, |
|
"loss": 1.6038, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.00019719644455983256, |
|
"loss": 1.5915, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0001971184059807817, |
|
"loss": 1.5872, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.000197039312006575, |
|
"loss": 1.5984, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0001969591634967344, |
|
"loss": 1.5996, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00019687796132224152, |
|
"loss": 1.6056, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0001967957063655283, |
|
"loss": 1.6099, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0001967123995204674, |
|
"loss": 1.6295, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00019662804169236225, |
|
"loss": 1.5482, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00019654263379793773, |
|
"loss": 1.5781, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00019645617676532963, |
|
"loss": 1.5954, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.000196368671534075, |
|
"loss": 1.619, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0001962801190551016, |
|
"loss": 1.6153, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0001961905202907179, |
|
"loss": 1.6008, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00019609987621460232, |
|
"loss": 1.5891, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0001960081878117929, |
|
"loss": 1.6438, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0001959154560786764, |
|
"loss": 1.5576, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00019582168202297758, |
|
"loss": 1.646, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00019572686666374822, |
|
"loss": 1.6269, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00019563101103135602, |
|
"loss": 1.6288, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"eval_loss": 1.6143836975097656, |
|
"eval_runtime": 233.6412, |
|
"eval_samples_per_second": 16.363, |
|
"eval_steps_per_second": 4.092, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00019553411616747348, |
|
"loss": 1.5667, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00019543618312506647, |
|
"loss": 1.6221, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0001953372129683829, |
|
"loss": 1.5992, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0001952372067729411, |
|
"loss": 1.6138, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00019513616562551807, |
|
"loss": 1.51, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00019503409062413782, |
|
"loss": 1.6227, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00019493098287805927, |
|
"loss": 1.6014, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.00019482684350776434, |
|
"loss": 1.625, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0001947216736449457, |
|
"loss": 1.6109, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0001946154744324945, |
|
"loss": 1.62, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00019450824702448778, |
|
"loss": 1.5878, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0001943999925861763, |
|
"loss": 1.6264, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00019429071229397157, |
|
"loss": 1.6186, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0001941804073354331, |
|
"loss": 1.6363, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00019406907890925562, |
|
"loss": 1.5341, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00019395672822525593, |
|
"loss": 1.5986, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00019384335650435985, |
|
"loss": 1.6181, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0001937289649785889, |
|
"loss": 1.6118, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0001936135548910469, |
|
"loss": 1.6404, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00019349712749590649, |
|
"loss": 1.583, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00019337968405839547, |
|
"loss": 1.5827, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00019326122585478308, |
|
"loss": 1.6392, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00019314175417236616, |
|
"loss": 1.5861, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00019302127030945508, |
|
"loss": 1.5738, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0001928997755753597, |
|
"loss": 1.5915, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00019277727129037508, |
|
"loss": 1.617, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0001926537587857672, |
|
"loss": 1.5582, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00019252923940375844, |
|
"loss": 1.6294, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00019240371449751306, |
|
"loss": 1.6087, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00019227718543112236, |
|
"loss": 1.5749, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00019214965357959005, |
|
"loss": 1.6041, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00019202112032881715, |
|
"loss": 1.6106, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00019189158707558695, |
|
"loss": 1.5553, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.00019176105522754995, |
|
"loss": 1.5638, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0001916295262032084, |
|
"loss": 1.5921, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.00019149700143190096, |
|
"loss": 1.5837, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.00019136348235378726, |
|
"loss": 1.6341, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.00019122897041983205, |
|
"loss": 1.5678, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.00019109346709178963, |
|
"loss": 1.6137, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0001909569738421878, |
|
"loss": 1.6324, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00019081949215431194, |
|
"loss": 1.612, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00019068102352218897, |
|
"loss": 1.5908, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00019054156945057097, |
|
"loss": 1.6087, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.00019040113145491887, |
|
"loss": 1.5613, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.000190259711061386, |
|
"loss": 1.6072, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.00019011730980680156, |
|
"loss": 1.5722, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0001899739292386538, |
|
"loss": 1.5961, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.00018982957091507325, |
|
"loss": 1.5409, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0001896842364048159, |
|
"loss": 1.6557, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.000189537927287246, |
|
"loss": 1.5725, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"eval_loss": 1.6101970672607422, |
|
"eval_runtime": 233.5313, |
|
"eval_samples_per_second": 16.37, |
|
"eval_steps_per_second": 4.094, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.00018939064515231888, |
|
"loss": 1.5949, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0001892423916005639, |
|
"loss": 1.6191, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00018909316824306674, |
|
"loss": 1.5487, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00018894297670145216, |
|
"loss": 1.5104, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00018879181860786623, |
|
"loss": 1.6392, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00018863969560495866, |
|
"loss": 1.5932, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00018848660934586491, |
|
"loss": 1.6213, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0001883325614941882, |
|
"loss": 1.5515, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00018817755372398155, |
|
"loss": 1.6166, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00018802158771972943, |
|
"loss": 1.6552, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00018786466517632956, |
|
"loss": 1.6378, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00018770678779907448, |
|
"loss": 1.5176, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00018754795730363302, |
|
"loss": 1.5793, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00018738817541603156, |
|
"loss": 1.6616, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00018722744387263544, |
|
"loss": 1.6055, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.00018706576442012994, |
|
"loss": 1.6204, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.00018690313881550137, |
|
"loss": 1.5952, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.00018673956882601803, |
|
"loss": 1.6271, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.00018657505622921082, |
|
"loss": 1.538, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.00018640960281285417, |
|
"loss": 1.5874, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0001862432103749464, |
|
"loss": 1.5694, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.00018607588072369033, |
|
"loss": 1.583, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.00018590761567747354, |
|
"loss": 1.5961, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.00018573841706484866, |
|
"loss": 1.582, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0001855682867245134, |
|
"loss": 1.6427, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.00018539722650529075, |
|
"loss": 1.604, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.00018522523826610868, |
|
"loss": 1.577, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.00018505232387598018, |
|
"loss": 1.6339, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.00018487848521398265, |
|
"loss": 1.5993, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0001847037241692378, |
|
"loss": 1.6286, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.00018452804264089084, |
|
"loss": 1.5963, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.00018435144253809, |
|
"loss": 1.5856, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.00018417392577996578, |
|
"loss": 1.5787, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.00018399549429561006, |
|
"loss": 1.5876, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.00018381615002405509, |
|
"loss": 1.5565, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.00018363589491425248, |
|
"loss": 1.5897, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0001834547309250521, |
|
"loss": 1.5951, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.00018327266002518056, |
|
"loss": 1.5447, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.00018308968419322003, |
|
"loss": 1.6087, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.00018290580541758668, |
|
"loss": 1.5946, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.00018272102569650905, |
|
"loss": 1.6148, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.00018253534703800627, |
|
"loss": 1.649, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0001823487714598664, |
|
"loss": 1.6312, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0001821613009896244, |
|
"loss": 1.5858, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.00018197293766454003, |
|
"loss": 1.5925, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0001817836835315759, |
|
"loss": 1.5604, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.00018159354064737506, |
|
"loss": 1.6125, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0001814025110782387, |
|
"loss": 1.5954, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.00018121059690010368, |
|
"loss": 1.5937, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.00018101780019852008, |
|
"loss": 1.5582, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"eval_loss": 1.6065257787704468, |
|
"eval_runtime": 233.7919, |
|
"eval_samples_per_second": 16.352, |
|
"eval_steps_per_second": 4.089, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.00018082412306862837, |
|
"loss": 1.5628, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.00018062956761513675, |
|
"loss": 1.5735, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.00018043413595229818, |
|
"loss": 1.6011, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.00018023783020388763, |
|
"loss": 1.5434, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00018004065250317868, |
|
"loss": 1.5533, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00017984260499292058, |
|
"loss": 1.6074, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00017964368982531487, |
|
"loss": 1.5286, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.00017944390916199203, |
|
"loss": 1.5161, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.00017924326517398793, |
|
"loss": 1.6024, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.00017904176004172027, |
|
"loss": 1.5727, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0001788393959549649, |
|
"loss": 1.5752, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.00017863617511283203, |
|
"loss": 1.5845, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.00017843209972374233, |
|
"loss": 1.6082, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.00017822717200540283, |
|
"loss": 1.5895, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.00017802139418478298, |
|
"loss": 1.5836, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.00017781476849809038, |
|
"loss": 1.5996, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.00017760729719074644, |
|
"loss": 1.6256, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.000177398982517362, |
|
"loss": 1.628, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.00017718982674171284, |
|
"loss": 1.5543, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.00017697983213671515, |
|
"loss": 1.5732, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0001767690009844007, |
|
"loss": 1.5892, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0001765573355758921, |
|
"loss": 1.6524, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.00017634483821137787, |
|
"loss": 1.5694, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0001761315112000876, |
|
"loss": 1.6006, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.00017591735686026661, |
|
"loss": 1.6161, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.00017570237751915092, |
|
"loss": 1.595, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.00017548657551294192, |
|
"loss": 1.6072, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.000175269953186781, |
|
"loss": 1.5855, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.00017505251289472406, |
|
"loss": 1.597, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0001748342569997158, |
|
"loss": 1.5837, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.00017461518787356432, |
|
"loss": 1.5422, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.00017439530789691506, |
|
"loss": 1.5837, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0001741746194592251, |
|
"loss": 1.6038, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00017395312495873717, |
|
"loss": 1.5882, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00017373082680245347, |
|
"loss": 1.5763, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00017350772740610976, |
|
"loss": 1.6046, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00017328382919414877, |
|
"loss": 1.594, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00017305913459969414, |
|
"loss": 1.5903, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00017283364606452396, |
|
"loss": 1.5704, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0001726073660390439, |
|
"loss": 1.588, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00017238029698226113, |
|
"loss": 1.6273, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00017215244136175705, |
|
"loss": 1.5166, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.00017192380165366092, |
|
"loss": 1.5813, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0001716943803426226, |
|
"loss": 1.5654, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0001714641799217858, |
|
"loss": 1.5548, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00017123320289276085, |
|
"loss": 1.5491, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0001710014517655976, |
|
"loss": 1.5903, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00017076892905875806, |
|
"loss": 1.5687, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00017053563729908905, |
|
"loss": 1.5975, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00017030157902179485, |
|
"loss": 1.6055, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"eval_loss": 1.60513174533844, |
|
"eval_runtime": 233.7813, |
|
"eval_samples_per_second": 16.353, |
|
"eval_steps_per_second": 4.089, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00017006675677040946, |
|
"loss": 1.4661, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00016983117309676908, |
|
"loss": 1.6071, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.00016959483056098445, |
|
"loss": 1.5664, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0001693577317314129, |
|
"loss": 1.5189, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.00016911987918463034, |
|
"loss": 1.5488, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0001688812755054036, |
|
"loss": 1.6153, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.00016864192328666202, |
|
"loss": 1.536, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.00016840182512946943, |
|
"loss": 1.624, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00016816098364299582, |
|
"loss": 1.569, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00016791940144448902, |
|
"loss": 1.588, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0001676770811592463, |
|
"loss": 1.5626, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00016743402542058572, |
|
"loss": 1.5836, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00016719023686981763, |
|
"loss": 1.5573, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00016694571815621586, |
|
"loss": 1.5815, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00016670047193698912, |
|
"loss": 1.64, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.0001664545008772518, |
|
"loss": 1.6395, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.00016620780764999536, |
|
"loss": 1.5927, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.00016596039493605913, |
|
"loss": 1.605, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.000165712265424101, |
|
"loss": 1.6219, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.0001654634218105686, |
|
"loss": 1.5458, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.0001652138667996696, |
|
"loss": 1.59, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.00016496360310334253, |
|
"loss": 1.633, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0001647126334412274, |
|
"loss": 1.6108, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0001644609605406358, |
|
"loss": 1.5747, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.0001642085871365217, |
|
"loss": 1.5393, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00016395551597145133, |
|
"loss": 1.5768, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00016370174979557368, |
|
"loss": 1.6278, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.0001634472913665904, |
|
"loss": 1.5983, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.00016319214344972602, |
|
"loss": 1.5701, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.00016293630881769773, |
|
"loss": 1.5874, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.0001626797902506853, |
|
"loss": 1.5412, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.000162422590536301, |
|
"loss": 1.5733, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00016216471246955906, |
|
"loss": 1.6245, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00016190615885284553, |
|
"loss": 1.5743, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00016164693249588768, |
|
"loss": 1.5793, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00016138703621572346, |
|
"loss": 1.5672, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.0001611264728366711, |
|
"loss": 1.5442, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.0001608652451902981, |
|
"loss": 1.5765, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.00016060335611539072, |
|
"loss": 1.6058, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.00016034080845792295, |
|
"loss": 1.6156, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.0001600776050710257, |
|
"loss": 1.6179, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.0001598137488149558, |
|
"loss": 1.5747, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00015954924255706478, |
|
"loss": 1.5772, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00015928408917176786, |
|
"loss": 1.6064, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00015901829154051265, |
|
"loss": 1.6082, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00015875185255174787, |
|
"loss": 1.5768, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.0001584847751008918, |
|
"loss": 1.5466, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00015821706209030118, |
|
"loss": 1.5127, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00015794871642923927, |
|
"loss": 1.5745, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00015767974103384443, |
|
"loss": 1.5733, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"eval_loss": 1.6023043394088745, |
|
"eval_runtime": 233.7298, |
|
"eval_samples_per_second": 16.356, |
|
"eval_steps_per_second": 4.09, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.0001574101388270984, |
|
"loss": 1.6189, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.0001571399127387946, |
|
"loss": 1.54, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00015686906570550616, |
|
"loss": 1.5419, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 0.00015659760067055417, |
|
"loss": 1.576, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 0.00015632552058397544, |
|
"loss": 1.6072, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 0.00015605282840249087, |
|
"loss": 1.5429, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 0.00015577952708947272, |
|
"loss": 1.5149, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 0.00015550561961491304, |
|
"loss": 1.5744, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 0.00015523110895539097, |
|
"loss": 1.6155, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 0.00015495599809404044, |
|
"loss": 1.541, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 0.000154680290020518, |
|
"loss": 1.5227, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 0.00015440398773097002, |
|
"loss": 1.5462, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 0.00015412709422800037, |
|
"loss": 1.56, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 0.00015384961252063763, |
|
"loss": 1.6597, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 0.00015357154562430252, |
|
"loss": 1.5917, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 0.000153292896560775, |
|
"loss": 1.6058, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 0.0001530136683581615, |
|
"loss": 1.581, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 0.00015273386405086209, |
|
"loss": 1.592, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 0.00015245348667953726, |
|
"loss": 1.5711, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 0.0001521725392910753, |
|
"loss": 1.5829, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 0.00015189102493855868, |
|
"loss": 1.5786, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 0.00015160894668123123, |
|
"loss": 1.5848, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 0.0001513263075844648, |
|
"loss": 1.482, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.000151043110719726, |
|
"loss": 1.495, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.00015075935916454255, |
|
"loss": 1.4535, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.00015047505600247028, |
|
"loss": 1.5398, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 0.0001501902043230592, |
|
"loss": 1.4649, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 0.00014990480722182022, |
|
"loss": 1.512, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 0.0001496188678001914, |
|
"loss": 1.4365, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 0.00014933238916550425, |
|
"loss": 1.5408, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 0.00014904537443094986, |
|
"loss": 1.4992, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 0.00014875782671554526, |
|
"loss": 1.5125, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 0.00014846974914409943, |
|
"loss": 1.4823, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 0.00014818114484717933, |
|
"loss": 1.4985, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 0.00014789201696107594, |
|
"loss": 1.457, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 0.00014760236862777, |
|
"loss": 1.4623, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 0.0001473122029948982, |
|
"loss": 1.466, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 0.0001470215232157186, |
|
"loss": 1.4982, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 0.00014673033244907665, |
|
"loss": 1.4369, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 0.00014643863385937076, |
|
"loss": 1.4698, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 0.00014614643061651772, |
|
"loss": 1.4462, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 0.0001458537258959186, |
|
"loss": 1.4513, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 0.00014556052287842413, |
|
"loss": 1.4304, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 0.00014526682475029994, |
|
"loss": 1.4953, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 0.00014497263470319215, |
|
"loss": 1.4209, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 0.00014467795593409256, |
|
"loss": 1.4522, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 0.000144382791645304, |
|
"loss": 1.495, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 0.0001440871450444055, |
|
"loss": 1.4461, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 0.00014379101934421736, |
|
"loss": 1.4592, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 0.0001434944177627664, |
|
"loss": 1.4885, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"eval_loss": 1.6130114793777466, |
|
"eval_runtime": 233.7594, |
|
"eval_samples_per_second": 16.354, |
|
"eval_steps_per_second": 4.09, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 0.00014319734352325077, |
|
"loss": 1.5119, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 0.00014289979985400515, |
|
"loss": 1.4618, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 0.00014260178998846547, |
|
"loss": 1.499, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 0.00014230331716513396, |
|
"loss": 1.4611, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 0.00014200438462754373, |
|
"loss": 1.4503, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 0.00014170499562422376, |
|
"loss": 1.472, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 0.00014140515340866337, |
|
"loss": 1.4654, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 0.00014110486123927718, |
|
"loss": 1.4245, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 0.0001408041223793693, |
|
"loss": 1.4944, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 0.00014050294009709813, |
|
"loss": 1.481, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 0.00014020131766544084, |
|
"loss": 1.4592, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 0.0001398992583621577, |
|
"loss": 1.5189, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 0.0001395967654697565, |
|
"loss": 1.4575, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 0.00013929384227545692, |
|
"loss": 1.5033, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 0.0001389904920711547, |
|
"loss": 1.5161, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 0.00013868671815338605, |
|
"loss": 1.4703, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 0.0001383825238232916, |
|
"loss": 1.4617, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 0.00013807791238658077, |
|
"loss": 1.4599, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 0.00013777288715349559, |
|
"loss": 1.4871, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 0.0001374674514387749, |
|
"loss": 1.4825, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 0.00013716160856161834, |
|
"loss": 1.5001, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 0.00013685536184565017, |
|
"loss": 1.3828, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 0.00013654871461888317, |
|
"loss": 1.4882, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 0.00013624167021368257, |
|
"loss": 1.4426, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 0.0001359342319667298, |
|
"loss": 1.4827, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 0.00013562640321898613, |
|
"loss": 1.4811, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 0.00013531818731565647, |
|
"loss": 1.4937, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 0.00013500958760615306, |
|
"loss": 1.4668, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 0.00013470060744405883, |
|
"loss": 1.4579, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 0.0001343912501870913, |
|
"loss": 1.4692, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 0.00013408151919706583, |
|
"loss": 1.4927, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 0.00013377141783985918, |
|
"loss": 1.5073, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 0.00013346094948537296, |
|
"loss": 1.4771, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 0.00013315011750749688, |
|
"loss": 1.5233, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 0.00013283892528407235, |
|
"loss": 1.4379, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 0.00013252737619685542, |
|
"loss": 1.493, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 0.00013221547363148034, |
|
"loss": 1.4174, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 0.00013190322097742259, |
|
"loss": 1.4108, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 0.00013159062162796208, |
|
"loss": 1.4713, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 0.00013127767898014637, |
|
"loss": 1.4511, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 0.0001309643964347536, |
|
"loss": 1.4752, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 0.00013065077739625566, |
|
"loss": 1.4798, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 0.00013033682527278107, |
|
"loss": 1.4372, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 0.0001300225434760781, |
|
"loss": 1.4556, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 0.00012970793542147756, |
|
"loss": 1.5026, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 0.00012939300452785574, |
|
"loss": 1.4878, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 0.00012907775421759732, |
|
"loss": 1.479, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 0.000128762187916558, |
|
"loss": 1.4508, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 0.0001284463090540275, |
|
"loss": 1.4923, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 0.00012813012106269208, |
|
"loss": 1.484, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"eval_loss": 1.616938829421997, |
|
"eval_runtime": 233.7894, |
|
"eval_samples_per_second": 16.352, |
|
"eval_steps_per_second": 4.089, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 0.00012781362737859735, |
|
"loss": 1.4867, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 0.00012749683144111095, |
|
"loss": 1.4923, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 0.00012717973669288513, |
|
"loss": 1.4858, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 0.00012686234657981933, |
|
"loss": 1.4464, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 0.00012654466455102272, |
|
"loss": 1.4598, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 0.00012622669405877685, |
|
"loss": 1.4237, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 0.0001259084385584979, |
|
"loss": 1.475, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 0.00012558990150869935, |
|
"loss": 1.5201, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 0.00012527108637095427, |
|
"loss": 1.4735, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 0.00012495199660985767, |
|
"loss": 1.4676, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 0.00012463263569298914, |
|
"loss": 1.4671, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 0.00012431300709087468, |
|
"loss": 1.4724, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 0.00012399311427694945, |
|
"loss": 1.5451, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 0.0001236729607275197, |
|
"loss": 1.492, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 0.00012335254992172512, |
|
"loss": 1.5186, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 0.0001230318853415012, |
|
"loss": 1.4622, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 0.00012271097047154096, |
|
"loss": 1.4937, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 0.00012238980879925756, |
|
"loss": 1.4575, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 0.00012206840381474608, |
|
"loss": 1.4801, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 0.00012174675901074577, |
|
"loss": 1.4523, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 0.00012142487788260191, |
|
"loss": 1.4957, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 0.00012110276392822799, |
|
"loss": 1.4757, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 0.0001207804206480677, |
|
"loss": 1.4769, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 0.00012045785154505676, |
|
"loss": 1.4435, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 0.000120135060124585, |
|
"loss": 1.5211, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 0.00011981204989445811, |
|
"loss": 1.4248, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 0.00011948882436485969, |
|
"loss": 1.4883, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 0.00011916538704831293, |
|
"loss": 1.4919, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 0.00011884174145964262, |
|
"loss": 1.4689, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 0.00011851789111593688, |
|
"loss": 1.4071, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 0.00011819383953650874, |
|
"loss": 1.4418, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 0.00011786959024285826, |
|
"loss": 1.5206, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 0.00011754514675863408, |
|
"loss": 1.446, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 0.000117220512609595, |
|
"loss": 1.5165, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 0.0001168956913235719, |
|
"loss": 1.4119, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 0.00011657068643042924, |
|
"loss": 1.503, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 0.00011624550146202682, |
|
"loss": 1.4573, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 0.00011592013995218123, |
|
"loss": 1.4707, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"learning_rate": 0.00011559460543662768, |
|
"loss": 1.4304, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"learning_rate": 0.00011526890145298137, |
|
"loss": 1.4465, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"learning_rate": 0.0001149430315406991, |
|
"loss": 1.4912, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"learning_rate": 0.0001146169992410409, |
|
"loss": 1.4549, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 0.00011429080809703145, |
|
"loss": 1.4528, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 0.00011396446165342165, |
|
"loss": 1.4148, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 0.00011363796345665001, |
|
"loss": 1.467, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 0.0001133113170548041, |
|
"loss": 1.492, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 0.00011298452599758217, |
|
"loss": 1.5244, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 0.00011265759383625436, |
|
"loss": 1.4553, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 0.0001123305241236243, |
|
"loss": 1.4764, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 0.00011200332041399027, |
|
"loss": 1.4354, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"eval_loss": 1.6193681955337524, |
|
"eval_runtime": 233.6751, |
|
"eval_samples_per_second": 16.36, |
|
"eval_steps_per_second": 4.091, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 0.00011167598626310682, |
|
"loss": 1.4946, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 0.00011134852522814596, |
|
"loss": 1.4558, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 0.0001110209408676586, |
|
"loss": 1.4549, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 0.00011069323674153585, |
|
"loss": 1.4992, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 0.0001103654164109702, |
|
"loss": 1.4828, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 0.00011003748343841711, |
|
"loss": 1.4939, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 0.00010970944138755604, |
|
"loss": 1.4761, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 0.00010938129382325184, |
|
"loss": 1.4394, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 0.00010905304431151602, |
|
"loss": 1.4852, |
|
"step": 459 |
|
}, |
|
{ "epoch": 1.43, "learning_rate": 0.00010872469641946783, "loss": 1.4479, "step": 460 },
{ "epoch": 1.43, "learning_rate": 0.00010839625371529583, "loss": 1.5161, "step": 461 },
{ "epoch": 1.44, "learning_rate": 0.00010806771976821872, "loss": 1.5104, "step": 462 },
{ "epoch": 1.44, "learning_rate": 0.0001077390981484469, "loss": 1.5056, "step": 463 },
{ "epoch": 1.44, "learning_rate": 0.00010741039242714337, "loss": 1.4919, "step": 464 },
{ "epoch": 1.45, "learning_rate": 0.00010708160617638521, "loss": 1.4605, "step": 465 },
{ "epoch": 1.45, "learning_rate": 0.00010675274296912452, "loss": 1.5191, "step": 466 },
{ "epoch": 1.45, "learning_rate": 0.00010642380637914975, "loss": 1.4504, "step": 467 },
{ "epoch": 1.46, "learning_rate": 0.00010609479998104684, "loss": 1.4619, "step": 468 },
{ "epoch": 1.46, "learning_rate": 0.00010576572735016016, "loss": 1.4619, "step": 469 },
{ "epoch": 1.46, "learning_rate": 0.00010543659206255409, "loss": 1.4962, "step": 470 },
{ "epoch": 1.47, "learning_rate": 0.00010510739769497378, "loss": 1.4901, "step": 471 },
{ "epoch": 1.47, "learning_rate": 0.0001047781478248063, "loss": 1.4708, "step": 472 },
{ "epoch": 1.47, "learning_rate": 0.00010444884603004213, "loss": 1.4756, "step": 473 },
{ "epoch": 1.47, "learning_rate": 0.00010411949588923577, "loss": 1.3948, "step": 474 },
{ "epoch": 1.48, "learning_rate": 0.00010379010098146728, "loss": 1.5183, "step": 475 },
{ "epoch": 1.48, "learning_rate": 0.00010346066488630308, "loss": 1.4252, "step": 476 },
{ "epoch": 1.48, "learning_rate": 0.00010313119118375727, "loss": 1.4686, "step": 477 },
{ "epoch": 1.49, "learning_rate": 0.00010280168345425256, "loss": 1.5285, "step": 478 },
{ "epoch": 1.49, "learning_rate": 0.00010247214527858149, "loss": 1.4649, "step": 479 },
{ "epoch": 1.49, "learning_rate": 0.0001021425802378674, "loss": 1.4602, "step": 480 },
{ "epoch": 1.5, "learning_rate": 0.00010181299191352566, "loss": 1.5102, "step": 481 },
{ "epoch": 1.5, "learning_rate": 0.00010148338388722465, "loss": 1.4894, "step": 482 },
{ "epoch": 1.5, "learning_rate": 0.00010115375974084677, "loss": 1.501, "step": 483 },
{ "epoch": 1.51, "learning_rate": 0.00010082412305644964, "loss": 1.481, "step": 484 },
{ "epoch": 1.51, "learning_rate": 0.00010049447741622717, "loss": 1.4927, "step": 485 },
{ "epoch": 1.51, "learning_rate": 0.00010016482640247058, "loss": 1.512, "step": 486 },
{ "epoch": 1.51, "learning_rate": 9.983517359752945e-05, "loss": 1.4622, "step": 487 },
{ "epoch": 1.52, "learning_rate": 9.950552258377284e-05, "loss": 1.4956, "step": 488 },
{ "epoch": 1.52, "learning_rate": 9.917587694355037e-05, "loss": 1.493, "step": 489 },
{ "epoch": 1.52, "learning_rate": 9.884624025915328e-05, "loss": 1.4629, "step": 490 },
{ "epoch": 1.53, "learning_rate": 9.851661611277537e-05, "loss": 1.4531, "step": 491 },
{ "epoch": 1.53, "learning_rate": 9.818700808647435e-05, "loss": 1.4656, "step": 492 },
{ "epoch": 1.53, "learning_rate": 9.785741976213261e-05, "loss": 1.4982, "step": 493 },
{ "epoch": 1.54, "learning_rate": 9.752785472141854e-05, "loss": 1.5053, "step": 494 },
{ "epoch": 1.54, "learning_rate": 9.719831654574745e-05, "loss": 1.4619, "step": 495 },
{ "epoch": 1.54, "learning_rate": 9.686880881624275e-05, "loss": 1.486, "step": 496 },
{ "epoch": 1.55, "learning_rate": 9.653933511369696e-05, "loss": 1.4788, "step": 497 },
{ "epoch": 1.55, "learning_rate": 9.620989901853275e-05, "loss": 1.4663, "step": 498 },
{ "epoch": 1.55, "learning_rate": 9.588050411076424e-05, "loss": 1.5138, "step": 499 },
{ "epoch": 1.56, "learning_rate": 9.555115396995788e-05, "loss": 1.4427, "step": 500 },
{ "epoch": 1.56, "eval_loss": 1.6187018156051636, "eval_runtime": 233.6591, "eval_samples_per_second": 16.361, "eval_steps_per_second": 4.091, "step": 500 },
{ "epoch": 1.56, "learning_rate": 9.522185217519371e-05, "loss": 1.4696, "step": 501 },
{ "epoch": 1.56, "learning_rate": 9.489260230502626e-05, "loss": 1.4052, "step": 502 },
{ "epoch": 1.56, "learning_rate": 9.45634079374459e-05, "loss": 1.4688, "step": 503 },
{ "epoch": 1.57, "learning_rate": 9.423427264983986e-05, "loss": 1.4266, "step": 504 },
{ "epoch": 1.57, "learning_rate": 9.390520001895321e-05, "loss": 1.4887, "step": 505 },
{ "epoch": 1.57, "learning_rate": 9.357619362085027e-05, "loss": 1.4992, "step": 506 },
{ "epoch": 1.58, "learning_rate": 9.32472570308755e-05, "loss": 1.4626, "step": 507 },
{ "epoch": 1.58, "learning_rate": 9.291839382361481e-05, "loss": 1.4984, "step": 508 },
{ "epoch": 1.58, "learning_rate": 9.258960757285664e-05, "loss": 1.3692, "step": 509 },
{ "epoch": 1.59, "learning_rate": 9.226090185155314e-05, "loss": 1.4325, "step": 510 },
{ "epoch": 1.59, "learning_rate": 9.19322802317813e-05, "loss": 1.5049, "step": 511 },
{ "epoch": 1.59, "learning_rate": 9.160374628470421e-05, "loss": 1.4589, "step": 512 },
{ "epoch": 1.6, "learning_rate": 9.127530358053218e-05, "loss": 1.4291, "step": 513 },
{ "epoch": 1.6, "learning_rate": 9.094695568848402e-05, "loss": 1.4474, "step": 514 },
{ "epoch": 1.6, "learning_rate": 9.061870617674817e-05, "loss": 1.513, "step": 515 },
{ "epoch": 1.6, "learning_rate": 9.029055861244397e-05, "loss": 1.4609, "step": 516 },
{ "epoch": 1.61, "learning_rate": 8.99625165615829e-05, "loss": 1.5144, "step": 517 },
{ "epoch": 1.61, "learning_rate": 8.963458358902985e-05, "loss": 1.4294, "step": 518 },
{ "epoch": 1.61, "learning_rate": 8.93067632584642e-05, "loss": 1.4516, "step": 519 },
{ "epoch": 1.62, "learning_rate": 8.897905913234143e-05, "loss": 1.4659, "step": 520 },
{ "epoch": 1.62, "learning_rate": 8.865147477185405e-05, "loss": 1.4787, "step": 521 },
{ "epoch": 1.62, "learning_rate": 8.832401373689319e-05, "loss": 1.4601, "step": 522 },
{ "epoch": 1.63, "learning_rate": 8.799667958600973e-05, "loss": 1.4955, "step": 523 },
{ "epoch": 1.63, "learning_rate": 8.766947587637573e-05, "loss": 1.4231, "step": 524 },
{ "epoch": 1.63, "learning_rate": 8.734240616374565e-05, "loss": 1.4952, "step": 525 },
{ "epoch": 1.64, "learning_rate": 8.701547400241788e-05, "loss": 1.4707, "step": 526 },
{ "epoch": 1.64, "learning_rate": 8.668868294519593e-05, "loss": 1.5023, "step": 527 },
{ "epoch": 1.64, "learning_rate": 8.636203654335002e-05, "loss": 1.4702, "step": 528 },
{ "epoch": 1.65, "learning_rate": 8.603553834657836e-05, "loss": 1.4399, "step": 529 },
{ "epoch": 1.65, "learning_rate": 8.570919190296855e-05, "loss": 1.5175, "step": 530 },
{ "epoch": 1.65, "learning_rate": 8.53830007589591e-05, "loss": 1.4715, "step": 531 },
{ "epoch": 1.65, "learning_rate": 8.505696845930096e-05, "loss": 1.5292, "step": 532 },
{ "epoch": 1.66, "learning_rate": 8.473109854701869e-05, "loss": 1.5287, "step": 533 },
{ "epoch": 1.66, "learning_rate": 8.440539456337235e-05, "loss": 1.4762, "step": 534 },
{ "epoch": 1.66, "learning_rate": 8.407986004781879e-05, "loss": 1.4536, "step": 535 },
{ "epoch": 1.67, "learning_rate": 8.375449853797322e-05, "loss": 1.5018, "step": 536 },
{ "epoch": 1.67, "learning_rate": 8.342931356957076e-05, "loss": 1.4723, "step": 537 },
{ "epoch": 1.67, "learning_rate": 8.310430867642812e-05, "loss": 1.4905, "step": 538 },
{ "epoch": 1.68, "learning_rate": 8.277948739040503e-05, "loss": 1.4651, "step": 539 },
{ "epoch": 1.68, "learning_rate": 8.245485324136597e-05, "loss": 1.4482, "step": 540 },
{ "epoch": 1.68, "learning_rate": 8.213040975714175e-05, "loss": 1.3977, "step": 541 },
{ "epoch": 1.69, "learning_rate": 8.180616046349129e-05, "loss": 1.5594, "step": 542 },
{ "epoch": 1.69, "learning_rate": 8.148210888406316e-05, "loss": 1.4995, "step": 543 },
{ "epoch": 1.69, "learning_rate": 8.115825854035737e-05, "loss": 1.5106, "step": 544 },
{ "epoch": 1.7, "learning_rate": 8.083461295168707e-05, "loss": 1.4219, "step": 545 },
{ "epoch": 1.7, "learning_rate": 8.051117563514036e-05, "loss": 1.4766, "step": 546 },
{ "epoch": 1.7, "learning_rate": 8.018795010554193e-05, "loss": 1.5241, "step": 547 },
{ "epoch": 1.7, "learning_rate": 7.986493987541502e-05, "loss": 1.4673, "step": 548 },
{ "epoch": 1.71, "learning_rate": 7.954214845494325e-05, "loss": 1.4236, "step": 549 },
{ "epoch": 1.71, "learning_rate": 7.921957935193232e-05, "loss": 1.4687, "step": 550 },
{ "epoch": 1.71, "eval_loss": 1.617763876914978, "eval_runtime": 233.6334, "eval_samples_per_second": 16.363, "eval_steps_per_second": 4.092, "step": 550 },
{ "epoch": 1.71, "learning_rate": 7.889723607177202e-05, "loss": 1.4412, "step": 551 },
{ "epoch": 1.72, "learning_rate": 7.857512211739813e-05, "loss": 1.4464, "step": 552 },
{ "epoch": 1.72, "learning_rate": 7.825324098925427e-05, "loss": 1.4043, "step": 553 },
{ "epoch": 1.72, "learning_rate": 7.793159618525393e-05, "loss": 1.4384, "step": 554 },
{ "epoch": 1.73, "learning_rate": 7.761019120074245e-05, "loss": 1.4781, "step": 555 },
{ "epoch": 1.73, "learning_rate": 7.728902952845905e-05, "loss": 1.4311, "step": 556 },
{ "epoch": 1.73, "learning_rate": 7.696811465849883e-05, "loss": 1.4926, "step": 557 },
{ "epoch": 1.74, "learning_rate": 7.664745007827489e-05, "loss": 1.4739, "step": 558 },
{ "epoch": 1.74, "learning_rate": 7.632703927248033e-05, "loss": 1.509, "step": 559 },
{ "epoch": 1.74, "learning_rate": 7.60068857230506e-05, "loss": 1.4555, "step": 560 },
{ "epoch": 1.74, "learning_rate": 7.568699290912533e-05, "loss": 1.4588, "step": 561 },
{ "epoch": 1.75, "learning_rate": 7.536736430701088e-05, "loss": 1.4574, "step": 562 },
{ "epoch": 1.75, "learning_rate": 7.504800339014232e-05, "loss": 1.4805, "step": 563 },
{ "epoch": 1.75, "learning_rate": 7.472891362904577e-05, "loss": 1.5081, "step": 564 },
{ "epoch": 1.76, "learning_rate": 7.441009849130067e-05, "loss": 1.5081, "step": 565 },
{ "epoch": 1.76, "learning_rate": 7.409156144150213e-05, "loss": 1.4548, "step": 566 },
{ "epoch": 1.76, "learning_rate": 7.377330594122317e-05, "loss": 1.4478, "step": 567 },
{ "epoch": 1.77, "learning_rate": 7.34553354489773e-05, "loss": 1.5048, "step": 568 },
{ "epoch": 1.77, "learning_rate": 7.31376534201807e-05, "loss": 1.4889, "step": 569 },
{ "epoch": 1.77, "learning_rate": 7.282026330711489e-05, "loss": 1.5045, "step": 570 },
{ "epoch": 1.78, "learning_rate": 7.250316855888906e-05, "loss": 1.4352, "step": 571 },
{ "epoch": 1.78, "learning_rate": 7.218637262140268e-05, "loss": 1.4881, "step": 572 },
{ "epoch": 1.78, "learning_rate": 7.186987893730797e-05, "loss": 1.449, "step": 573 },
{ "epoch": 1.79, "learning_rate": 7.155369094597253e-05, "loss": 1.4146, "step": 574 },
{ "epoch": 1.79, "learning_rate": 7.1237812083442e-05, "loss": 1.4462, "step": 575 },
{ "epoch": 1.79, "learning_rate": 7.092224578240269e-05, "loss": 1.4509, "step": 576 },
{ "epoch": 1.79, "learning_rate": 7.060699547214427e-05, "loss": 1.4483, "step": 577 },
{ "epoch": 1.8, "learning_rate": 7.029206457852247e-05, "loss": 1.4348, "step": 578 },
{ "epoch": 1.8, "learning_rate": 6.997745652392191e-05, "loss": 1.4931, "step": 579 },
{ "epoch": 1.8, "learning_rate": 6.966317472721897e-05, "loss": 1.4132, "step": 580 },
{ "epoch": 1.81, "learning_rate": 6.934922260374437e-05, "loss": 1.3974, "step": 581 },
{ "epoch": 1.81, "learning_rate": 6.903560356524641e-05, "loss": 1.4326, "step": 582 },
{ "epoch": 1.81, "learning_rate": 6.872232101985363e-05, "loss": 1.4349, "step": 583 },
{ "epoch": 1.82, "learning_rate": 6.840937837203791e-05, "loss": 1.4528, "step": 584 },
{ "epoch": 1.82, "learning_rate": 6.809677902257742e-05, "loss": 1.4365, "step": 585 },
{ "epoch": 1.82, "learning_rate": 6.778452636851968e-05, "loss": 1.4702, "step": 586 },
{ "epoch": 1.83, "learning_rate": 6.747262380314463e-05, "loss": 1.458, "step": 587 },
{ "epoch": 1.83, "learning_rate": 6.71610747159277e-05, "loss": 1.5413, "step": 588 },
{ "epoch": 1.83, "learning_rate": 6.684988249250314e-05, "loss": 1.4205, "step": 589 },
{ "epoch": 1.84, "learning_rate": 6.653905051462708e-05, "loss": 1.4643, "step": 590 },
{ "epoch": 1.84, "learning_rate": 6.622858216014084e-05, "loss": 1.4071, "step": 591 },
{ "epoch": 1.84, "learning_rate": 6.591848080293418e-05, "loss": 1.4669, "step": 592 },
{ "epoch": 1.84, "learning_rate": 6.56087498129087e-05, "loss": 1.5062, "step": 593 },
{ "epoch": 1.85, "learning_rate": 6.52993925559412e-05, "loss": 1.4334, "step": 594 },
{ "epoch": 1.85, "learning_rate": 6.499041239384698e-05, "loss": 1.4696, "step": 595 },
{ "epoch": 1.85, "learning_rate": 6.468181268434354e-05, "loss": 1.4575, "step": 596 },
{ "epoch": 1.86, "learning_rate": 6.437359678101389e-05, "loss": 1.4432, "step": 597 },
{ "epoch": 1.86, "learning_rate": 6.406576803327022e-05, "loss": 1.5047, "step": 598 },
{ "epoch": 1.86, "learning_rate": 6.375832978631743e-05, "loss": 1.4297, "step": 599 },
{ "epoch": 1.87, "learning_rate": 6.345128538111685e-05, "loss": 1.461, "step": 600 },
{ "epoch": 1.87, "eval_loss": 1.6174333095550537, "eval_runtime": 233.649, "eval_samples_per_second": 16.362, "eval_steps_per_second": 4.092, "step": 600 },
{ "epoch": 1.87, "learning_rate": 6.314463815434988e-05, "loss": 1.4978, "step": 601 },
{ "epoch": 1.87, "learning_rate": 6.283839143838169e-05, "loss": 1.426, "step": 602 },
{ "epoch": 1.88, "learning_rate": 6.253254856122511e-05, "loss": 1.4657, "step": 603 },
{ "epoch": 1.88, "learning_rate": 6.222711284650444e-05, "loss": 1.5282, "step": 604 },
{ "epoch": 1.88, "learning_rate": 6.192208761341925e-05, "loss": 1.4897, "step": 605 },
{ "epoch": 1.88, "learning_rate": 6.161747617670839e-05, "loss": 1.4827, "step": 606 },
{ "epoch": 1.89, "learning_rate": 6.131328184661396e-05, "loss": 1.4507, "step": 607 },
{ "epoch": 1.89, "learning_rate": 6.100950792884533e-05, "loss": 1.4461, "step": 608 },
{ "epoch": 1.89, "learning_rate": 6.070615772454312e-05, "loss": 1.4187, "step": 609 },
{ "epoch": 1.9, "learning_rate": 6.040323453024351e-05, "loss": 1.4704, "step": 610 },
{ "epoch": 1.9, "learning_rate": 6.0100741637842316e-05, "loss": 1.4869, "step": 611 },
{ "epoch": 1.9, "learning_rate": 5.979868233455917e-05, "loss": 1.4657, "step": 612 },
{ "epoch": 1.91, "learning_rate": 5.949705990290186e-05, "loss": 1.4234, "step": 613 },
{ "epoch": 1.91, "learning_rate": 5.919587762063072e-05, "loss": 1.4519, "step": 614 },
{ "epoch": 1.91, "learning_rate": 5.889513876072283e-05, "loss": 1.4588, "step": 615 },
{ "epoch": 1.92, "learning_rate": 5.859484659133663e-05, "loss": 1.4867, "step": 616 },
{ "epoch": 1.92, "learning_rate": 5.829500437577626e-05, "loss": 1.5157, "step": 617 },
{ "epoch": 1.92, "learning_rate": 5.799561537245628e-05, "loss": 1.4492, "step": 618 },
{ "epoch": 1.93, "learning_rate": 5.769668283486607e-05, "loss": 1.514, "step": 619 },
{ "epoch": 1.93, "learning_rate": 5.739821001153451e-05, "loss": 1.5127, "step": 620 },
{ "epoch": 1.93, "learning_rate": 5.710020014599486e-05, "loss": 1.4204, "step": 621 },
{ "epoch": 1.93, "learning_rate": 5.680265647674925e-05, "loss": 1.4346, "step": 622 },
{ "epoch": 1.94, "learning_rate": 5.650558223723365e-05, "loss": 1.4342, "step": 623 },
{ "epoch": 1.94, "learning_rate": 5.620898065578268e-05, "loss": 1.4699, "step": 624 },
{ "epoch": 1.94, "learning_rate": 5.591285495559453e-05, "loss": 1.5088, "step": 625 },
{ "epoch": 1.95, "learning_rate": 5.561720835469602e-05, "loss": 1.5015, "step": 626 },
{ "epoch": 1.95, "learning_rate": 5.5322044065907475e-05, "loss": 1.4243, "step": 627 },
{ "epoch": 1.95, "learning_rate": 5.502736529680785e-05, "loss": 1.4553, "step": 628 },
{ "epoch": 1.96, "learning_rate": 5.47331752497001e-05, "loss": 1.4419, "step": 629 },
{ "epoch": 1.96, "learning_rate": 5.443947712157587e-05, "loss": 1.4172, "step": 630 },
{ "epoch": 1.96, "learning_rate": 5.41462741040814e-05, "loss": 1.4888, "step": 631 },
{ "epoch": 1.97, "learning_rate": 5.385356938348234e-05, "loss": 1.412, "step": 632 },
{ "epoch": 1.97, "learning_rate": 5.3561366140629274e-05, "loss": 1.4327, "step": 633 },
{ "epoch": 1.97, "learning_rate": 5.326966755092334e-05, "loss": 1.502, "step": 634 },
{ "epoch": 1.98, "learning_rate": 5.297847678428141e-05, "loss": 1.4499, "step": 635 },
{ "epoch": 1.98, "learning_rate": 5.2687797005101834e-05, "loss": 1.4783, "step": 636 },
{ "epoch": 1.98, "learning_rate": 5.239763137223004e-05, "loss": 1.4378, "step": 637 },
{ "epoch": 1.98, "learning_rate": 5.21079830389241e-05, "loss": 1.5055, "step": 638 },
{ "epoch": 1.99, "learning_rate": 5.18188551528207e-05, "loss": 1.4963, "step": 639 },
{ "epoch": 1.99, "learning_rate": 5.1530250855900576e-05, "loss": 1.4799, "step": 640 },
{ "epoch": 1.99, "learning_rate": 5.124217328445475e-05, "loss": 1.4388, "step": 641 },
{ "epoch": 2.0, "learning_rate": 5.095462556905021e-05, "loss": 1.484, "step": 642 },
{ "epoch": 2.0, "learning_rate": 5.0667610834495785e-05, "loss": 1.4811, "step": 643 },
{ "epoch": 2.0, "learning_rate": 5.03811321998086e-05, "loss": 1.2941, "step": 644 },
{ "epoch": 2.01, "learning_rate": 5.009519277817976e-05, "loss": 1.3975, "step": 645 },
{ "epoch": 2.01, "learning_rate": 4.9809795676940815e-05, "loss": 1.3432, "step": 646 },
{ "epoch": 2.01, "learning_rate": 4.952494399752976e-05, "loss": 1.3014, "step": 647 },
{ "epoch": 2.02, "learning_rate": 4.924064083545744e-05, "loss": 1.3491, "step": 648 },
{ "epoch": 2.02, "learning_rate": 4.8956889280274056e-05, "loss": 1.3238, "step": 649 },
{ "epoch": 2.02, "learning_rate": 4.8673692415535186e-05, "loss": 1.327, "step": 650 },
{ "epoch": 2.02, "eval_loss": 1.6340641975402832, "eval_runtime": 233.6965, "eval_samples_per_second": 16.359, "eval_steps_per_second": 4.091, "step": 650 },
{ "epoch": 2.02, "learning_rate": 4.83910533187688e-05, "loss": 1.3208, "step": 651 },
{ "epoch": 2.03, "learning_rate": 4.810897506144137e-05, "loss": 1.2936, "step": 652 },
{ "epoch": 2.03, "learning_rate": 4.782746070892472e-05, "loss": 1.323, "step": 653 },
{ "epoch": 2.03, "learning_rate": 4.754651332046274e-05, "loss": 1.3304, "step": 654 },
{ "epoch": 2.04, "learning_rate": 4.726613594913796e-05, "loss": 1.2426, "step": 655 },
{ "epoch": 2.04, "learning_rate": 4.698633164183853e-05, "loss": 1.2882, "step": 656 },
{ "epoch": 2.04, "learning_rate": 4.670710343922504e-05, "loss": 1.3273, "step": 657 },
{ "epoch": 2.05, "learning_rate": 4.6428454375697485e-05, "loss": 1.3391, "step": 658 },
{ "epoch": 2.05, "learning_rate": 4.615038747936237e-05, "loss": 1.3143, "step": 659 },
{ "epoch": 2.05, "learning_rate": 4.587290577199965e-05, "loss": 1.2846, "step": 660 },
{ "epoch": 2.06, "learning_rate": 4.559601226902998e-05, "loss": 1.2887, "step": 661 },
{ "epoch": 2.06, "learning_rate": 4.531970997948203e-05, "loss": 1.3239, "step": 662 },
{ "epoch": 2.06, "learning_rate": 4.504400190595958e-05, "loss": 1.3552, "step": 663 },
{ "epoch": 2.07, "learning_rate": 4.476889104460907e-05, "loss": 1.3554, "step": 664 },
{ "epoch": 2.07, "learning_rate": 4.4494380385086986e-05, "loss": 1.3333, "step": 665 },
{ "epoch": 2.07, "learning_rate": 4.422047291052728e-05, "loss": 1.3107, "step": 666 },
{ "epoch": 2.07, "learning_rate": 4.3947171597509176e-05, "loss": 1.3228, "step": 667 },
{ "epoch": 2.08, "learning_rate": 4.367447941602453e-05, "loss": 1.3224, "step": 668 },
{ "epoch": 2.08, "learning_rate": 4.3402399329445855e-05, "loss": 1.2844, "step": 669 },
{ "epoch": 2.08, "learning_rate": 4.3130934294493885e-05, "loss": 1.3352, "step": 670 },
{ "epoch": 2.09, "learning_rate": 4.286008726120543e-05, "loss": 1.3217, "step": 671 },
{ "epoch": 2.09, "learning_rate": 4.2589861172901634e-05, "loss": 1.2976, "step": 672 },
{ "epoch": 2.09, "learning_rate": 4.232025896615559e-05, "loss": 1.3108, "step": 673 },
{ "epoch": 2.1, "learning_rate": 4.2051283570760746e-05, "loss": 1.2893, "step": 674 },
{ "epoch": 2.1, "learning_rate": 4.178293790969883e-05, "loss": 1.3452, "step": 675 },
{ "epoch": 2.1, "learning_rate": 4.1515224899108164e-05, "loss": 1.332, "step": 676 },
{ "epoch": 2.11, "learning_rate": 4.1248147448252185e-05, "loss": 1.2998, "step": 677 },
{ "epoch": 2.11, "learning_rate": 4.098170845948736e-05, "loss": 1.2952, "step": 678 },
{ "epoch": 2.11, "learning_rate": 4.071591082823215e-05, "loss": 1.3512, "step": 679 },
{ "epoch": 2.12, "learning_rate": 4.045075744293525e-05, "loss": 1.3571, "step": 680 },
{ "epoch": 2.12, "learning_rate": 4.01862511850442e-05, "loss": 1.3415, "step": 681 },
{ "epoch": 2.12, "learning_rate": 3.992239492897429e-05, "loss": 1.3264, "step": 682 },
{ "epoch": 2.12, "learning_rate": 3.965919154207708e-05, "loss": 1.3013, "step": 683 },
{ "epoch": 2.13, "learning_rate": 3.939664388460932e-05, "loss": 1.369, "step": 684 },
{ "epoch": 2.13, "learning_rate": 3.913475480970193e-05, "loss": 1.2464, "step": 685 },
{ "epoch": 2.13, "learning_rate": 3.887352716332892e-05, "loss": 1.3162, "step": 686 },
{ "epoch": 2.14, "learning_rate": 3.861296378427656e-05, "loss": 1.3221, "step": 687 },
{ "epoch": 2.14, "learning_rate": 3.835306750411237e-05, "loss": 1.3219, "step": 688 },
{ "epoch": 2.14, "learning_rate": 3.8093841147154475e-05, "loss": 1.3446, "step": 689 },
{ "epoch": 2.15, "learning_rate": 3.783528753044093e-05, "loss": 1.3667, "step": 690 },
{ "epoch": 2.15, "learning_rate": 3.757740946369901e-05, "loss": 1.3098, "step": 691 },
{ "epoch": 2.15, "learning_rate": 3.732020974931471e-05, "loss": 1.3017, "step": 692 },
{ "epoch": 2.16, "learning_rate": 3.7063691182302304e-05, "loss": 1.3354, "step": 693 },
{ "epoch": 2.16, "learning_rate": 3.680785655027399e-05, "loss": 1.3081, "step": 694 },
{ "epoch": 2.16, "learning_rate": 3.6552708633409613e-05, "loss": 1.2563, "step": 695 },
{ "epoch": 2.16, "learning_rate": 3.6298250204426334e-05, "loss": 1.307, "step": 696 },
{ "epoch": 2.17, "learning_rate": 3.6044484028548676e-05, "loss": 1.2907, "step": 697 },
{ "epoch": 2.17, "learning_rate": 3.5791412863478326e-05, "loss": 1.3023, "step": 698 },
{ "epoch": 2.17, "learning_rate": 3.553903945936421e-05, "loss": 1.3144, "step": 699 },
{ "epoch": 2.18, "learning_rate": 3.528736655877264e-05, "loss": 1.3015, "step": 700 },
{ "epoch": 2.18, "eval_loss": 1.6665308475494385, "eval_runtime": 233.6943, "eval_samples_per_second": 16.359, "eval_steps_per_second": 4.091, "step": 700 },
{ "epoch": 2.18, "learning_rate": 3.5036396896657455e-05, "loss": 1.2943, "step": 701 },
{ "epoch": 2.18, "learning_rate": 3.478613320033042e-05, "loss": 1.3333, "step": 702 },
{ "epoch": 2.19, "learning_rate": 3.453657818943142e-05, "loss": 1.2983, "step": 703 },
{ "epoch": 2.19, "learning_rate": 3.4287734575898975e-05, "loss": 1.3392, "step": 704 },
{ "epoch": 2.19, "learning_rate": 3.403960506394092e-05, "loss": 1.2677, "step": 705 },
{ "epoch": 2.2, "learning_rate": 3.379219235000463e-05, "loss": 1.3197, "step": 706 },
{ "epoch": 2.2, "learning_rate": 3.3545499122748216e-05, "loss": 1.3343, "step": 707 },
{ "epoch": 2.2, "learning_rate": 3.329952806301092e-05, "loss": 1.3591, "step": 708 },
{ "epoch": 2.21, "learning_rate": 3.305428184378413e-05, "loss": 1.3272, "step": 709 },
{ "epoch": 2.21, "learning_rate": 3.280976313018239e-05, "loss": 1.3499, "step": 710 },
{ "epoch": 2.21, "learning_rate": 3.256597457941429e-05, "loss": 1.3371, "step": 711 },
{ "epoch": 2.21, "learning_rate": 3.232291884075373e-05, "loss": 1.312, "step": 712 },
{ "epoch": 2.22, "learning_rate": 3.208059855551101e-05, "loss": 1.3502, "step": 713 },
{ "epoch": 2.22, "learning_rate": 3.18390163570042e-05, "loss": 1.3094, "step": 714 },
{ "epoch": 2.22, "learning_rate": 3.1598174870530604e-05, "loss": 1.3181, "step": 715 },
{ "epoch": 2.23, "learning_rate": 3.1358076713338014e-05, "loss": 1.3011, "step": 716 },
{ "epoch": 2.23, "learning_rate": 3.1118724494596405e-05, "loss": 1.3054, "step": 717 },
{ "epoch": 2.23, "learning_rate": 3.0880120815369694e-05, "loss": 1.3215, "step": 718 },
{ "epoch": 2.24, "learning_rate": 3.0642268268587136e-05, "loss": 1.2908, "step": 719 },
{ "epoch": 2.24, "learning_rate": 3.0405169439015557e-05, "loss": 1.3334, "step": 720 },
{ "epoch": 2.24, "learning_rate": 3.0168826903230906e-05, "loss": 1.3275, "step": 721 },
{ "epoch": 2.25, "learning_rate": 2.9933243229590568e-05, "loss": 1.3329, "step": 722 },
{ "epoch": 2.25, "learning_rate": 2.969842097820519e-05, "loss": 1.3185, "step": 723 },
{ "epoch": 2.25, "learning_rate": 2.9464362700910943e-05, "loss": 1.3443, "step": 724 },
{ "epoch": 2.26, "learning_rate": 2.9231070941241988e-05, "loss": 1.3034, "step": 725 },
{ "epoch": 2.26, "learning_rate": 2.899854823440241e-05, "loss": 1.304, "step": 726 },
{ "epoch": 2.26, "learning_rate": 2.8766797107239164e-05, "loss": 1.3136, "step": 727 },
{ "epoch": 2.26, "learning_rate": 2.8535820078214236e-05, "loss": 1.2894, "step": 728 },
{ "epoch": 2.27, "learning_rate": 2.8305619657377413e-05, "loss": 1.3303, "step": 729 },
{ "epoch": 2.27, "learning_rate": 2.8076198346339113e-05, "loss": 1.3158, "step": 730 },
{ "epoch": 2.27, "learning_rate": 2.7847558638242964e-05, "loss": 1.3071, "step": 731 },
{ "epoch": 2.28, "learning_rate": 2.7619703017738917e-05, "loss": 1.2951, "step": 732 },
{ "epoch": 2.28, "learning_rate": 2.7392633960956127e-05, "loss": 1.3138, "step": 733 },
{ "epoch": 2.28, "learning_rate": 2.7166353935476085e-05, "loss": 1.3523, "step": 734 },
{ "epoch": 2.29, "learning_rate": 2.694086540030587e-05, "loss": 1.2937, "step": 735 },
{ "epoch": 2.29, "learning_rate": 2.671617080585127e-05, "loss": 1.3493, "step": 736 },
{ "epoch": 2.29, "learning_rate": 2.6492272593890267e-05, "loss": 1.309, "step": 737 },
{ "epoch": 2.3, "learning_rate": 2.6269173197546527e-05, "loss": 1.3188, "step": 738 },
{ "epoch": 2.3, "learning_rate": 2.6046875041262852e-05, "loss": 1.3202, "step": 739 },
{ "epoch": 2.3, "learning_rate": 2.5825380540774914e-05, "loss": 1.359, "step": 740 },
{ "epoch": 2.3, "learning_rate": 2.560469210308497e-05, "loss": 1.2837, "step": 741 },
{ "epoch": 2.31, "learning_rate": 2.5384812126435697e-05, "loss": 1.3195, "step": 742 },
{ "epoch": 2.31, "learning_rate": 2.5165743000284213e-05, "loss": 1.2797, "step": 743 },
{ "epoch": 2.31, "learning_rate": 2.4947487105275945e-05, "loss": 1.3656, "step": 744 },
{ "epoch": 2.32, "learning_rate": 2.4730046813218987e-05, "loss": 1.3094, "step": 745 },
{ "epoch": 2.32, "learning_rate": 2.451342448705811e-05, "loss": 1.3176, "step": 746 },
{ "epoch": 2.32, "learning_rate": 2.4297622480849104e-05, "loss": 1.3318, "step": 747 },
{ "epoch": 2.33, "learning_rate": 2.408264313973343e-05, "loss": 1.3367, "step": 748 },
{ "epoch": 2.33, "learning_rate": 2.3868488799912414e-05, "loss": 1.2717, "step": 749 },
{ "epoch": 2.33, "learning_rate": 2.3655161788622138e-05, "loss": 1.3328, "step": 750 },
{ "epoch": 2.33, "eval_loss": 1.6713805198669434, "eval_runtime": 233.7116, "eval_samples_per_second": 16.358, "eval_steps_per_second": 4.091, "step": 750 },
{ "epoch": 2.34, "learning_rate": 2.344266442410794e-05, "loss": 1.3325, "step": 751 },
{ "epoch": 2.34, "learning_rate": 2.323099901559931e-05, "loss": 1.3277, "step": 752 },
{ "epoch": 2.34, "learning_rate": 2.302016786328488e-05, "loss": 1.3567, "step": 753 },
{ "epoch": 2.35, "learning_rate": 2.281017325828716e-05, "loss": 1.3087, "step": 754 },
{ "epoch": 2.35, "learning_rate": 2.260101748263803e-05, "loss": 1.3173, "step": 755 },
{ "epoch": 2.35, "learning_rate": 2.2392702809253596e-05, "loss": 1.3234, "step": 756 },
{ "epoch": 2.35, "learning_rate": 2.218523150190962e-05, "loss": 1.3649, "step": 757 },
{ "epoch": 2.36, "learning_rate": 2.1978605815217025e-05, "loss": 1.3433, "step": 758 },
{ "epoch": 2.36, "learning_rate": 2.177282799459719e-05, "loss": 1.2992, "step": 759 },
{ "epoch": 2.36, "learning_rate": 2.1567900276257703e-05, "loss": 1.3004, "step": 760 },
{ "epoch": 2.37, "learning_rate": 2.1363824887167993e-05, "loss": 1.2894, "step": 761 },
{ "epoch": 2.37, "learning_rate": 2.1160604045035115e-05, "loss": 1.3151, "step": 762 },
{ "epoch": 2.37, "learning_rate": 2.0958239958279756e-05, "loss": 1.2694, "step": 763 },
{ "epoch": 2.38, "learning_rate": 2.0756734826012104e-05, "loss": 1.2979, "step": 764 },
{ "epoch": 2.38, "learning_rate": 2.0556090838007957e-05, "loss": 1.3187, "step": 765 },
{ "epoch": 2.38, "learning_rate": 2.0356310174685124e-05, "loss": 1.3255, "step": 766 },
{ "epoch": 2.39, "learning_rate": 2.0157395007079428e-05, "loss": 1.3623, "step": 767 },
{ "epoch": 2.39, "learning_rate": 1.9959347496821333e-05, "loss": 1.317, "step": 768 },
{ "epoch": 2.39, "learning_rate": 1.9762169796112397e-05, "loss": 1.3102, "step": 769 },
{ "epoch": 2.4, "learning_rate": 1.956586404770182e-05, "loss": 1.244, "step": 770 },
{ "epoch": 2.4, "learning_rate": 1.937043238486329e-05, "loss": 1.3051, "step": 771 },
{ "epoch": 2.4, "learning_rate": 1.9175876931371626e-05, "loss": 1.2869, "step": 772 },
{ "epoch": 2.4, "learning_rate": 1.898219980147993e-05, "loss": 1.3365, "step": 773 },
{ "epoch": 2.41, "learning_rate": 1.878940309989633e-05, "loss": 1.3091, "step": 774 },
{ "epoch": 2.41, "learning_rate": 1.859748892176133e-05, "loss": 1.3401, "step": 775 },
{ "epoch": 2.41, "learning_rate": 1.840645935262497e-05, "loss": 1.3562, "step": 776 },
{ "epoch": 2.42, "learning_rate": 1.8216316468424098e-05, "loss": 1.3201, "step": 777 },
{ "epoch": 2.42, "learning_rate": 1.8027062335459977e-05, "loss": 1.2757, "step": 778 },
{ "epoch": 2.42, "learning_rate": 1.7838699010375625e-05, "loss": 1.3541, "step": 779 },
{ "epoch": 2.43, "learning_rate": 1.7651228540133623e-05, "loss": 1.3491, "step": 780 },
{ "epoch": 2.43, "learning_rate": 1.7464652961993768e-05, "loss": 1.2903, "step": 781 },
{ "epoch": 2.43, "learning_rate": 1.727897430349097e-05, "loss": 1.3879, "step": 782 },
{ "epoch": 2.44, "learning_rate": 1.7094194582413326e-05, "loss": 1.3311, "step": 783 },
{ "epoch": 2.44, "learning_rate": 1.6910315806779987e-05, "loss": 1.34, "step": 784 },
{ "epoch": 2.44, "learning_rate": 1.6727339974819456e-05, "loss": 1.3331, "step": 785 },
{ "epoch": 2.44, "learning_rate": 1.6545269074947922e-05, "loss": 1.3164, "step": 786 },
{ "epoch": 2.45, "learning_rate": 1.636410508574753e-05, "loss": 1.3505, "step": 787 },
{ "epoch": 2.45, "learning_rate": 1.618384997594494e-05, "loss": 1.2556, "step": 788 },
{ "epoch": 2.45, "learning_rate": 1.6004505704389983e-05, "loss": 1.3023, "step": 789 },
{ "epoch": 2.46, "learning_rate": 1.5826074220034226e-05, "loss": 1.3524, "step": 790 },
{ "epoch": 2.46, "learning_rate": 1.5648557461910018e-05, "loss": 1.3215, "step": 791 },
{ "epoch": 2.46, "learning_rate": 1.547195735910919e-05, "loss": 1.3593, "step": 792 },
{ "epoch": 2.47, "learning_rate": 1.5296275830762206e-05, "loss": 1.3482, "step": 793 },
{ "epoch": 2.47, "learning_rate": 1.5121514786017365e-05, "loss": 1.3521, "step": 794 },
{ "epoch": 2.47, "learning_rate": 1.4947676124019839e-05, "loss": 1.3138, "step": 795 },
{ "epoch": 2.48, "learning_rate": 1.4774761733891319e-05, "loss": 1.3701, "step": 796 },
{ "epoch": 2.48, "learning_rate": 1.4602773494709254e-05, "loss": 1.3408, "step": 797 },
{ "epoch": 2.48, "learning_rate": 1.4431713275486602e-05, "loss": 1.343, "step": 798 },
{ "epoch": 2.49, "learning_rate": 1.4261582935151352e-05, "loss": 1.2744, "step": 799 },
{ "epoch": 2.49, "learning_rate": 1.4092384322526442e-05, "loss": 1.3453, "step": 800 },
{ "epoch": 2.49, "eval_loss": 1.6718111038208008, "eval_runtime": 233.7605, "eval_samples_per_second": 16.354, "eval_steps_per_second": 4.09, "step": 800 },
{ "epoch": 2.49, "learning_rate": 1.3924119276309677e-05, "loss": 1.2647, "step": 801 },
{ "epoch": 2.49, "learning_rate": 1.3756789625053601e-05, "loss": 1.321, "step": 802 },
{ "epoch": 2.5, "learning_rate": 1.3590397187145853e-05, "loss": 1.3403, "step": 803 },
{ "epoch": 2.5, "learning_rate": 1.3424943770789211e-05, "loss": 1.3191, "step": 804 },
{ "epoch": 2.5, "learning_rate": 1.3260431173982001e-05, "loss": 1.2983, "step": 805 },
{ "epoch": 2.51, "learning_rate": 1.3096861184498643e-05, "loss": 1.2955, "step": 806 },
{ "epoch": 2.51, "learning_rate": 1.293423557987009e-05, "loss": 1.3297, "step": 807 },
{ "epoch": 2.51, "learning_rate": 1.2772556127364588e-05, "loss": 1.3273, "step": 808 },
{ "epoch": 2.52, "learning_rate": 1.2611824583968457e-05, "loss": 1.2867, "step": 809 },
{ "epoch": 2.52, "learning_rate": 1.2452042696366984e-05, "loss": 1.3132, "step": 810 },
{ "epoch": 2.52, "learning_rate": 1.229321220092552e-05, "loss": 1.323, "step": 811 },
{ "epoch": 2.53, "learning_rate": 1.2135334823670452e-05, "loss": 1.3332, "step": 812 },
{ "epoch": 2.53, "learning_rate": 1.1978412280270568e-05, "loss": 1.2775, "step": 813 },
{ "epoch": 2.53, "learning_rate": 1.182244627601845e-05, "loss": 1.3049, "step": 814 },
{ "epoch": 2.53, "learning_rate": 1.1667438505811801e-05, "loss": 1.3206, "step": 815 },
{ "epoch": 2.54, "learning_rate": 1.1513390654135103e-05, "loss": 1.386, "step": 816 },
{ "epoch": 2.54, "learning_rate": 1.1360304395041343e-05, "loss": 1.3292, "step": 817 },
{ "epoch": 2.54, "learning_rate": 1.1208181392133766e-05, "loss": 1.3249, "step": 818 },
{ "epoch": 2.55, "learning_rate": 1.1057023298547864e-05, "loss": 1.2934, "step": 819 },
{ "epoch": 2.55, "learning_rate": 1.0906831756933267e-05, "loss": 1.3471, "step": 820 },
{ "epoch": 2.55, "learning_rate": 1.0757608399436125e-05, "loss": 1.3505, "step": 821 },
{ "epoch": 2.56, "learning_rate": 1.0609354847681152e-05, "loss": 1.283, "step": 822 },
{ "epoch": 2.56, "learning_rate": 1.0462072712754035e-05, "loss": 1.2679, "step": 823 },
{ "epoch": 2.56, "learning_rate": 1.0315763595184113e-05, "loss": 1.3317, "step": 824 },
{ "epoch": 2.57, "learning_rate": 1.0170429084926746e-05, "loss": 1.308, "step": 825 },
{ "epoch": 2.57, "learning_rate": 1.0026070761346229e-05, "loss": 1.2816, "step": 826 },
{ "epoch": 2.57, "learning_rate": 9.882690193198463e-06, "loss": 1.2712, "step": 827 },
{ "epoch": 2.58, "learning_rate": 9.740288938613995e-06, "loss": 1.3133, "step": 828 },
{ "epoch": 2.58, "learning_rate": 9.598868545081153e-06, "loss": 1.257, "step": 829 },
{ "epoch": 2.58, "learning_rate": 9.458430549429032e-06, "loss": 1.3271, "step": 830 },
{ "epoch": 2.58, "learning_rate": 9.318976477811026e-06, "loss": 1.3329, "step": 831 },
{ "epoch": 2.59, "learning_rate": 9.18050784568808e-06, "loss": 1.2939, "step": 832 },
{ "epoch": 2.59, "learning_rate": 9.043026157812229e-06, "loss": 1.3111, "step": 833 },
{ "epoch": 2.59, "learning_rate": 8.906532908210396e-06, "loss": 1.3164, "step": 834 },
{ "epoch": 2.6, "learning_rate": 8.771029580167967e-06, "loss": 1.3162, "step": 835 },
{ "epoch": 2.6, "learning_rate": 8.636517646212761e-06, "loss": 1.303, "step": 836 },
{ "epoch": 2.6, "learning_rate": 8.502998568099063e-06, "loss": 1.3545, "step": 837 },
{ "epoch": 2.61, "learning_rate": 8.370473796791622e-06, "loss": 1.3224, "step": 838 },
{ "epoch": 2.61, "learning_rate": 8.238944772450064e-06, "loss": 1.3146, "step": 839 },
{ "epoch": 2.61, "learning_rate": 8.108412924413056e-06, "loss": 1.3171, "step": 840 },
{ "epoch": 2.62, "learning_rate": 7.978879671182848e-06, "loss": 1.3209, "step": 841 },
{ "epoch": 2.62, "learning_rate": 7.850346420409949e-06, "loss": 1.3143, "step": 842 },
{ "epoch": 2.62, "learning_rate": 7.722814568877646e-06, "loss": 1.3112, "step": 843 },
{ "epoch": 2.63, "learning_rate": 7.596285502486966e-06, "loss": 1.3056, "step": 844 },
{ "epoch": 2.63, "learning_rate": 7.4707605962415775e-06, "loss": 1.3151, "step": 845 },
{ "epoch": 2.63, "learning_rate": 7.346241214232819e-06, "loss": 1.3774, "step": 846 },
{ "epoch": 2.63, "learning_rate": 7.222728709624949e-06, "loss": 1.3432, "step": 847 },
{ "epoch": 2.64, "learning_rate": 7.100224424640312e-06, "loss": 1.3036, "step": 848 },
{ "epoch": 2.64, "learning_rate": 6.978729690544927e-06, "loss": 1.2911, "step": 849 },
{ "epoch": 2.64, "learning_rate": 6.858245827633869e-06, "loss": 1.3458, "step": 850 },
{ "epoch": 2.64, "eval_loss": 1.6725014448165894, "eval_runtime": 233.7534, "eval_samples_per_second": 16.355, "eval_steps_per_second": 4.09, "step": 850 },
{ "epoch": 2.65, "learning_rate": 6.7387741452169415e-06, "loss": 1.2943, "step": 851 },
{ "epoch": 2.65, "learning_rate": 6.6203159416045605e-06, "loss": 1.3108, "step": 852 },
{ "epoch": 2.65, "learning_rate": 6.502872504093527e-06, "loss": 1.2836, "step": 853 },
{ "epoch": 2.66, "learning_rate": 6.3864451089530985e-06, "loss": 1.3342, "step": 854 },
{ "epoch": 2.66, "learning_rate": 6.271035021411098e-06, "loss": 1.304, "step": 855 },
{ "epoch": 2.66, "learning_rate": 6.156643495640157e-06, "loss": 1.3163, "step": 856 },
{ "epoch": 2.67, "learning_rate": 6.043271774744086e-06, "loss": 1.3385, "step": 857 },
{ "epoch": 2.67, "learning_rate": 5.930921090744402e-06, "loss": 1.2856, "step": 858 },
{ "epoch": 2.67, "learning_rate": 5.81959266456692e-06, "loss": 1.3414, "step": 859 },
{ "epoch": 2.67, "learning_rate": 5.709287706028454e-06, "loss": 1.3353, "step": 860 },
{ "epoch": 2.68, "learning_rate": 5.600007413823693e-06, "loss": 1.3286, "step": 861 },
{ "epoch": 2.68, "learning_rate": 5.491752975512232e-06, "loss": 1.3089, "step": 862 },
{ "epoch": 2.68, "learning_rate": 5.38452556750555e-06, "loss": 1.3385, "step": 863 },
{ "epoch": 2.69, "learning_rate": 5.278326355054308e-06, "loss": 1.34, "step": 864 },
{ "epoch": 2.69, "learning_rate": 5.173156492235665e-06, "loss": 1.328, "step": 865 },
{ "epoch": 2.69, "learning_rate": 5.069017121940733e-06, "loss": 1.2771, "step": 866 },
{ "epoch": 2.7, "learning_rate": 4.96590937586221e-06, "loss": 1.2927, "step": 867 },
{ "epoch": 2.7, "learning_rate": 4.863834374481946e-06, "loss": 1.2976, "step": 868 },
{ "epoch": 2.7, "learning_rate": 4.762793227058915e-06, "loss": 1.3291, "step": 869 },
{ "epoch": 2.71, "learning_rate": 4.662787031617122e-06, "loss": 1.3162, "step": 870 },
{ "epoch": 2.71, "learning_rate": 4.563816874933547e-06, "loss": 1.287, "step": 871 },
{ "epoch": 2.71, "learning_rate": 4.465883832526552e-06, "loss": 1.3419, "step": 872 },
{ "epoch": 2.72, "learning_rate": 4.368988968644006e-06, "loss": 1.3645, "step": 873 },
{ "epoch": 2.72, "learning_rate": 4.2731333362518e-06, "loss": 1.3361, "step": 874 },
{ "epoch": 2.72, "learning_rate": 4.1783179770224275e-06, "loss": 1.3006, "step": 875 },
{ "epoch": 2.72, "learning_rate": 4.084543921323591e-06, "loss": 1.2943, "step": 876 },
{ "epoch": 2.73, "learning_rate": 3.991812188207112e-06, "loss": 1.3161, "step": 877 },
{ "epoch": 2.73, "learning_rate": 3.90012378539768e-06, "loss": 1.3414, "step": 878 },
{ "epoch": 2.73, "learning_rate": 3.8094797092821264e-06, "loss": 1.3094, "step": 879 },
{ "epoch": 2.74, "learning_rate": 3.7198809448984128e-06, "loss": 1.2949, "step": 880 },
{ "epoch": 2.74, "learning_rate": 3.6313284659250215e-06, "loss": 1.3336, "step": 881 },
{ "epoch": 2.74, "learning_rate": 3.5438232346703627e-06, "loss": 1.3238, "step": 882 },
{ "epoch": 2.75, "learning_rate": 3.457366202062284e-06, "loss": 1.3209, "step": 883 },
{ "epoch": 2.75, "learning_rate": 3.371958307637746e-06, "loss": 1.352, "step": 884 },
{ "epoch": 2.75, "learning_rate": 3.287600479532649e-06, "loss": 1.3234, "step": 885 },
{ "epoch": 2.76, "learning_rate": 3.204293634471689e-06, "loss": 1.2995, "step": 886 },
{ "epoch": 2.76, "learning_rate": 3.1220386777584764e-06, "loss": 1.3228, "step": 887 },
{ "epoch": 2.76, "learning_rate": 3.0408365032656093e-06, "loss": 1.3059, "step": 888 },
{ "epoch": 2.77, "learning_rate": 2.960687993425004e-06, "loss": 1.2848, "step": 889 },
{ "epoch": 2.77, "learning_rate": 2.8815940192183033e-06, "loss": 1.3639, "step": 890 },
{ "epoch": 2.77, "learning_rate": 2.803555440167427e-06, "loss": 1.3454, "step": 891 },
{ "epoch": 2.77, "learning_rate": 2.7265731043251807e-06, "loss": 1.317, "step": 892 },
{ "epoch": 2.78, "learning_rate": 2.6506478482661077e-06, "loss": 1.3105, "step": 893 },
{ "epoch": 2.78, "learning_rate": 2.575780497077307e-06, "loss": 1.2993, "step": 894 },
{ "epoch": 2.78, "learning_rate": 2.501971864349606e-06, "loss": 1.3332, "step": 895 },
{ "epoch": 2.79, "learning_rate": 2.429222752168547e-06, "loss": 1.3281, "step": 896 },
{ "epoch": 2.79, "learning_rate": 2.357533951105839e-06, "loss": 1.3206, "step": 897 },
{ "epoch": 2.79, "learning_rate": 2.28690624021064e-06, "loss": 1.3227, "step": 898 },
{ "epoch": 2.8, "learning_rate": 2.217340387001121e-06, "loss": 1.3449, "step": 899 },
{ "epoch": 2.8, "learning_rate": 2.1488371474562063e-06, "loss": 1.3016, "step": 900 },
{ "epoch": 2.8, "eval_loss": 1.6737442016601562, "eval_runtime": 233.8114, "eval_samples_per_second": 16.351, "eval_steps_per_second": 4.089, "step": 900 }
],
"logging_steps": 1,
"max_steps": 963,
"num_train_epochs": 3,
"save_steps": 50,
"total_flos": 2.5238444917653504e+18,
"trial_name": null,
"trial_params": null
}