{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.05800398066533978, "eval_steps": 34, "global_step": 204, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0002843332385555871, "eval_loss": 1.5018354654312134, "eval_runtime": 166.295, "eval_samples_per_second": 35.623, "eval_steps_per_second": 4.456, "step": 1 }, { "epoch": 0.0008529997156667614, "grad_norm": 0.7210565209388733, "learning_rate": 1.5e-05, "loss": 1.5223, "step": 3 }, { "epoch": 0.0017059994313335229, "grad_norm": 0.6832892298698425, "learning_rate": 3e-05, "loss": 1.5221, "step": 6 }, { "epoch": 0.0025589991470002845, "grad_norm": 0.7228020429611206, "learning_rate": 4.5e-05, "loss": 1.408, "step": 9 }, { "epoch": 0.0034119988626670457, "grad_norm": 0.6384875774383545, "learning_rate": 4.999675562428437e-05, "loss": 1.4404, "step": 12 }, { "epoch": 0.004264998578333807, "grad_norm": 0.3351251482963562, "learning_rate": 4.9979724954289244e-05, "loss": 1.3736, "step": 15 }, { "epoch": 0.005117998294000569, "grad_norm": 0.182565838098526, "learning_rate": 4.994810682835951e-05, "loss": 1.3079, "step": 18 }, { "epoch": 0.00597099800966733, "grad_norm": 0.19113591313362122, "learning_rate": 4.990191971059033e-05, "loss": 1.316, "step": 21 }, { "epoch": 0.0068239977253340914, "grad_norm": 0.1948336660861969, "learning_rate": 4.984119057295783e-05, "loss": 1.2426, "step": 24 }, { "epoch": 0.007676997441000853, "grad_norm": 0.18684643507003784, "learning_rate": 4.976595487956823e-05, "loss": 1.2503, "step": 27 }, { "epoch": 0.008529997156667614, "grad_norm": 0.18414735794067383, "learning_rate": 4.967625656594782e-05, "loss": 1.2323, "step": 30 }, { "epoch": 0.009382996872334376, "grad_norm": 0.17294025421142578, "learning_rate": 4.957214801338581e-05, "loss": 1.1942, "step": 33 }, { "epoch": 0.009667330110889964, "eval_loss": 1.1813108921051025, "eval_runtime": 167.7438, "eval_samples_per_second": 35.316, "eval_steps_per_second": 4.417, "step": 34 }, { "epoch": 0.010235996588001138, "grad_norm": 0.16702412068843842, "learning_rate": 4.9453690018345144e-05, "loss": 1.1981, "step": 36 }, { "epoch": 0.011088996303667898, "grad_norm": 0.1968175172805786, "learning_rate": 4.932095175695911e-05, "loss": 1.1675, "step": 39 }, { "epoch": 0.01194199601933466, "grad_norm": 0.18244469165802002, "learning_rate": 4.917401074463441e-05, "loss": 1.1584, "step": 42 }, { "epoch": 0.01279499573500142, "grad_norm": 0.16749081015586853, "learning_rate": 4.901295279078431e-05, "loss": 1.1134, "step": 45 }, { "epoch": 0.013647995450668183, "grad_norm": 0.17398597300052643, "learning_rate": 4.883787194871841e-05, "loss": 1.1139, "step": 48 }, { "epoch": 0.014500995166334945, "grad_norm": 0.17164087295532227, "learning_rate": 4.864887046071813e-05, "loss": 1.079, "step": 51 }, { "epoch": 0.015353994882001705, "grad_norm": 0.1644001007080078, "learning_rate": 4.8446058698330115e-05, "loss": 1.0646, "step": 54 }, { "epoch": 0.01620699459766847, "grad_norm": 0.16490623354911804, "learning_rate": 4.822955509791233e-05, "loss": 1.0739, "step": 57 }, { "epoch": 0.017059994313335228, "grad_norm": 0.17708458006381989, "learning_rate": 4.799948609147061e-05, "loss": 1.0897, "step": 60 }, { "epoch": 0.01791299402900199, "grad_norm": 0.15597032010555267, "learning_rate": 4.7755986032825864e-05, "loss": 1.0566, "step": 63 }, { "epoch": 0.018765993744668752, "grad_norm": 0.17728550732135773, "learning_rate": 4.74991971191553e-05, "loss": 1.0275, "step": 66 }, { "epoch": 
0.019334660221779928, "eval_loss": 1.0011852979660034, "eval_runtime": 168.1174, "eval_samples_per_second": 35.237, "eval_steps_per_second": 4.408, "step": 68 }, { "epoch": 0.019618993460335514, "grad_norm": 0.16994404792785645, "learning_rate": 4.7229269307953235e-05, "loss": 0.9841, "step": 69 }, { "epoch": 0.020471993176002276, "grad_norm": 0.17181497812271118, "learning_rate": 4.694636022946012e-05, "loss": 0.9944, "step": 72 }, { "epoch": 0.021324992891669035, "grad_norm": 0.21411621570587158, "learning_rate": 4.665063509461097e-05, "loss": 1.0013, "step": 75 }, { "epoch": 0.022177992607335797, "grad_norm": 0.19594340026378632, "learning_rate": 4.6342266598556814e-05, "loss": 0.9969, "step": 78 }, { "epoch": 0.02303099232300256, "grad_norm": 0.2057276964187622, "learning_rate": 4.6021434819815555e-05, "loss": 0.9808, "step": 81 }, { "epoch": 0.02388399203866932, "grad_norm": 0.21778051555156708, "learning_rate": 4.568832711511125e-05, "loss": 0.9456, "step": 84 }, { "epoch": 0.024736991754336083, "grad_norm": 0.21349306404590607, "learning_rate": 4.534313800996299e-05, "loss": 0.953, "step": 87 }, { "epoch": 0.02558999147000284, "grad_norm": 0.21818219125270844, "learning_rate": 4.498606908508754e-05, "loss": 0.9133, "step": 90 }, { "epoch": 0.026442991185669604, "grad_norm": 0.2510223388671875, "learning_rate": 4.46173288586818e-05, "loss": 0.9125, "step": 93 }, { "epoch": 0.027295990901336366, "grad_norm": 0.24031595885753632, "learning_rate": 4.4237132664654154e-05, "loss": 0.8784, "step": 96 }, { "epoch": 0.028148990617003128, "grad_norm": 0.2543729543685913, "learning_rate": 4.384570252687542e-05, "loss": 0.8984, "step": 99 }, { "epoch": 0.02900199033266989, "grad_norm": 0.27020716667175293, "learning_rate": 4.344326702952326e-05, "loss": 0.8719, "step": 102 }, { "epoch": 0.02900199033266989, "eval_loss": 0.8574855923652649, "eval_runtime": 168.0651, "eval_samples_per_second": 35.248, "eval_steps_per_second": 4.409, "step": 102 }, { "epoch": 0.029854990048336652, "grad_norm": 0.2670081853866577, "learning_rate": 4.303006118359537e-05, "loss": 0.8789, "step": 105 }, { "epoch": 0.03070798976400341, "grad_norm": 0.3037591874599457, "learning_rate": 4.260632628966974e-05, "loss": 0.8526, "step": 108 }, { "epoch": 0.031560989479670176, "grad_norm": 0.28440290689468384, "learning_rate": 4.217230979699188e-05, "loss": 0.8274, "step": 111 }, { "epoch": 0.03241398919533694, "grad_norm": 0.49433842301368713, "learning_rate": 4.172826515897146e-05, "loss": 0.8104, "step": 114 }, { "epoch": 0.03326698891100369, "grad_norm": 0.2911880314350128, "learning_rate": 4.12744516851726e-05, "loss": 0.8392, "step": 117 }, { "epoch": 0.034119988626670456, "grad_norm": 0.3204285502433777, "learning_rate": 4.0811134389884433e-05, "loss": 0.788, "step": 120 }, { "epoch": 0.03497298834233722, "grad_norm": 0.35650599002838135, "learning_rate": 4.0338583837360225e-05, "loss": 0.8004, "step": 123 }, { "epoch": 0.03582598805800398, "grad_norm": 0.310162216424942, "learning_rate": 3.985707598381544e-05, "loss": 0.8085, "step": 126 }, { "epoch": 0.03667898777367074, "grad_norm": 0.3370768129825592, "learning_rate": 3.9366892016277096e-05, "loss": 0.7317, "step": 129 }, { "epoch": 0.037531987489337504, "grad_norm": 0.4600171446800232, "learning_rate": 3.886831818837847e-05, "loss": 0.7626, "step": 132 }, { "epoch": 0.038384987205004266, "grad_norm": 0.43600377440452576, "learning_rate": 3.8361645653195026e-05, "loss": 0.7679, "step": 135 }, { "epoch": 0.038669320443559856, "eval_loss": 0.7138456106185913, 
"eval_runtime": 168.0686, "eval_samples_per_second": 35.248, "eval_steps_per_second": 4.409, "step": 136 }, { "epoch": 0.03923798692067103, "grad_norm": 0.4538515508174896, "learning_rate": 3.784717029321922e-05, "loss": 0.7585, "step": 138 }, { "epoch": 0.04009098663633779, "grad_norm": 0.5923985838890076, "learning_rate": 3.732519254757344e-05, "loss": 0.7211, "step": 141 }, { "epoch": 0.04094398635200455, "grad_norm": 0.36592337489128113, "learning_rate": 3.679601723656205e-05, "loss": 0.6604, "step": 144 }, { "epoch": 0.04179698606767131, "grad_norm": 0.44677990674972534, "learning_rate": 3.625995338366492e-05, "loss": 0.7846, "step": 147 }, { "epoch": 0.04264998578333807, "grad_norm": 0.4921860098838806, "learning_rate": 3.5717314035076355e-05, "loss": 0.6709, "step": 150 }, { "epoch": 0.04350298549900483, "grad_norm": 0.5457685589790344, "learning_rate": 3.516841607689501e-05, "loss": 0.6974, "step": 153 }, { "epoch": 0.044355985214671594, "grad_norm": 0.46299344301223755, "learning_rate": 3.461358005007128e-05, "loss": 0.6643, "step": 156 }, { "epoch": 0.045208984930338356, "grad_norm": 0.44877251982688904, "learning_rate": 3.405312996322042e-05, "loss": 0.6035, "step": 159 }, { "epoch": 0.04606198464600512, "grad_norm": 0.3635808527469635, "learning_rate": 3.348739310341068e-05, "loss": 0.6781, "step": 162 }, { "epoch": 0.04691498436167188, "grad_norm": 0.4172018766403198, "learning_rate": 3.2916699845036816e-05, "loss": 0.6195, "step": 165 }, { "epoch": 0.04776798407733864, "grad_norm": 0.36372795701026917, "learning_rate": 3.234138345689077e-05, "loss": 0.6599, "step": 168 }, { "epoch": 0.048336650554449814, "eval_loss": 0.606797456741333, "eval_runtime": 168.0542, "eval_samples_per_second": 35.251, "eval_steps_per_second": 4.409, "step": 170 }, { "epoch": 0.048620983793005404, "grad_norm": 0.4114479720592499, "learning_rate": 3.17617799075421e-05, "loss": 0.6287, "step": 171 }, { "epoch": 0.049473983508672166, "grad_norm": 0.36733385920524597, "learning_rate": 3.1178227669141744e-05, "loss": 0.621, "step": 174 }, { "epoch": 0.05032698322433893, "grad_norm": 0.3716995418071747, "learning_rate": 3.0591067519763895e-05, "loss": 0.5532, "step": 177 }, { "epoch": 0.05117998294000568, "grad_norm": 0.4997945725917816, "learning_rate": 3.0000642344401113e-05, "loss": 0.6024, "step": 180 }, { "epoch": 0.052032982655672445, "grad_norm": 0.43555817008018494, "learning_rate": 2.9407296934729227e-05, "loss": 0.5092, "step": 183 }, { "epoch": 0.05288598237133921, "grad_norm": 0.49766504764556885, "learning_rate": 2.8811377787758636e-05, "loss": 0.6556, "step": 186 }, { "epoch": 0.05373898208700597, "grad_norm": 0.6171467304229736, "learning_rate": 2.8213232903489865e-05, "loss": 0.5736, "step": 189 }, { "epoch": 0.05459198180267273, "grad_norm": 0.8651450276374817, "learning_rate": 2.761321158169134e-05, "loss": 0.5568, "step": 192 }, { "epoch": 0.055444981518339494, "grad_norm": 0.4486936926841736, "learning_rate": 2.7011664217918154e-05, "loss": 0.6145, "step": 195 }, { "epoch": 0.056297981234006256, "grad_norm": 0.5348999500274658, "learning_rate": 2.6408942098890936e-05, "loss": 0.591, "step": 198 }, { "epoch": 0.05715098094967302, "grad_norm": 0.4498997628688812, "learning_rate": 2.580539719735433e-05, "loss": 0.5572, "step": 201 }, { "epoch": 0.05800398066533978, "grad_norm": 0.5493082404136658, "learning_rate": 2.5201381966534748e-05, "loss": 0.5316, "step": 204 }, { "epoch": 0.05800398066533978, "eval_loss": 0.5341343879699707, "eval_runtime": 167.9533, "eval_samples_per_second": 
35.272, "eval_steps_per_second": 4.412, "step": 204 } ], "logging_steps": 3, "max_steps": 400, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 34, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 6.532800359222477e+16, "train_batch_size": 8, "trial_name": null, "trial_params": null }