{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.03443921478590288, "eval_steps": 25, "global_step": 75, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0004591895304787051, "grad_norm": 1.1419830322265625, "learning_rate": 3.3333333333333335e-05, "loss": 1.0269, "step": 1 }, { "epoch": 0.0004591895304787051, "eval_loss": 1.300444483757019, "eval_runtime": 461.8779, "eval_samples_per_second": 3.971, "eval_steps_per_second": 1.985, "step": 1 }, { "epoch": 0.0009183790609574102, "grad_norm": 1.1880234479904175, "learning_rate": 6.666666666666667e-05, "loss": 1.036, "step": 2 }, { "epoch": 0.0013775685914361152, "grad_norm": 1.1058253049850464, "learning_rate": 0.0001, "loss": 1.0533, "step": 3 }, { "epoch": 0.0018367581219148204, "grad_norm": 0.8739814758300781, "learning_rate": 9.99524110790929e-05, "loss": 1.0222, "step": 4 }, { "epoch": 0.0022959476523935255, "grad_norm": 1.1297332048416138, "learning_rate": 9.980973490458728e-05, "loss": 1.0771, "step": 5 }, { "epoch": 0.0027551371828722303, "grad_norm": 1.0218960046768188, "learning_rate": 9.957224306869053e-05, "loss": 0.9766, "step": 6 }, { "epoch": 0.0032143267133509356, "grad_norm": 0.9368105530738831, "learning_rate": 9.924038765061042e-05, "loss": 0.9208, "step": 7 }, { "epoch": 0.003673516243829641, "grad_norm": 0.7900261878967285, "learning_rate": 9.881480035599667e-05, "loss": 0.9225, "step": 8 }, { "epoch": 0.004132705774308346, "grad_norm": 0.7937513589859009, "learning_rate": 9.829629131445342e-05, "loss": 0.8457, "step": 9 }, { "epoch": 0.004591895304787051, "grad_norm": 0.7547521591186523, "learning_rate": 9.768584753741134e-05, "loss": 0.834, "step": 10 }, { "epoch": 0.005051084835265756, "grad_norm": 0.7325078845024109, "learning_rate": 9.698463103929542e-05, "loss": 0.8298, "step": 11 }, { "epoch": 0.005510274365744461, "grad_norm": 0.6860811114311218, "learning_rate": 9.619397662556435e-05, "loss": 0.8015, "step": 12 }, { "epoch": 0.005969463896223166, "grad_norm": 0.6838340759277344, "learning_rate": 9.53153893518325e-05, "loss": 0.7436, "step": 13 }, { "epoch": 0.006428653426701871, "grad_norm": 0.7004528045654297, "learning_rate": 9.435054165891109e-05, "loss": 0.7086, "step": 14 }, { "epoch": 0.006887842957180576, "grad_norm": 0.6048188209533691, "learning_rate": 9.330127018922194e-05, "loss": 0.7169, "step": 15 }, { "epoch": 0.007347032487659282, "grad_norm": 0.5844050049781799, "learning_rate": 9.21695722906443e-05, "loss": 0.707, "step": 16 }, { "epoch": 0.007806222018137987, "grad_norm": 0.5827935934066772, "learning_rate": 9.09576022144496e-05, "loss": 0.6838, "step": 17 }, { "epoch": 0.008265411548616692, "grad_norm": 0.6372345089912415, "learning_rate": 8.966766701456177e-05, "loss": 0.6679, "step": 18 }, { "epoch": 0.008724601079095397, "grad_norm": 0.6254321336746216, "learning_rate": 8.83022221559489e-05, "loss": 0.6719, "step": 19 }, { "epoch": 0.009183790609574102, "grad_norm": 0.6155533790588379, "learning_rate": 8.68638668405062e-05, "loss": 0.7132, "step": 20 }, { "epoch": 0.009642980140052807, "grad_norm": 0.5951682329177856, "learning_rate": 8.535533905932738e-05, "loss": 0.6667, "step": 21 }, { "epoch": 0.010102169670531512, "grad_norm": 0.5539524555206299, "learning_rate": 8.377951038078302e-05, "loss": 0.6211, "step": 22 }, { "epoch": 0.010561359201010216, "grad_norm": 0.5512757897377014, "learning_rate": 8.213938048432697e-05, "loss": 0.6512, "step": 23 }, { "epoch": 0.011020548731488921, "grad_norm": 
0.5660426616668701, "learning_rate": 8.043807145043604e-05, "loss": 0.6527, "step": 24 }, { "epoch": 0.011479738261967628, "grad_norm": 0.5754444003105164, "learning_rate": 7.86788218175523e-05, "loss": 0.6099, "step": 25 }, { "epoch": 0.011479738261967628, "eval_loss": 0.6897526979446411, "eval_runtime": 464.8934, "eval_samples_per_second": 3.945, "eval_steps_per_second": 1.972, "step": 25 }, { "epoch": 0.011938927792446333, "grad_norm": 0.5573870539665222, "learning_rate": 7.68649804173412e-05, "loss": 0.5653, "step": 26 }, { "epoch": 0.012398117322925038, "grad_norm": 0.5551594495773315, "learning_rate": 7.500000000000001e-05, "loss": 0.6139, "step": 27 }, { "epoch": 0.012857306853403742, "grad_norm": 0.593977153301239, "learning_rate": 7.308743066175172e-05, "loss": 0.6182, "step": 28 }, { "epoch": 0.013316496383882447, "grad_norm": 0.5621492266654968, "learning_rate": 7.113091308703498e-05, "loss": 0.5892, "step": 29 }, { "epoch": 0.013775685914361152, "grad_norm": 0.5908278822898865, "learning_rate": 6.91341716182545e-05, "loss": 0.5825, "step": 30 }, { "epoch": 0.014234875444839857, "grad_norm": 0.6430060863494873, "learning_rate": 6.710100716628344e-05, "loss": 0.6062, "step": 31 }, { "epoch": 0.014694064975318563, "grad_norm": 0.545280933380127, "learning_rate": 6.503528997521366e-05, "loss": 0.5722, "step": 32 }, { "epoch": 0.015153254505797268, "grad_norm": 0.652656614780426, "learning_rate": 6.294095225512603e-05, "loss": 0.6536, "step": 33 }, { "epoch": 0.015612444036275973, "grad_norm": 0.6264194250106812, "learning_rate": 6.0821980696905146e-05, "loss": 0.5823, "step": 34 }, { "epoch": 0.016071633566754676, "grad_norm": 0.62241530418396, "learning_rate": 5.868240888334653e-05, "loss": 0.5793, "step": 35 }, { "epoch": 0.016530823097233385, "grad_norm": 0.6142162084579468, "learning_rate": 5.6526309611002594e-05, "loss": 0.5762, "step": 36 }, { "epoch": 0.01699001262771209, "grad_norm": 0.5530565977096558, "learning_rate": 5.435778713738292e-05, "loss": 0.6092, "step": 37 }, { "epoch": 0.017449202158190794, "grad_norm": 0.6179258823394775, "learning_rate": 5.218096936826681e-05, "loss": 0.5751, "step": 38 }, { "epoch": 0.0179083916886695, "grad_norm": 0.6282462477684021, "learning_rate": 5e-05, "loss": 0.5948, "step": 39 }, { "epoch": 0.018367581219148204, "grad_norm": 0.6314778923988342, "learning_rate": 4.781903063173321e-05, "loss": 0.5389, "step": 40 }, { "epoch": 0.01882677074962691, "grad_norm": 0.7459428310394287, "learning_rate": 4.564221286261709e-05, "loss": 0.6414, "step": 41 }, { "epoch": 0.019285960280105614, "grad_norm": 0.725232720375061, "learning_rate": 4.347369038899744e-05, "loss": 0.6711, "step": 42 }, { "epoch": 0.01974514981058432, "grad_norm": 0.6939031481742859, "learning_rate": 4.131759111665349e-05, "loss": 0.5777, "step": 43 }, { "epoch": 0.020204339341063023, "grad_norm": 0.7400466799736023, "learning_rate": 3.917801930309486e-05, "loss": 0.641, "step": 44 }, { "epoch": 0.020663528871541728, "grad_norm": 0.7789513468742371, "learning_rate": 3.705904774487396e-05, "loss": 0.7023, "step": 45 }, { "epoch": 0.021122718402020433, "grad_norm": 0.7750358581542969, "learning_rate": 3.4964710024786354e-05, "loss": 0.7727, "step": 46 }, { "epoch": 0.021581907932499138, "grad_norm": 0.8089461922645569, "learning_rate": 3.289899283371657e-05, "loss": 0.749, "step": 47 }, { "epoch": 0.022041097462977843, "grad_norm": 0.8801223635673523, "learning_rate": 3.086582838174551e-05, "loss": 0.739, "step": 48 }, { "epoch": 0.022500286993456547, "grad_norm": 
0.9937826991081238, "learning_rate": 2.886908691296504e-05, "loss": 0.7846, "step": 49 }, { "epoch": 0.022959476523935256, "grad_norm": 1.058200716972351, "learning_rate": 2.6912569338248315e-05, "loss": 0.7814, "step": 50 }, { "epoch": 0.022959476523935256, "eval_loss": 0.6795315742492676, "eval_runtime": 464.6954, "eval_samples_per_second": 3.947, "eval_steps_per_second": 1.973, "step": 50 }, { "epoch": 0.02341866605441396, "grad_norm": 1.049940824508667, "learning_rate": 2.500000000000001e-05, "loss": 0.8132, "step": 51 }, { "epoch": 0.023877855584892665, "grad_norm": 1.0071134567260742, "learning_rate": 2.3135019582658802e-05, "loss": 0.7913, "step": 52 }, { "epoch": 0.02433704511537137, "grad_norm": 0.8402441740036011, "learning_rate": 2.132117818244771e-05, "loss": 0.7837, "step": 53 }, { "epoch": 0.024796234645850075, "grad_norm": 0.7800126075744629, "learning_rate": 1.9561928549563968e-05, "loss": 0.7519, "step": 54 }, { "epoch": 0.02525542417632878, "grad_norm": 0.6317635774612427, "learning_rate": 1.7860619515673033e-05, "loss": 0.7225, "step": 55 }, { "epoch": 0.025714613706807485, "grad_norm": 0.5558218359947205, "learning_rate": 1.622048961921699e-05, "loss": 0.7201, "step": 56 }, { "epoch": 0.02617380323728619, "grad_norm": 0.564495325088501, "learning_rate": 1.4644660940672627e-05, "loss": 0.7005, "step": 57 }, { "epoch": 0.026632992767764894, "grad_norm": 0.4901668131351471, "learning_rate": 1.3136133159493802e-05, "loss": 0.7271, "step": 58 }, { "epoch": 0.0270921822982436, "grad_norm": 0.47251802682876587, "learning_rate": 1.1697777844051105e-05, "loss": 0.6856, "step": 59 }, { "epoch": 0.027551371828722304, "grad_norm": 0.48661911487579346, "learning_rate": 1.0332332985438248e-05, "loss": 0.6638, "step": 60 }, { "epoch": 0.02801056135920101, "grad_norm": 0.5135563611984253, "learning_rate": 9.042397785550405e-06, "loss": 0.621, "step": 61 }, { "epoch": 0.028469750889679714, "grad_norm": 0.4911552965641022, "learning_rate": 7.830427709355725e-06, "loss": 0.6375, "step": 62 }, { "epoch": 0.02892894042015842, "grad_norm": 0.522821843624115, "learning_rate": 6.698729810778065e-06, "loss": 0.6292, "step": 63 }, { "epoch": 0.029388129950637127, "grad_norm": 0.48059335350990295, "learning_rate": 5.649458341088915e-06, "loss": 0.6706, "step": 64 }, { "epoch": 0.029847319481115832, "grad_norm": 0.5201355814933777, "learning_rate": 4.684610648167503e-06, "loss": 0.681, "step": 65 }, { "epoch": 0.030306509011594537, "grad_norm": 0.5210537910461426, "learning_rate": 3.8060233744356633e-06, "loss": 0.6545, "step": 66 }, { "epoch": 0.03076569854207324, "grad_norm": 0.5189217329025269, "learning_rate": 3.0153689607045845e-06, "loss": 0.6052, "step": 67 }, { "epoch": 0.031224888072551946, "grad_norm": 0.559215784072876, "learning_rate": 2.314152462588659e-06, "loss": 0.6467, "step": 68 }, { "epoch": 0.03168407760303065, "grad_norm": 0.5408816337585449, "learning_rate": 1.70370868554659e-06, "loss": 0.6004, "step": 69 }, { "epoch": 0.03214326713350935, "grad_norm": 0.5203858017921448, "learning_rate": 1.1851996440033319e-06, "loss": 0.6152, "step": 70 }, { "epoch": 0.03260245666398806, "grad_norm": 0.5649285316467285, "learning_rate": 7.596123493895991e-07, "loss": 0.5943, "step": 71 }, { "epoch": 0.03306164619446677, "grad_norm": 0.5590580105781555, "learning_rate": 4.277569313094809e-07, "loss": 0.6372, "step": 72 }, { "epoch": 0.03352083572494547, "grad_norm": 0.5505293011665344, "learning_rate": 1.9026509541272275e-07, "loss": 0.5813, "step": 73 }, { "epoch": 0.03398002525542418, 
"grad_norm": 0.5618359446525574, "learning_rate": 4.7588920907110094e-08, "loss": 0.6209, "step": 74 }, { "epoch": 0.03443921478590288, "grad_norm": 0.5541989207267761, "learning_rate": 0.0, "loss": 0.6078, "step": 75 }, { "epoch": 0.03443921478590288, "eval_loss": 0.6394286155700684, "eval_runtime": 464.4129, "eval_samples_per_second": 3.949, "eval_steps_per_second": 1.975, "step": 75 } ], "logging_steps": 1, "max_steps": 75, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.119015678246912e+17, "train_batch_size": 2, "trial_name": null, "trial_params": null }