{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "global_step": 52560,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 1.9809741248097414e-05,
      "loss": 0.6459,
      "step": 500
    },
    {
      "epoch": 0.06,
      "learning_rate": 1.9619482496194826e-05,
      "loss": 0.4121,
      "step": 1000
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.9429223744292238e-05,
      "loss": 0.3439,
      "step": 1500
    },
    {
      "epoch": 0.11,
      "learning_rate": 1.923896499238965e-05,
      "loss": 0.3113,
      "step": 2000
    },
    {
      "epoch": 0.14,
      "learning_rate": 1.9048706240487066e-05,
      "loss": 0.2776,
      "step": 2500
    },
    {
      "epoch": 0.17,
      "learning_rate": 1.8858447488584478e-05,
      "loss": 0.2665,
      "step": 3000
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.866818873668189e-05,
      "loss": 0.2538,
      "step": 3500
    },
    {
      "epoch": 0.23,
      "learning_rate": 1.8477929984779303e-05,
      "loss": 0.2583,
      "step": 4000
    },
    {
      "epoch": 0.26,
      "learning_rate": 1.8287671232876715e-05,
      "loss": 0.2515,
      "step": 4500
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.8097412480974127e-05,
      "loss": 0.218,
      "step": 5000
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.790715372907154e-05,
      "loss": 0.2041,
      "step": 5500
    },
    {
      "epoch": 0.34,
      "learning_rate": 1.771689497716895e-05,
      "loss": 0.1916,
      "step": 6000
    },
    {
      "epoch": 0.37,
      "learning_rate": 1.7526636225266364e-05,
      "loss": 0.1879,
      "step": 6500
    },
    {
      "epoch": 0.4,
      "learning_rate": 1.7336377473363776e-05,
      "loss": 0.2072,
      "step": 7000
    },
    {
      "epoch": 0.43,
      "learning_rate": 1.7146118721461188e-05,
      "loss": 0.2066,
      "step": 7500
    },
    {
      "epoch": 0.46,
      "learning_rate": 1.69558599695586e-05,
      "loss": 0.2069,
      "step": 8000
    },
    {
      "epoch": 0.49,
      "learning_rate": 1.6765601217656012e-05,
      "loss": 0.1913,
      "step": 8500
    },
    {
      "epoch": 0.51,
      "learning_rate": 1.6575342465753425e-05,
      "loss": 0.1942,
      "step": 9000
    },
    {
      "epoch": 0.54,
      "learning_rate": 1.6385083713850837e-05,
      "loss": 0.171,
      "step": 9500
    },
    {
      "epoch": 0.57,
      "learning_rate": 1.619482496194825e-05,
      "loss": 0.1609,
      "step": 10000
    },
    {
      "epoch": 0.6,
      "learning_rate": 1.6004566210045664e-05,
      "loss": 0.1736,
      "step": 10500
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.5814307458143077e-05,
      "loss": 0.1696,
      "step": 11000
    },
    {
      "epoch": 0.66,
      "learning_rate": 1.562404870624049e-05,
      "loss": 0.1559,
      "step": 11500
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.54337899543379e-05,
      "loss": 0.1577,
      "step": 12000
    },
    {
      "epoch": 0.71,
      "learning_rate": 1.5243531202435313e-05,
      "loss": 0.151,
      "step": 12500
    },
    {
      "epoch": 0.74,
      "learning_rate": 1.5053272450532725e-05,
      "loss": 0.147,
      "step": 13000
    },
    {
      "epoch": 0.77,
      "learning_rate": 1.4863013698630138e-05,
      "loss": 0.145,
      "step": 13500
    },
    {
      "epoch": 0.8,
      "learning_rate": 1.467275494672755e-05,
      "loss": 0.163,
      "step": 14000
    },
    {
      "epoch": 0.83,
      "learning_rate": 1.4482496194824962e-05,
      "loss": 0.162,
      "step": 14500
    },
    {
      "epoch": 0.86,
      "learning_rate": 1.4292237442922376e-05,
      "loss": 0.1484,
      "step": 15000
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.4101978691019788e-05,
      "loss": 0.1385,
      "step": 15500
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.39117199391172e-05,
      "loss": 0.1436,
      "step": 16000
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.3721461187214612e-05,
      "loss": 0.1292,
      "step": 16500
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.3531202435312025e-05,
      "loss": 0.1482,
      "step": 17000
    },
    {
      "epoch": 1.0,
      "learning_rate": 1.3340943683409437e-05,
      "loss": 0.1274,
      "step": 17500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9567564969397891,
      "eval_f1": 0.8423064061529643,
      "eval_loss": 0.14508096873760223,
      "eval_precision": 0.8260095568256348,
      "eval_recall": 0.8592592592592593,
      "eval_runtime": 35.0181,
      "eval_samples_per_second": 250.156,
      "eval_steps_per_second": 62.539,
      "step": 17520
    },
    {
      "epoch": 1.03,
      "learning_rate": 1.3150684931506849e-05,
      "loss": 0.1016,
      "step": 18000
    },
    {
      "epoch": 1.06,
      "learning_rate": 1.2960426179604261e-05,
      "loss": 0.0952,
      "step": 18500
    },
    {
      "epoch": 1.08,
      "learning_rate": 1.2770167427701677e-05,
      "loss": 0.0896,
      "step": 19000
    },
    {
      "epoch": 1.11,
      "learning_rate": 1.2579908675799089e-05,
      "loss": 0.1053,
      "step": 19500
    },
    {
      "epoch": 1.14,
      "learning_rate": 1.2389649923896501e-05,
      "loss": 0.0977,
      "step": 20000
    },
    {
      "epoch": 1.17,
      "learning_rate": 1.2199391171993913e-05,
      "loss": 0.0992,
      "step": 20500
    },
    {
      "epoch": 1.2,
      "learning_rate": 1.2009132420091326e-05,
      "loss": 0.0977,
      "step": 21000
    },
    {
      "epoch": 1.23,
      "learning_rate": 1.1818873668188738e-05,
      "loss": 0.0809,
      "step": 21500
    },
    {
      "epoch": 1.26,
      "learning_rate": 1.162861491628615e-05,
      "loss": 0.09,
      "step": 22000
    },
    {
      "epoch": 1.28,
      "learning_rate": 1.1438356164383562e-05,
      "loss": 0.0993,
      "step": 22500
    },
    {
      "epoch": 1.31,
      "learning_rate": 1.1248097412480976e-05,
      "loss": 0.0991,
      "step": 23000
    },
    {
      "epoch": 1.34,
      "learning_rate": 1.1057838660578388e-05,
      "loss": 0.0964,
      "step": 23500
    },
    {
      "epoch": 1.37,
      "learning_rate": 1.08675799086758e-05,
      "loss": 0.0836,
      "step": 24000
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.0677321156773213e-05,
      "loss": 0.0969,
      "step": 24500
    },
    {
      "epoch": 1.43,
      "learning_rate": 1.0487062404870625e-05,
      "loss": 0.0819,
      "step": 25000
    },
    {
      "epoch": 1.46,
      "learning_rate": 1.0296803652968037e-05,
      "loss": 0.1056,
      "step": 25500
    },
    {
      "epoch": 1.48,
      "learning_rate": 1.010654490106545e-05,
      "loss": 0.0796,
      "step": 26000
    },
    {
      "epoch": 1.51,
      "learning_rate": 9.916286149162863e-06,
      "loss": 0.077,
      "step": 26500
    },
    {
      "epoch": 1.54,
      "learning_rate": 9.726027397260275e-06,
      "loss": 0.0737,
      "step": 27000
    },
    {
      "epoch": 1.57,
      "learning_rate": 9.535768645357688e-06,
      "loss": 0.0968,
      "step": 27500
    },
    {
      "epoch": 1.6,
      "learning_rate": 9.3455098934551e-06,
      "loss": 0.0774,
      "step": 28000
    },
    {
      "epoch": 1.63,
      "learning_rate": 9.155251141552512e-06,
      "loss": 0.0734,
      "step": 28500
    },
    {
      "epoch": 1.66,
      "learning_rate": 8.964992389649924e-06,
      "loss": 0.0792,
      "step": 29000
    },
    {
      "epoch": 1.68,
      "learning_rate": 8.774733637747336e-06,
      "loss": 0.0771,
      "step": 29500
    },
    {
      "epoch": 1.71,
      "learning_rate": 8.584474885844748e-06,
      "loss": 0.076,
      "step": 30000
    },
    {
      "epoch": 1.74,
      "learning_rate": 8.394216133942162e-06,
      "loss": 0.0794,
      "step": 30500
    },
    {
      "epoch": 1.77,
      "learning_rate": 8.203957382039575e-06,
      "loss": 0.0842,
      "step": 31000
    },
    {
      "epoch": 1.8,
      "learning_rate": 8.013698630136987e-06,
      "loss": 0.0831,
      "step": 31500
    },
    {
      "epoch": 1.83,
      "learning_rate": 7.823439878234399e-06,
      "loss": 0.0804,
      "step": 32000
    },
    {
      "epoch": 1.86,
      "learning_rate": 7.633181126331813e-06,
      "loss": 0.081,
      "step": 32500
    },
    {
      "epoch": 1.88,
      "learning_rate": 7.442922374429224e-06,
      "loss": 0.0731,
      "step": 33000
    },
    {
      "epoch": 1.91,
      "learning_rate": 7.252663622526636e-06,
      "loss": 0.0615,
      "step": 33500
    },
    {
      "epoch": 1.94,
      "learning_rate": 7.0624048706240486e-06,
      "loss": 0.0823,
      "step": 34000
    },
    {
      "epoch": 1.97,
      "learning_rate": 6.8721461187214625e-06,
      "loss": 0.0613,
      "step": 34500
    },
    {
      "epoch": 2.0,
      "learning_rate": 6.681887366818875e-06,
      "loss": 0.0777,
      "step": 35000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9699693444846775,
      "eval_f1": 0.8876556316957822,
      "eval_loss": 0.11980047076940536,
      "eval_precision": 0.8790862167845537,
      "eval_recall": 0.8963937621832359,
      "eval_runtime": 35.3315,
      "eval_samples_per_second": 247.938,
      "eval_steps_per_second": 61.984,
      "step": 35040
    },
    {
      "epoch": 2.03,
      "learning_rate": 6.491628614916287e-06,
      "loss": 0.0397,
      "step": 35500
    },
    {
      "epoch": 2.05,
      "learning_rate": 6.301369863013699e-06,
      "loss": 0.0581,
      "step": 36000
    },
    {
      "epoch": 2.08,
      "learning_rate": 6.111111111111112e-06,
      "loss": 0.0481,
      "step": 36500
    },
    {
      "epoch": 2.11,
      "learning_rate": 5.920852359208524e-06,
      "loss": 0.042,
      "step": 37000
    },
    {
      "epoch": 2.14,
      "learning_rate": 5.7305936073059365e-06,
      "loss": 0.0472,
      "step": 37500
    },
    {
      "epoch": 2.17,
      "learning_rate": 5.540334855403349e-06,
      "loss": 0.046,
      "step": 38000
    },
    {
      "epoch": 2.2,
      "learning_rate": 5.350076103500762e-06,
      "loss": 0.042,
      "step": 38500
    },
    {
      "epoch": 2.23,
      "learning_rate": 5.159817351598174e-06,
      "loss": 0.0428,
      "step": 39000
    },
    {
      "epoch": 2.25,
      "learning_rate": 4.969558599695586e-06,
      "loss": 0.0458,
      "step": 39500
    },
    {
      "epoch": 2.28,
      "learning_rate": 4.779299847792998e-06,
      "loss": 0.0371,
      "step": 40000
    },
    {
      "epoch": 2.31,
      "learning_rate": 4.589041095890411e-06,
      "loss": 0.0582,
      "step": 40500
    },
    {
      "epoch": 2.34,
      "learning_rate": 4.3987823439878235e-06,
      "loss": 0.04,
      "step": 41000
    },
    {
      "epoch": 2.37,
      "learning_rate": 4.2085235920852366e-06,
      "loss": 0.0559,
      "step": 41500
    },
    {
      "epoch": 2.4,
      "learning_rate": 4.018264840182649e-06,
      "loss": 0.0442,
      "step": 42000
    },
    {
      "epoch": 2.43,
      "learning_rate": 3.828006088280061e-06,
      "loss": 0.0482,
      "step": 42500
    },
    {
      "epoch": 2.45,
      "learning_rate": 3.6377473363774736e-06,
      "loss": 0.0434,
      "step": 43000
    },
    {
      "epoch": 2.48,
      "learning_rate": 3.447488584474886e-06,
      "loss": 0.0457,
      "step": 43500
    },
    {
      "epoch": 2.51,
      "learning_rate": 3.2572298325722984e-06,
      "loss": 0.0404,
      "step": 44000
    },
    {
      "epoch": 2.54,
      "learning_rate": 3.066971080669711e-06,
      "loss": 0.0521,
      "step": 44500
    },
    {
      "epoch": 2.57,
      "learning_rate": 2.876712328767123e-06,
      "loss": 0.0605,
      "step": 45000
    },
    {
      "epoch": 2.6,
      "learning_rate": 2.6864535768645362e-06,
      "loss": 0.05,
      "step": 45500
    },
    {
      "epoch": 2.63,
      "learning_rate": 2.4961948249619484e-06,
      "loss": 0.0471,
      "step": 46000
    },
    {
      "epoch": 2.65,
      "learning_rate": 2.3059360730593606e-06,
      "loss": 0.0403,
      "step": 46500
    },
    {
      "epoch": 2.68,
      "learning_rate": 2.1156773211567732e-06,
      "loss": 0.0533,
      "step": 47000
    },
    {
      "epoch": 2.71,
      "learning_rate": 1.925418569254186e-06,
      "loss": 0.04,
      "step": 47500
    },
    {
      "epoch": 2.74,
      "learning_rate": 1.7351598173515982e-06,
      "loss": 0.0435,
      "step": 48000
    },
    {
      "epoch": 2.77,
      "learning_rate": 1.5449010654490107e-06,
      "loss": 0.0417,
      "step": 48500
    },
    {
      "epoch": 2.8,
      "learning_rate": 1.3546423135464233e-06,
      "loss": 0.036,
      "step": 49000
    },
    {
      "epoch": 2.83,
      "learning_rate": 1.1643835616438357e-06,
      "loss": 0.0364,
      "step": 49500
    },
    {
      "epoch": 2.85,
      "learning_rate": 9.74124809741248e-07,
      "loss": 0.0339,
      "step": 50000
    },
    {
      "epoch": 2.88,
      "learning_rate": 7.838660578386606e-07,
      "loss": 0.0291,
      "step": 50500
    },
    {
      "epoch": 2.91,
      "learning_rate": 5.936073059360731e-07,
      "loss": 0.0351,
      "step": 51000
    },
    {
      "epoch": 2.94,
      "learning_rate": 4.0334855403348556e-07,
      "loss": 0.0393,
      "step": 51500
    },
    {
      "epoch": 2.97,
      "learning_rate": 2.1308980213089802e-07,
      "loss": 0.0342,
      "step": 52000
    },
    {
      "epoch": 3.0,
      "learning_rate": 2.2831050228310502e-08,
      "loss": 0.0438,
      "step": 52500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9766719005351363,
      "eval_f1": 0.9076281864883202,
      "eval_loss": 0.10344000160694122,
      "eval_precision": 0.9026412184306921,
      "eval_recall": 0.9126705653021443,
      "eval_runtime": 35.4439,
      "eval_samples_per_second": 247.151,
      "eval_steps_per_second": 61.788,
      "step": 52560
    }
  ],
  "max_steps": 52560,
  "num_train_epochs": 3,
  "total_flos": 4848998991932040.0,
  "trial_name": null,
  "trial_params": null
}