{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "global_step": 87,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 3e-05,
      "loss": 3.1406,
      "step": 1
    },
    {
      "epoch": 0.03,
      "eval_accuracy": 0.04514027769841723,
      "eval_loss": 3.076171875,
      "eval_runtime": 2.8635,
      "eval_samples_per_second": 30.033,
      "eval_steps_per_second": 2.095,
      "step": 1
    },
    {
      "epoch": 0.07,
      "learning_rate": 3e-05,
      "loss": 3.074,
      "step": 2
    },
    {
      "epoch": 0.07,
      "eval_accuracy": 0.04514027769841723,
      "eval_loss": 3.076171875,
      "eval_runtime": 2.8038,
      "eval_samples_per_second": 30.673,
      "eval_steps_per_second": 2.14,
      "step": 2
    },
    {
      "epoch": 0.1,
      "learning_rate": 3e-05,
      "loss": 3.0557,
      "step": 3
    },
    {
      "epoch": 0.1,
      "eval_accuracy": 0.04514027769841723,
      "eval_loss": 3.076171875,
      "eval_runtime": 1.812,
      "eval_samples_per_second": 47.46,
      "eval_steps_per_second": 3.311,
      "step": 3
    },
    {
      "epoch": 0.14,
      "learning_rate": 2.9990221430845156e-05,
      "loss": 3.2166,
      "step": 4
    },
    {
      "epoch": 0.14,
      "eval_accuracy": 0.045697388720644536,
      "eval_loss": 3.017578125,
      "eval_runtime": 1.7487,
      "eval_samples_per_second": 49.181,
      "eval_steps_per_second": 3.431,
      "step": 4
    },
    {
      "epoch": 0.17,
      "learning_rate": 2.996089847276925e-05,
      "loss": 3.0989,
      "step": 5
    },
    {
      "epoch": 0.17,
      "eval_accuracy": 0.04601165647677276,
      "eval_loss": 2.9921875,
      "eval_runtime": 2.7561,
      "eval_samples_per_second": 31.204,
      "eval_steps_per_second": 2.177,
      "step": 5
    },
    {
      "epoch": 0.21,
      "learning_rate": 2.9912069357315394e-05,
      "loss": 3.0732,
      "step": 6
    },
    {
      "epoch": 0.21,
      "eval_accuracy": 0.04635449402891263,
      "eval_loss": 2.974609375,
      "eval_runtime": 2.7568,
      "eval_samples_per_second": 31.196,
      "eval_steps_per_second": 2.176,
      "step": 6
    },
    {
      "epoch": 0.24,
      "learning_rate": 2.9843797748334563e-05,
      "loss": 3.0867,
      "step": 7
    },
    {
      "epoch": 0.24,
      "eval_accuracy": 0.04628306953888349,
      "eval_loss": 2.962890625,
      "eval_runtime": 1.7772,
      "eval_samples_per_second": 48.39,
      "eval_steps_per_second": 3.376,
      "step": 7
    },
    {
      "epoch": 0.28,
      "learning_rate": 2.975617265898004e-05,
      "loss": 2.979,
      "step": 8
    },
    {
      "epoch": 0.28,
      "eval_accuracy": 0.04668304668304668,
      "eval_loss": 2.951171875,
      "eval_runtime": 2.0751,
      "eval_samples_per_second": 41.444,
      "eval_steps_per_second": 2.891,
      "step": 8
    },
    {
      "epoch": 0.31,
      "learning_rate": 2.96493083356513e-05,
      "loss": 3.1838,
      "step": 9
    },
    {
      "epoch": 0.31,
      "eval_accuracy": 0.04668304668304668,
      "eval_loss": 2.94140625,
      "eval_runtime": 2.063,
      "eval_samples_per_second": 41.687,
      "eval_steps_per_second": 2.908,
      "step": 9
    },
    {
      "epoch": 0.34,
      "learning_rate": 2.952334410903845e-05,
      "loss": 2.9399,
      "step": 10
    },
    {
      "epoch": 0.34,
      "eval_accuracy": 0.04674018627507,
      "eval_loss": 2.93359375,
      "eval_runtime": 1.7666,
      "eval_samples_per_second": 48.681,
      "eval_steps_per_second": 3.396,
      "step": 10
    },
    {
      "epoch": 0.38,
      "learning_rate": 2.937844421246162e-05,
      "loss": 2.926,
      "step": 11
    },
    {
      "epoch": 0.38,
      "eval_accuracy": 0.047125878521227356,
      "eval_loss": 2.92578125,
      "eval_runtime": 2.0537,
      "eval_samples_per_second": 41.875,
      "eval_steps_per_second": 2.921,
      "step": 11
    },
    {
      "epoch": 0.41,
      "learning_rate": 2.9214797567742036e-05,
      "loss": 3.2144,
      "step": 12
    },
    {
      "epoch": 0.41,
      "eval_accuracy": 0.047254442603279816,
      "eval_loss": 2.919921875,
      "eval_runtime": 1.7851,
      "eval_samples_per_second": 48.176,
      "eval_steps_per_second": 3.361,
      "step": 12
    },
    {
      "epoch": 0.45,
      "learning_rate": 2.9032617538884018e-05,
      "loss": 2.978,
      "step": 13
    },
    {
      "epoch": 0.45,
      "eval_accuracy": 0.04735443688932061,
      "eval_loss": 2.9140625,
      "eval_runtime": 2.7591,
      "eval_samples_per_second": 31.17,
      "eval_steps_per_second": 2.175,
      "step": 13
    },
    {
      "epoch": 0.48,
      "learning_rate": 2.8832141653888998e-05,
      "loss": 3.0076,
      "step": 14
    },
    {
      "epoch": 0.48,
      "eval_accuracy": 0.047640134849437174,
      "eval_loss": 2.908203125,
      "eval_runtime": 2.6767,
      "eval_samples_per_second": 32.129,
      "eval_steps_per_second": 2.242,
      "step": 14
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.861363129506436e-05,
      "loss": 2.9897,
      "step": 15
    },
    {
      "epoch": 0.52,
      "eval_accuracy": 0.04772584423747214,
      "eval_loss": 2.90234375,
      "eval_runtime": 1.7524,
      "eval_samples_per_second": 49.075,
      "eval_steps_per_second": 3.424,
      "step": 15
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.8377371358230733e-05,
      "loss": 2.8831,
      "step": 16
    },
    {
      "epoch": 0.55,
      "eval_accuracy": 0.04792583280955374,
      "eval_loss": 2.89453125,
      "eval_runtime": 2.0593,
      "eval_samples_per_second": 41.762,
      "eval_steps_per_second": 2.914,
      "step": 16
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.8123669881272247e-05,
      "loss": 2.9749,
      "step": 17
    },
    {
      "epoch": 0.59,
      "eval_accuracy": 0.04786869321753043,
      "eval_loss": 2.88671875,
      "eval_runtime": 2.7597,
      "eval_samples_per_second": 31.163,
      "eval_steps_per_second": 2.174,
      "step": 17
    },
    {
      "epoch": 0.62,
      "learning_rate": 2.7852857642513838e-05,
      "loss": 2.9431,
      "step": 18
    },
    {
      "epoch": 0.62,
      "eval_accuracy": 0.04776869893148963,
      "eval_loss": 2.8828125,
      "eval_runtime": 1.7792,
      "eval_samples_per_second": 48.336,
      "eval_steps_per_second": 3.372,
      "step": 18
    },
    {
      "epoch": 0.66,
      "learning_rate": 2.7565287729449473e-05,
      "loss": 3.0498,
      "step": 19
    },
    {
      "epoch": 0.66,
      "eval_accuracy": 0.047882978115536254,
      "eval_loss": 2.876953125,
      "eval_runtime": 2.7592,
      "eval_samples_per_second": 31.168,
      "eval_steps_per_second": 2.175,
      "step": 19
    },
    {
      "epoch": 0.69,
      "learning_rate": 2.7261335078383377e-05,
      "loss": 2.9409,
      "step": 20
    },
    {
      "epoch": 0.69,
      "eval_accuracy": 0.047854408319524595,
      "eval_loss": 2.87109375,
      "eval_runtime": 1.7642,
      "eval_samples_per_second": 48.747,
      "eval_steps_per_second": 3.401,
      "step": 20
    },
    {
      "epoch": 0.72,
      "learning_rate": 2.6941395985584656e-05,
      "loss": 2.96,
      "step": 21
    },
    {
      "epoch": 0.72,
      "eval_accuracy": 0.04802582709559454,
      "eval_loss": 2.8671875,
      "eval_runtime": 1.7801,
      "eval_samples_per_second": 48.313,
      "eval_steps_per_second": 3.371,
      "step": 21
    },
    {
      "epoch": 0.76,
      "learning_rate": 2.6605887590592547e-05,
      "loss": 3.0767,
      "step": 22
    },
    {
      "epoch": 0.76,
      "eval_accuracy": 0.04784012342151877,
      "eval_loss": 2.86328125,
      "eval_runtime": 2.3678,
      "eval_samples_per_second": 36.32,
      "eval_steps_per_second": 2.534,
      "step": 22
    },
    {
      "epoch": 0.79,
      "learning_rate": 2.6255247332346036e-05,
      "loss": 2.772,
      "step": 23
    },
    {
      "epoch": 0.79,
      "eval_accuracy": 0.04792583280955374,
      "eval_loss": 2.859375,
      "eval_runtime": 2.7833,
      "eval_samples_per_second": 30.899,
      "eval_steps_per_second": 2.156,
      "step": 23
    },
    {
      "epoch": 0.83,
      "learning_rate": 2.5889932378846963e-05,
      "loss": 3.0574,
      "step": 24
    },
    {
      "epoch": 0.83,
      "eval_accuracy": 0.04801154219758871,
      "eval_loss": 2.853515625,
      "eval_runtime": 2.7758,
      "eval_samples_per_second": 30.982,
      "eval_steps_per_second": 2.162,
      "step": 24
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.5510419031100137e-05,
      "loss": 2.8137,
      "step": 25
    },
    {
      "epoch": 0.86,
      "eval_accuracy": 0.048040111993600365,
      "eval_loss": 2.849609375,
      "eval_runtime": 2.7701,
      "eval_samples_per_second": 31.046,
      "eval_steps_per_second": 2.166,
      "step": 25
    },
    {
      "epoch": 0.9,
      "learning_rate": 2.5117202102107707e-05,
      "loss": 2.8872,
      "step": 26
    },
    {
      "epoch": 0.9,
      "eval_accuracy": 0.048282955259699445,
      "eval_loss": 2.84375,
      "eval_runtime": 1.7514,
      "eval_samples_per_second": 49.103,
      "eval_steps_per_second": 3.426,
      "step": 26
    },
    {
      "epoch": 0.93,
      "learning_rate": 2.4710794271727415e-05,
      "loss": 3.0085,
      "step": 27
    },
    {
      "epoch": 0.93,
      "eval_accuracy": 0.0484115193417519,
      "eval_loss": 2.83984375,
      "eval_runtime": 1.7625,
      "eval_samples_per_second": 48.794,
      "eval_steps_per_second": 3.404,
      "step": 27
    },
    {
      "epoch": 0.97,
      "learning_rate": 2.4291725418235848e-05,
      "loss": 2.9165,
      "step": 28
    },
    {
      "epoch": 0.97,
      "eval_accuracy": 0.0485115136277927,
      "eval_loss": 2.8359375,
      "eval_runtime": 2.0562,
      "eval_samples_per_second": 41.824,
      "eval_steps_per_second": 2.918,
      "step": 28
    },
    {
      "epoch": 1.0,
      "learning_rate": 2.3860541927468265e-05,
      "loss": 2.8525,
      "step": 29
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.04861150791383349,
      "eval_loss": 2.833984375,
      "eval_runtime": 2.0728,
      "eval_samples_per_second": 41.49,
      "eval_steps_per_second": 2.895,
      "step": 29
    },
    {
      "epoch": 1.03,
      "learning_rate": 2.341780598043574e-05,
      "loss": 2.7759,
      "step": 30
    },
    {
      "epoch": 1.03,
      "eval_accuracy": 0.04845437403576938,
      "eval_loss": 2.830078125,
      "eval_runtime": 1.7727,
      "eval_samples_per_second": 48.512,
      "eval_steps_per_second": 3.385,
      "step": 30
    },
    {
      "epoch": 1.07,
      "learning_rate": 2.2964094820348302e-05,
      "loss": 2.7312,
      "step": 31
    },
    {
      "epoch": 1.07,
      "eval_accuracy": 0.048497228729786866,
      "eval_loss": 2.828125,
      "eval_runtime": 1.7542,
      "eval_samples_per_second": 49.026,
      "eval_steps_per_second": 3.42,
      "step": 31
    },
    {
      "epoch": 1.1,
      "learning_rate": 2.25e-05,
      "loss": 2.6641,
      "step": 32
    },
    {
      "epoch": 1.1,
      "eval_accuracy": 0.04866864750585681,
      "eval_loss": 2.826171875,
      "eval_runtime": 2.6551,
      "eval_samples_per_second": 32.39,
      "eval_steps_per_second": 2.26,
      "step": 32
    },
    {
      "epoch": 1.14,
      "learning_rate": 2.2026126610496852e-05,
      "loss": 2.7896,
      "step": 33
    },
    {
      "epoch": 1.14,
      "eval_accuracy": 0.04855436832181018,
      "eval_loss": 2.82421875,
      "eval_runtime": 1.7664,
      "eval_samples_per_second": 48.686,
      "eval_steps_per_second": 3.397,
      "step": 33
    },
    {
      "epoch": 1.17,
      "learning_rate": 2.154309249233351e-05,
      "loss": 2.7878,
      "step": 34
    },
    {
      "epoch": 1.17,
      "eval_accuracy": 0.04866864750585681,
      "eval_loss": 2.822265625,
      "eval_runtime": 1.7584,
      "eval_samples_per_second": 48.909,
      "eval_steps_per_second": 3.412,
      "step": 34
    },
    {
      "epoch": 1.21,
      "learning_rate": 2.105152742984713e-05,
      "loss": 2.4028,
      "step": 35
    },
    {
      "epoch": 1.21,
      "eval_accuracy": 0.04869721730186846,
      "eval_loss": 2.8203125,
      "eval_runtime": 2.7671,
      "eval_samples_per_second": 31.08,
      "eval_steps_per_second": 2.168,
      "step": 35
    },
    {
      "epoch": 1.24,
      "learning_rate": 2.055207233009872e-05,
      "loss": 2.5618,
      "step": 36
    },
    {
      "epoch": 1.24,
      "eval_accuracy": 0.04878292668990344,
      "eval_loss": 2.818359375,
      "eval_runtime": 1.7727,
      "eval_samples_per_second": 48.514,
      "eval_steps_per_second": 3.385,
      "step": 36
    },
    {
      "epoch": 1.28,
      "learning_rate": 2.0045378387252624e-05,
      "loss": 2.6697,
      "step": 37
    },
    {
      "epoch": 1.28,
      "eval_accuracy": 0.04875435689389178,
      "eval_loss": 2.81640625,
      "eval_runtime": 2.7791,
      "eval_samples_per_second": 30.945,
      "eval_steps_per_second": 2.159,
      "step": 37
    },
    {
      "epoch": 1.31,
      "learning_rate": 1.953210623354359e-05,
      "loss": 2.6333,
      "step": 38
    },
    {
      "epoch": 1.31,
      "eval_accuracy": 0.048711502199874294,
      "eval_loss": 2.814453125,
      "eval_runtime": 2.7731,
      "eval_samples_per_second": 31.012,
      "eval_steps_per_second": 2.164,
      "step": 38
    },
    {
      "epoch": 1.34,
      "learning_rate": 1.9012925077938318e-05,
      "loss": 2.4897,
      "step": 39
    },
    {
      "epoch": 1.34,
      "eval_accuracy": 0.04858293811782184,
      "eval_loss": 2.8125,
      "eval_runtime": 2.7903,
      "eval_samples_per_second": 30.821,
      "eval_steps_per_second": 2.15,
      "step": 39
    },
    {
      "epoch": 1.38,
      "learning_rate": 1.848851183361466e-05,
      "loss": 2.4908,
      "step": 40
    },
    {
      "epoch": 1.38,
      "eval_accuracy": 0.04865436260785098,
      "eval_loss": 2.810546875,
      "eval_runtime": 2.7759,
      "eval_samples_per_second": 30.981,
      "eval_steps_per_second": 2.161,
      "step": 40
    },
    {
      "epoch": 1.41,
      "learning_rate": 1.7959550235396002e-05,
      "loss": 2.6926,
      "step": 41
    },
    {
      "epoch": 1.41,
      "eval_accuracy": 0.048768641791897605,
      "eval_loss": 2.80859375,
      "eval_runtime": 2.6681,
      "eval_samples_per_second": 32.233,
      "eval_steps_per_second": 2.249,
      "step": 41
    },
    {
      "epoch": 1.45,
      "learning_rate": 1.7426729948291474e-05,
      "loss": 2.6602,
      "step": 42
    },
    {
      "epoch": 1.45,
      "eval_accuracy": 0.048868636077938406,
      "eval_loss": 2.806640625,
      "eval_runtime": 1.7824,
      "eval_samples_per_second": 48.25,
      "eval_steps_per_second": 3.366,
      "step": 42
    },
    {
      "epoch": 1.48,
      "learning_rate": 1.689074566830434e-05,
      "loss": 2.8054,
      "step": 43
    },
    {
      "epoch": 1.48,
      "eval_accuracy": 0.04885435117993257,
      "eval_loss": 2.8046875,
      "eval_runtime": 2.6637,
      "eval_samples_per_second": 32.286,
      "eval_steps_per_second": 2.253,
      "step": 43
    },
    {
      "epoch": 1.52,
      "learning_rate": 1.635229621668098e-05,
      "loss": 2.5532,
      "step": 44
    },
    {
      "epoch": 1.52,
      "eval_accuracy": 0.048954345465973374,
      "eval_loss": 2.8046875,
      "eval_runtime": 2.7813,
      "eval_samples_per_second": 30.921,
      "eval_steps_per_second": 2.157,
      "step": 44
    },
    {
      "epoch": 1.55,
      "learning_rate": 1.5812083628781265e-05,
      "loss": 2.4756,
      "step": 45
    },
    {
      "epoch": 1.55,
      "eval_accuracy": 0.04906862465002,
      "eval_loss": 2.802734375,
      "eval_runtime": 2.3836,
      "eval_samples_per_second": 36.08,
      "eval_steps_per_second": 2.517,
      "step": 45
    },
    {
      "epoch": 1.59,
      "learning_rate": 1.5270812238758407e-05,
      "loss": 2.6123,
      "step": 46
    },
    {
      "epoch": 1.59,
      "eval_accuracy": 0.04912576424204331,
      "eval_loss": 2.80078125,
      "eval_runtime": 1.7527,
      "eval_samples_per_second": 49.068,
      "eval_steps_per_second": 3.423,
      "step": 46
    },
    {
      "epoch": 1.62,
      "learning_rate": 1.4729187761241592e-05,
      "loss": 2.5117,
      "step": 47
    },
    {
      "epoch": 1.62,
      "eval_accuracy": 0.04902576995600252,
      "eval_loss": 2.798828125,
      "eval_runtime": 1.7703,
      "eval_samples_per_second": 48.579,
      "eval_steps_per_second": 3.389,
      "step": 47
    },
    {
      "epoch": 1.66,
      "learning_rate": 1.4187916371218739e-05,
      "loss": 2.5552,
      "step": 48
    },
    {
      "epoch": 1.66,
      "eval_accuracy": 0.04904005485400834,
      "eval_loss": 2.796875,
      "eval_runtime": 2.7807,
      "eval_samples_per_second": 30.928,
      "eval_steps_per_second": 2.158,
      "step": 48
    },
    {
      "epoch": 1.69,
      "learning_rate": 1.3647703783319022e-05,
      "loss": 2.5122,
      "step": 49
    },
    {
      "epoch": 1.69,
      "eval_accuracy": 0.04902576995600252,
      "eval_loss": 2.794921875,
      "eval_runtime": 1.7675,
      "eval_samples_per_second": 48.658,
      "eval_steps_per_second": 3.395,
      "step": 49
    },
    {
      "epoch": 1.72,
      "learning_rate": 1.310925433169566e-05,
      "loss": 2.5593,
      "step": 50
    },
    {
      "epoch": 1.72,
      "eval_accuracy": 0.04909719444603165,
      "eval_loss": 2.79296875,
      "eval_runtime": 1.7524,
      "eval_samples_per_second": 49.077,
      "eval_steps_per_second": 3.424,
      "step": 50
    },
    {
      "epoch": 1.76,
      "learning_rate": 1.2573270051708529e-05,
      "loss": 2.5759,
      "step": 51
    },
    {
      "epoch": 1.76,
      "eval_accuracy": 0.04909719444603165,
      "eval_loss": 2.791015625,
      "eval_runtime": 2.7672,
      "eval_samples_per_second": 31.079,
      "eval_steps_per_second": 2.168,
      "step": 51
    },
    {
      "epoch": 1.79,
      "learning_rate": 1.2040449764604002e-05,
      "loss": 2.5535,
      "step": 52
    },
    {
      "epoch": 1.79,
      "eval_accuracy": 0.04929718301811325,
      "eval_loss": 2.7890625,
      "eval_runtime": 2.6669,
      "eval_samples_per_second": 32.247,
      "eval_steps_per_second": 2.25,
      "step": 52
    },
    {
      "epoch": 1.83,
      "learning_rate": 1.1511488166385349e-05,
      "loss": 2.6531,
      "step": 53
    },
    {
      "epoch": 1.83,
      "eval_accuracy": 0.04939717730415405,
      "eval_loss": 2.787109375,
      "eval_runtime": 2.0564,
      "eval_samples_per_second": 41.821,
      "eval_steps_per_second": 2.918,
      "step": 53
    },
    {
      "epoch": 1.86,
      "learning_rate": 1.098707492206169e-05,
      "loss": 2.5701,
      "step": 54
    },
    {
      "epoch": 1.86,
      "eval_accuracy": 0.0495257413862065,
      "eval_loss": 2.78515625,
      "eval_runtime": 1.7609,
      "eval_samples_per_second": 48.84,
      "eval_steps_per_second": 3.407,
      "step": 54
    },
    {
      "epoch": 1.9,
      "learning_rate": 1.0467893766456408e-05,
      "loss": 2.6621,
      "step": 55
    },
    {
      "epoch": 1.9,
      "eval_accuracy": 0.049654305468258955,
      "eval_loss": 2.783203125,
      "eval_runtime": 2.0679,
      "eval_samples_per_second": 41.587,
      "eval_steps_per_second": 2.901,
      "step": 55
    },
    {
      "epoch": 1.93,
      "learning_rate": 9.954621612747371e-06,
      "loss": 2.532,
      "step": 56
    },
    {
      "epoch": 1.93,
      "eval_accuracy": 0.04958288097822981,
      "eval_loss": 2.78125,
      "eval_runtime": 1.7708,
      "eval_samples_per_second": 48.565,
      "eval_steps_per_second": 3.388,
      "step": 56
    },
    {
      "epoch": 1.97,
      "learning_rate": 9.447927669901284e-06,
      "loss": 2.5928,
      "step": 57
    },
    {
      "epoch": 1.97,
      "eval_accuracy": 0.049654305468258955,
      "eval_loss": 2.779296875,
      "eval_runtime": 1.752,
      "eval_samples_per_second": 49.086,
      "eval_steps_per_second": 3.425,
      "step": 57
    },
    {
      "epoch": 2.0,
      "learning_rate": 8.948472570152874e-06,
      "loss": 2.5486,
      "step": 58
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.04969716016227644,
      "eval_loss": 2.775390625,
      "eval_runtime": 1.768,
      "eval_samples_per_second": 48.642,
      "eval_steps_per_second": 3.394,
      "step": 58
    },
    {
      "epoch": 2.03,
      "learning_rate": 8.456907507666488e-06,
      "loss": 2.5009,
      "step": 59
    },
    {
      "epoch": 2.03,
      "eval_accuracy": 0.0497257299582881,
      "eval_loss": 2.7734375,
      "eval_runtime": 2.6592,
      "eval_samples_per_second": 32.341,
      "eval_steps_per_second": 2.256,
      "step": 59
    },
    {
      "epoch": 2.07,
      "learning_rate": 7.97387338950315e-06,
      "loss": 2.4346,
      "step": 60
    },
    {
      "epoch": 2.07,
      "eval_accuracy": 0.049754299754299756,
      "eval_loss": 2.7734375,
      "eval_runtime": 1.77,
      "eval_samples_per_second": 48.588,
      "eval_steps_per_second": 3.39,
      "step": 60
    },
    {
      "epoch": 2.1,
      "learning_rate": 7.500000000000004e-06,
      "loss": 2.3259,
      "step": 61
    },
    {
      "epoch": 2.1,
      "eval_accuracy": 0.049740014856293924,
      "eval_loss": 2.771484375,
      "eval_runtime": 2.7629,
      "eval_samples_per_second": 31.127,
      "eval_steps_per_second": 2.172,
      "step": 61
    },
    {
      "epoch": 2.14,
      "learning_rate": 7.035905179651701e-06,
      "loss": 2.3569,
      "step": 62
    },
    {
      "epoch": 2.14,
      "eval_accuracy": 0.04978286955031141,
      "eval_loss": 2.76953125,
      "eval_runtime": 2.7575,
      "eval_samples_per_second": 31.187,
      "eval_steps_per_second": 2.176,
      "step": 62
    },
    {
      "epoch": 2.17,
      "learning_rate": 6.582194019564266e-06,
      "loss": 2.5898,
      "step": 63
    },
    {
      "epoch": 2.17,
      "eval_accuracy": 0.049811439346323066,
      "eval_loss": 2.76953125,
      "eval_runtime": 1.7477,
      "eval_samples_per_second": 49.209,
      "eval_steps_per_second": 3.433,
      "step": 63
    },
    {
      "epoch": 2.21,
      "learning_rate": 6.1394580725317366e-06,
      "loss": 2.3657,
      "step": 64
    },
    {
      "epoch": 2.21,
      "eval_accuracy": 0.04976858465230558,
      "eval_loss": 2.767578125,
      "eval_runtime": 1.7819,
      "eval_samples_per_second": 48.263,
      "eval_steps_per_second": 3.367,
      "step": 64
    },
    {
      "epoch": 2.24,
      "learning_rate": 5.708274581764155e-06,
      "loss": 2.4875,
      "step": 65
    },
    {
      "epoch": 2.24,
      "eval_accuracy": 0.049754299754299756,
      "eval_loss": 2.767578125,
      "eval_runtime": 2.7652,
      "eval_samples_per_second": 31.1,
      "eval_steps_per_second": 2.17,
      "step": 65
    },
    {
      "epoch": 2.28,
      "learning_rate": 5.289205728272587e-06,
      "loss": 2.4392,
      "step": 66
    },
    {
      "epoch": 2.28,
      "eval_accuracy": 0.0497257299582881,
      "eval_loss": 2.767578125,
      "eval_runtime": 2.0654,
      "eval_samples_per_second": 41.638,
      "eval_steps_per_second": 2.905,
      "step": 66
    },
    {
      "epoch": 2.31,
      "learning_rate": 4.882797897892293e-06,
      "loss": 2.3595,
      "step": 67
    },
    {
      "epoch": 2.31,
      "eval_accuracy": 0.0497257299582881,
      "eval_loss": 2.765625,
      "eval_runtime": 2.674,
      "eval_samples_per_second": 32.162,
      "eval_steps_per_second": 2.244,
      "step": 67
    },
    {
      "epoch": 2.34,
      "learning_rate": 4.4895809688998655e-06,
      "loss": 2.4757,
      "step": 68
    },
    {
      "epoch": 2.34,
      "eval_accuracy": 0.049811439346323066,
      "eval_loss": 2.765625,
      "eval_runtime": 2.7575,
      "eval_samples_per_second": 31.187,
      "eval_steps_per_second": 2.176,
      "step": 68
    },
    {
      "epoch": 2.38,
      "learning_rate": 4.110067621153041e-06,
      "loss": 2.4617,
      "step": 69
    },
    {
      "epoch": 2.38,
      "eval_accuracy": 0.049811439346323066,
      "eval_loss": 2.765625,
      "eval_runtime": 2.7642,
      "eval_samples_per_second": 31.112,
      "eval_steps_per_second": 2.171,
      "step": 69
    },
    {
      "epoch": 2.41,
      "learning_rate": 3.744752667653965e-06,
      "loss": 2.3376,
      "step": 70
    },
    {
      "epoch": 2.41,
      "eval_accuracy": 0.049925718530369693,
      "eval_loss": 2.765625,
      "eval_runtime": 1.7636,
      "eval_samples_per_second": 48.763,
      "eval_steps_per_second": 3.402,
      "step": 70
    },
    {
      "epoch": 2.45,
      "learning_rate": 3.394112409407455e-06,
      "loss": 2.3129,
      "step": 71
    },
    {
      "epoch": 2.45,
      "eval_accuracy": 0.04982572424432889,
      "eval_loss": 2.765625,
      "eval_runtime": 2.0925,
      "eval_samples_per_second": 41.099,
      "eval_steps_per_second": 2.867,
      "step": 71
    },
    {
      "epoch": 2.48,
      "learning_rate": 3.0586040144153436e-06,
      "loss": 2.5703,
      "step": 72
    },
    {
      "epoch": 2.48,
      "eval_accuracy": 0.04979715444831724,
      "eval_loss": 2.765625,
      "eval_runtime": 1.7604,
      "eval_samples_per_second": 48.851,
      "eval_steps_per_second": 3.408,
      "step": 72
    },
    {
      "epoch": 2.52,
      "learning_rate": 2.7386649216166233e-06,
      "loss": 2.3491,
      "step": 73
    },
    {
      "epoch": 2.52,
      "eval_accuracy": 0.049811439346323066,
      "eval_loss": 2.765625,
      "eval_runtime": 2.7645,
      "eval_samples_per_second": 31.109,
      "eval_steps_per_second": 2.17,
      "step": 73
    },
    {
      "epoch": 2.55,
      "learning_rate": 2.4347122705505303e-06,
      "loss": 2.3484,
      "step": 74
    },
    {
      "epoch": 2.55,
      "eval_accuracy": 0.049754299754299756,
      "eval_loss": 2.765625,
      "eval_runtime": 2.7683,
      "eval_samples_per_second": 31.066,
      "eval_steps_per_second": 2.167,
      "step": 74
    },
    {
      "epoch": 2.59,
      "learning_rate": 2.1471423574861643e-06,
      "loss": 2.3782,
      "step": 75
    },
    {
      "epoch": 2.59,
      "eval_accuracy": 0.049740014856293924,
      "eval_loss": 2.765625,
      "eval_runtime": 1.7554,
      "eval_samples_per_second": 48.991,
      "eval_steps_per_second": 3.418,
      "step": 75
    },
    {
      "epoch": 2.62,
      "learning_rate": 1.8763301187277554e-06,
      "loss": 2.4033,
      "step": 76
    },
    {
      "epoch": 2.62,
      "eval_accuracy": 0.04979715444831724,
      "eval_loss": 2.765625,
      "eval_runtime": 2.7591,
      "eval_samples_per_second": 31.17,
      "eval_steps_per_second": 2.175,
      "step": 76
    },
    {
      "epoch": 2.66,
      "learning_rate": 1.6226286417692666e-06,
      "loss": 2.3821,
      "step": 77
    },
    {
      "epoch": 2.66,
      "eval_accuracy": 0.04976858465230558,
      "eval_loss": 2.765625,
      "eval_runtime": 1.7671,
      "eval_samples_per_second": 48.668,
      "eval_steps_per_second": 3.395,
      "step": 77
    },
    {
      "epoch": 2.69,
      "learning_rate": 1.3863687049356465e-06,
      "loss": 2.39,
      "step": 78
    },
    {
      "epoch": 2.69,
      "eval_accuracy": 0.04979715444831724,
      "eval_loss": 2.765625,
      "eval_runtime": 1.7638,
      "eval_samples_per_second": 48.758,
      "eval_steps_per_second": 3.402,
      "step": 78
    },
    {
      "epoch": 2.72,
      "learning_rate": 1.1678583461110026e-06,
      "loss": 2.3984,
      "step": 79
    },
    {
      "epoch": 2.72,
      "eval_accuracy": 0.049740014856293924,
      "eval_loss": 2.765625,
      "eval_runtime": 2.774,
      "eval_samples_per_second": 31.002,
      "eval_steps_per_second": 2.163,
      "step": 79
    },
    {
      "epoch": 2.76,
      "learning_rate": 9.67382461115986e-07,
      "loss": 2.3936,
      "step": 80
    },
    {
      "epoch": 2.76,
      "eval_accuracy": 0.049754299754299756,
      "eval_loss": 2.765625,
      "eval_runtime": 2.0608,
      "eval_samples_per_second": 41.732,
      "eval_steps_per_second": 2.912,
      "step": 80
    },
    {
      "epoch": 2.79,
      "learning_rate": 7.852024322579649e-07,
      "loss": 2.4414,
      "step": 81
    },
    {
      "epoch": 2.79,
      "eval_accuracy": 0.0497257299582881,
      "eval_loss": 2.765625,
      "eval_runtime": 2.3621,
      "eval_samples_per_second": 36.408,
      "eval_steps_per_second": 2.54,
      "step": 81
    },
    {
      "epoch": 2.83,
      "learning_rate": 6.215557875383804e-07,
      "loss": 2.4727,
      "step": 82
    },
    {
      "epoch": 2.83,
      "eval_accuracy": 0.04969716016227644,
      "eval_loss": 2.765625,
      "eval_runtime": 2.7992,
      "eval_samples_per_second": 30.723,
      "eval_steps_per_second": 2.143,
      "step": 82
    },
    {
      "epoch": 2.86,
      "learning_rate": 4.766558909615504e-07,
      "loss": 2.3192,
      "step": 83
    },
    {
      "epoch": 2.86,
      "eval_accuracy": 0.04971144506028227,
      "eval_loss": 2.765625,
      "eval_runtime": 2.0548,
      "eval_samples_per_second": 41.853,
      "eval_steps_per_second": 2.92,
      "step": 83
    },
    {
      "epoch": 2.9,
      "learning_rate": 3.5069166434870014e-07,
      "loss": 2.4365,
      "step": 84
    },
    {
      "epoch": 2.9,
      "eval_accuracy": 0.04966859036626479,
      "eval_loss": 2.765625,
      "eval_runtime": 1.7544,
      "eval_samples_per_second": 49.02,
      "eval_steps_per_second": 3.42,
      "step": 84
    },
    {
      "epoch": 2.93,
      "learning_rate": 2.438273410199598e-07,
      "loss": 2.5042,
      "step": 85
    },
    {
      "epoch": 2.93,
      "eval_accuracy": 0.04969716016227644,
      "eval_loss": 2.765625,
      "eval_runtime": 1.7715,
      "eval_samples_per_second": 48.545,
      "eval_steps_per_second": 3.387,
      "step": 85
    },
    {
      "epoch": 2.97,
      "learning_rate": 1.5620225166544155e-07,
      "loss": 2.4746,
      "step": 86
    },
    {
      "epoch": 2.97,
      "eval_accuracy": 0.04966859036626479,
      "eval_loss": 2.765625,
      "eval_runtime": 1.772,
      "eval_samples_per_second": 48.534,
      "eval_steps_per_second": 3.386,
      "step": 86
    },
    {
      "epoch": 3.0,
      "learning_rate": 8.793064268460605e-08,
      "loss": 2.5383,
      "step": 87
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.049654305468258955,
      "eval_loss": 2.765625,
      "eval_runtime": 1.7578,
      "eval_samples_per_second": 48.924,
      "eval_steps_per_second": 3.413,
      "step": 87
    },
    {
      "epoch": 3.0,
      "step": 87,
      "total_flos": 902636273664.0,
      "train_loss": 2.6778283831716956,
      "train_runtime": 390.8223,
      "train_samples_per_second": 3.523,
      "train_steps_per_second": 0.223
    }
  ],
  "max_steps": 87,
  "num_train_epochs": 3,
  "total_flos": 902636273664.0,
  "trial_name": null,
  "trial_params": null
}