{
  "best_metric": 0.897123396396637,
  "best_model_checkpoint": "./cocoa_outputs_resnet/checkpoint-980",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 980,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05102040816326531,
      "grad_norm": 9.393156051635742,
      "learning_rate": 1.979591836734694e-05,
      "loss": 1.8572,
      "step": 10
    },
    {
      "epoch": 0.10204081632653061,
      "grad_norm": 6.8461503982543945,
      "learning_rate": 1.9591836734693877e-05,
      "loss": 1.8224,
      "step": 20
    },
    {
      "epoch": 0.15306122448979592,
      "grad_norm": 8.049448013305664,
      "learning_rate": 1.9387755102040817e-05,
      "loss": 1.7955,
      "step": 30
    },
    {
      "epoch": 0.20408163265306123,
      "grad_norm": 6.181807041168213,
      "learning_rate": 1.9183673469387756e-05,
      "loss": 1.7641,
      "step": 40
    },
    {
      "epoch": 0.25510204081632654,
      "grad_norm": 6.991772651672363,
      "learning_rate": 1.8979591836734696e-05,
      "loss": 1.7421,
      "step": 50
    },
    {
      "epoch": 0.30612244897959184,
      "grad_norm": 6.233796119689941,
      "learning_rate": 1.8775510204081636e-05,
      "loss": 1.7018,
      "step": 60
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 7.064742088317871,
      "learning_rate": 1.8571428571428575e-05,
      "loss": 1.6763,
      "step": 70
    },
    {
      "epoch": 0.40816326530612246,
      "grad_norm": 7.047808647155762,
      "learning_rate": 1.836734693877551e-05,
      "loss": 1.654,
      "step": 80
    },
    {
      "epoch": 0.45918367346938777,
      "grad_norm": 8.950271606445312,
      "learning_rate": 1.816326530612245e-05,
      "loss": 1.6461,
      "step": 90
    },
    {
      "epoch": 0.5102040816326531,
      "grad_norm": 7.686429023742676,
      "learning_rate": 1.795918367346939e-05,
      "loss": 1.6049,
      "step": 100
    },
    {
      "epoch": 0.5612244897959183,
      "grad_norm": 6.434399127960205,
      "learning_rate": 1.7755102040816327e-05,
      "loss": 1.5926,
      "step": 110
    },
    {
      "epoch": 0.6122448979591837,
      "grad_norm": 6.048853397369385,
      "learning_rate": 1.7551020408163266e-05,
      "loss": 1.5614,
      "step": 120
    },
    {
      "epoch": 0.6632653061224489,
      "grad_norm": 12.146856307983398,
      "learning_rate": 1.7346938775510206e-05,
      "loss": 1.5344,
      "step": 130
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 8.939432144165039,
      "learning_rate": 1.7142857142857142e-05,
      "loss": 1.4892,
      "step": 140
    },
    {
      "epoch": 0.7653061224489796,
      "grad_norm": 16.924053192138672,
      "learning_rate": 1.6938775510204085e-05,
      "loss": 1.4829,
      "step": 150
    },
    {
      "epoch": 0.8163265306122449,
      "grad_norm": 10.929306983947754,
      "learning_rate": 1.673469387755102e-05,
      "loss": 1.5136,
      "step": 160
    },
    {
      "epoch": 0.8673469387755102,
      "grad_norm": 12.943446159362793,
      "learning_rate": 1.653061224489796e-05,
      "loss": 1.4316,
      "step": 170
    },
    {
      "epoch": 0.9183673469387755,
      "grad_norm": 6.890258312225342,
      "learning_rate": 1.63265306122449e-05,
      "loss": 1.4373,
      "step": 180
    },
    {
      "epoch": 0.9693877551020408,
      "grad_norm": 9.790729522705078,
      "learning_rate": 1.612244897959184e-05,
      "loss": 1.4094,
      "step": 190
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8592057761732852,
      "eval_loss": 1.4767718315124512,
      "eval_runtime": 14.5787,
      "eval_samples_per_second": 19.0,
      "eval_steps_per_second": 2.401,
      "step": 196
    },
    {
      "epoch": 1.0204081632653061,
      "grad_norm": 20.24932861328125,
      "learning_rate": 1.5918367346938776e-05,
      "loss": 1.4063,
      "step": 200
    },
    {
      "epoch": 1.0714285714285714,
      "grad_norm": 14.811591148376465,
      "learning_rate": 1.5714285714285715e-05,
      "loss": 1.3551,
      "step": 210
    },
    {
      "epoch": 1.1224489795918366,
      "grad_norm": 18.894763946533203,
      "learning_rate": 1.5510204081632655e-05,
      "loss": 1.3314,
      "step": 220
    },
    {
      "epoch": 1.1734693877551021,
      "grad_norm": 13.06529426574707,
      "learning_rate": 1.530612244897959e-05,
      "loss": 1.3112,
      "step": 230
    },
    {
      "epoch": 1.2244897959183674,
      "grad_norm": 16.423351287841797,
      "learning_rate": 1.510204081632653e-05,
      "loss": 1.2519,
      "step": 240
    },
    {
      "epoch": 1.2755102040816326,
      "grad_norm": 11.016277313232422,
      "learning_rate": 1.4897959183673472e-05,
      "loss": 1.3565,
      "step": 250
    },
    {
      "epoch": 1.3265306122448979,
      "grad_norm": 7.271716594696045,
      "learning_rate": 1.469387755102041e-05,
      "loss": 1.2681,
      "step": 260
    },
    {
      "epoch": 1.3775510204081631,
      "grad_norm": 8.437969207763672,
      "learning_rate": 1.448979591836735e-05,
      "loss": 1.2675,
      "step": 270
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 6.352739334106445,
      "learning_rate": 1.4285714285714287e-05,
      "loss": 1.1948,
      "step": 280
    },
    {
      "epoch": 1.4795918367346939,
      "grad_norm": 17.921051025390625,
      "learning_rate": 1.4081632653061225e-05,
      "loss": 1.3366,
      "step": 290
    },
    {
      "epoch": 1.5306122448979593,
      "grad_norm": 22.707721710205078,
      "learning_rate": 1.3877551020408165e-05,
      "loss": 1.2684,
      "step": 300
    },
    {
      "epoch": 1.5816326530612246,
      "grad_norm": 8.037663459777832,
      "learning_rate": 1.3673469387755102e-05,
      "loss": 1.2322,
      "step": 310
    },
    {
      "epoch": 1.6326530612244898,
      "grad_norm": 9.421684265136719,
      "learning_rate": 1.3469387755102042e-05,
      "loss": 1.1945,
      "step": 320
    },
    {
      "epoch": 1.683673469387755,
      "grad_norm": 27.85601043701172,
      "learning_rate": 1.326530612244898e-05,
      "loss": 1.2519,
      "step": 330
    },
    {
      "epoch": 1.7346938775510203,
      "grad_norm": 15.291147232055664,
      "learning_rate": 1.3061224489795918e-05,
      "loss": 1.1934,
      "step": 340
    },
    {
      "epoch": 1.7857142857142856,
      "grad_norm": 10.985498428344727,
      "learning_rate": 1.2857142857142859e-05,
      "loss": 1.123,
      "step": 350
    },
    {
      "epoch": 1.836734693877551,
      "grad_norm": 9.827385902404785,
      "learning_rate": 1.2653061224489798e-05,
      "loss": 1.1677,
      "step": 360
    },
    {
      "epoch": 1.8877551020408163,
      "grad_norm": 16.994632720947266,
      "learning_rate": 1.2448979591836736e-05,
      "loss": 1.171,
      "step": 370
    },
    {
      "epoch": 1.9387755102040818,
      "grad_norm": 16.16167449951172,
      "learning_rate": 1.2244897959183674e-05,
      "loss": 1.1488,
      "step": 380
    },
    {
      "epoch": 1.989795918367347,
      "grad_norm": 12.466065406799316,
      "learning_rate": 1.2040816326530614e-05,
      "loss": 1.0664,
      "step": 390
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8628158844765343,
      "eval_loss": 1.2089825868606567,
      "eval_runtime": 17.1021,
      "eval_samples_per_second": 16.197,
      "eval_steps_per_second": 2.047,
      "step": 392
    },
    {
      "epoch": 2.0408163265306123,
      "grad_norm": 14.415572166442871,
      "learning_rate": 1.1836734693877552e-05,
      "loss": 1.1081,
      "step": 400
    },
    {
      "epoch": 2.0918367346938775,
      "grad_norm": 16.120677947998047,
      "learning_rate": 1.1632653061224491e-05,
      "loss": 1.1347,
      "step": 410
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 9.960640907287598,
      "learning_rate": 1.1428571428571429e-05,
      "loss": 1.0755,
      "step": 420
    },
    {
      "epoch": 2.193877551020408,
      "grad_norm": 14.734106063842773,
      "learning_rate": 1.1224489795918367e-05,
      "loss": 1.0976,
      "step": 430
    },
    {
      "epoch": 2.2448979591836733,
      "grad_norm": 12.98324203491211,
      "learning_rate": 1.1020408163265306e-05,
      "loss": 0.9844,
      "step": 440
    },
    {
      "epoch": 2.295918367346939,
      "grad_norm": 8.834336280822754,
      "learning_rate": 1.0816326530612246e-05,
      "loss": 1.0923,
      "step": 450
    },
    {
      "epoch": 2.3469387755102042,
      "grad_norm": 10.948126792907715,
      "learning_rate": 1.0612244897959186e-05,
      "loss": 0.9711,
      "step": 460
    },
    {
      "epoch": 2.3979591836734695,
      "grad_norm": 13.707374572753906,
      "learning_rate": 1.0408163265306123e-05,
      "loss": 1.0786,
      "step": 470
    },
    {
      "epoch": 2.4489795918367347,
      "grad_norm": 18.081064224243164,
      "learning_rate": 1.0204081632653063e-05,
      "loss": 1.0399,
      "step": 480
    },
    {
      "epoch": 2.5,
      "grad_norm": 12.470099449157715,
      "learning_rate": 1e-05,
      "loss": 0.9344,
      "step": 490
    },
    {
      "epoch": 2.5510204081632653,
      "grad_norm": 10.96716022491455,
      "learning_rate": 9.795918367346939e-06,
      "loss": 1.043,
      "step": 500
    },
    {
      "epoch": 2.6020408163265305,
      "grad_norm": 7.244050025939941,
      "learning_rate": 9.591836734693878e-06,
      "loss": 0.9058,
      "step": 510
    },
    {
      "epoch": 2.6530612244897958,
      "grad_norm": 10.213088035583496,
      "learning_rate": 9.387755102040818e-06,
      "loss": 0.9683,
      "step": 520
    },
    {
      "epoch": 2.704081632653061,
      "grad_norm": 30.410015106201172,
      "learning_rate": 9.183673469387756e-06,
      "loss": 1.0396,
      "step": 530
    },
    {
      "epoch": 2.7551020408163263,
      "grad_norm": 10.457310676574707,
      "learning_rate": 8.979591836734695e-06,
      "loss": 1.0215,
      "step": 540
    },
    {
      "epoch": 2.806122448979592,
      "grad_norm": 12.294684410095215,
      "learning_rate": 8.775510204081633e-06,
      "loss": 0.9301,
      "step": 550
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 10.194703102111816,
      "learning_rate": 8.571428571428571e-06,
      "loss": 0.9541,
      "step": 560
    },
    {
      "epoch": 2.9081632653061225,
      "grad_norm": 17.80396842956543,
      "learning_rate": 8.36734693877551e-06,
      "loss": 0.964,
      "step": 570
    },
    {
      "epoch": 2.9591836734693877,
      "grad_norm": 12.601577758789062,
      "learning_rate": 8.16326530612245e-06,
      "loss": 1.0295,
      "step": 580
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8628158844765343,
      "eval_loss": 0.9923866987228394,
      "eval_runtime": 14.8495,
      "eval_samples_per_second": 18.654,
      "eval_steps_per_second": 2.357,
      "step": 588
    },
    {
      "epoch": 3.010204081632653,
      "grad_norm": 12.689225196838379,
      "learning_rate": 7.959183673469388e-06,
      "loss": 0.9564,
      "step": 590
    },
    {
      "epoch": 3.061224489795918,
      "grad_norm": 13.568258285522461,
      "learning_rate": 7.755102040816327e-06,
      "loss": 1.0031,
      "step": 600
    },
    {
      "epoch": 3.1122448979591835,
      "grad_norm": 15.86646842956543,
      "learning_rate": 7.551020408163265e-06,
      "loss": 0.9434,
      "step": 610
    },
    {
      "epoch": 3.163265306122449,
      "grad_norm": 16.88734245300293,
      "learning_rate": 7.346938775510205e-06,
      "loss": 0.9468,
      "step": 620
    },
    {
      "epoch": 3.2142857142857144,
      "grad_norm": 14.894389152526855,
      "learning_rate": 7.1428571428571436e-06,
      "loss": 0.9284,
      "step": 630
    },
    {
      "epoch": 3.2653061224489797,
      "grad_norm": 13.203142166137695,
      "learning_rate": 6.938775510204082e-06,
      "loss": 0.9288,
      "step": 640
    },
    {
      "epoch": 3.316326530612245,
      "grad_norm": 13.70313835144043,
      "learning_rate": 6.734693877551021e-06,
      "loss": 0.8879,
      "step": 650
    },
    {
      "epoch": 3.36734693877551,
      "grad_norm": 22.619674682617188,
      "learning_rate": 6.530612244897959e-06,
      "loss": 0.9367,
      "step": 660
    },
    {
      "epoch": 3.4183673469387754,
      "grad_norm": 17.59436798095703,
      "learning_rate": 6.326530612244899e-06,
      "loss": 0.9176,
      "step": 670
    },
    {
      "epoch": 3.4693877551020407,
      "grad_norm": 12.488346099853516,
      "learning_rate": 6.122448979591837e-06,
      "loss": 0.9448,
      "step": 680
    },
    {
      "epoch": 3.520408163265306,
      "grad_norm": 7.665136337280273,
      "learning_rate": 5.918367346938776e-06,
      "loss": 0.8446,
      "step": 690
    },
    {
      "epoch": 3.571428571428571,
      "grad_norm": 7.547515392303467,
      "learning_rate": 5.7142857142857145e-06,
      "loss": 0.8465,
      "step": 700
    },
    {
      "epoch": 3.622448979591837,
      "grad_norm": 5.4680304527282715,
      "learning_rate": 5.510204081632653e-06,
      "loss": 0.8302,
      "step": 710
    },
    {
      "epoch": 3.673469387755102,
      "grad_norm": 22.36733055114746,
      "learning_rate": 5.306122448979593e-06,
      "loss": 0.9601,
      "step": 720
    },
    {
      "epoch": 3.7244897959183674,
      "grad_norm": 12.001816749572754,
      "learning_rate": 5.1020408163265315e-06,
      "loss": 0.8835,
      "step": 730
    },
    {
      "epoch": 3.7755102040816326,
      "grad_norm": 23.104984283447266,
      "learning_rate": 4.897959183673469e-06,
      "loss": 0.8024,
      "step": 740
    },
    {
      "epoch": 3.826530612244898,
      "grad_norm": 13.406716346740723,
      "learning_rate": 4.693877551020409e-06,
      "loss": 1.0554,
      "step": 750
    },
    {
      "epoch": 3.877551020408163,
      "grad_norm": 9.763223648071289,
      "learning_rate": 4.489795918367348e-06,
      "loss": 0.79,
      "step": 760
    },
    {
      "epoch": 3.928571428571429,
      "grad_norm": 10.93140983581543,
      "learning_rate": 4.2857142857142855e-06,
      "loss": 0.8532,
      "step": 770
    },
    {
      "epoch": 3.979591836734694,
      "grad_norm": 10.841426849365234,
      "learning_rate": 4.081632653061225e-06,
      "loss": 0.8401,
      "step": 780
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8628158844765343,
      "eval_loss": 0.9142875075340271,
      "eval_runtime": 14.5094,
      "eval_samples_per_second": 19.091,
      "eval_steps_per_second": 2.412,
      "step": 784
    },
    {
      "epoch": 4.030612244897959,
      "grad_norm": 11.179267883300781,
      "learning_rate": 3.877551020408164e-06,
      "loss": 0.8061,
      "step": 790
    },
    {
      "epoch": 4.081632653061225,
      "grad_norm": 6.5305585861206055,
      "learning_rate": 3.6734693877551024e-06,
      "loss": 0.8525,
      "step": 800
    },
    {
      "epoch": 4.13265306122449,
      "grad_norm": 7.101966857910156,
      "learning_rate": 3.469387755102041e-06,
      "loss": 0.7967,
      "step": 810
    },
    {
      "epoch": 4.183673469387755,
      "grad_norm": 9.439040184020996,
      "learning_rate": 3.2653061224489794e-06,
      "loss": 0.8487,
      "step": 820
    },
    {
      "epoch": 4.23469387755102,
      "grad_norm": 16.658971786499023,
      "learning_rate": 3.0612244897959185e-06,
      "loss": 0.8698,
      "step": 830
    },
    {
      "epoch": 4.285714285714286,
      "grad_norm": 14.88980484008789,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 0.7877,
      "step": 840
    },
    {
      "epoch": 4.336734693877551,
      "grad_norm": 10.878366470336914,
      "learning_rate": 2.6530612244897964e-06,
      "loss": 0.7731,
      "step": 850
    },
    {
      "epoch": 4.387755102040816,
      "grad_norm": 11.978877067565918,
      "learning_rate": 2.4489795918367347e-06,
      "loss": 0.8627,
      "step": 860
    },
    {
      "epoch": 4.438775510204081,
      "grad_norm": 20.543222427368164,
      "learning_rate": 2.244897959183674e-06,
      "loss": 0.8106,
      "step": 870
    },
    {
      "epoch": 4.489795918367347,
      "grad_norm": 5.99076509475708,
      "learning_rate": 2.0408163265306125e-06,
      "loss": 0.8779,
      "step": 880
    },
    {
      "epoch": 4.540816326530612,
      "grad_norm": 13.510481834411621,
      "learning_rate": 1.8367346938775512e-06,
      "loss": 0.8535,
      "step": 890
    },
    {
      "epoch": 4.591836734693878,
      "grad_norm": 14.781784057617188,
      "learning_rate": 1.6326530612244897e-06,
      "loss": 0.8972,
      "step": 900
    },
    {
      "epoch": 4.642857142857143,
      "grad_norm": 14.63200855255127,
      "learning_rate": 1.4285714285714286e-06,
      "loss": 0.9312,
      "step": 910
    },
    {
      "epoch": 4.6938775510204085,
      "grad_norm": 19.910818099975586,
      "learning_rate": 1.2244897959183673e-06,
      "loss": 0.8134,
      "step": 920
    },
    {
      "epoch": 4.744897959183674,
      "grad_norm": 16.637697219848633,
      "learning_rate": 1.0204081632653063e-06,
      "loss": 0.8341,
      "step": 930
    },
    {
      "epoch": 4.795918367346939,
      "grad_norm": 5.32245397567749,
      "learning_rate": 8.163265306122449e-07,
      "loss": 0.7553,
      "step": 940
    },
    {
      "epoch": 4.846938775510204,
      "grad_norm": 13.930354118347168,
      "learning_rate": 6.122448979591837e-07,
      "loss": 0.8303,
      "step": 950
    },
    {
      "epoch": 4.8979591836734695,
      "grad_norm": 18.44146728515625,
      "learning_rate": 4.0816326530612243e-07,
      "loss": 0.8247,
      "step": 960
    },
    {
      "epoch": 4.948979591836735,
      "grad_norm": 15.450093269348145,
      "learning_rate": 2.0408163265306121e-07,
      "loss": 0.9838,
      "step": 970
    },
    {
      "epoch": 5.0,
      "grad_norm": 38.064430236816406,
      "learning_rate": 0.0,
      "loss": 0.8213,
      "step": 980
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8628158844765343,
      "eval_loss": 0.897123396396637,
      "eval_runtime": 14.3653,
      "eval_samples_per_second": 19.283,
      "eval_steps_per_second": 2.436,
      "step": 980
    },
    {
      "epoch": 5.0,
      "step": 980,
      "total_flos": 1.6633116935737344e+17,
      "train_loss": 1.1215976812401596,
      "train_runtime": 1562.8364,
      "train_samples_per_second": 5.01,
      "train_steps_per_second": 0.627
    }
  ],
  "logging_steps": 10,
  "max_steps": 980,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.6633116935737344e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}