{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.024486226497595,
  "global_step": 4600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.02, "learning_rate": 4.9999999999999996e-06, "loss": 155.33, "step": 10},
    {"epoch": 0.03, "learning_rate": 9.999999999999999e-06, "loss": 175.1775, "step": 20},
    {"epoch": 0.05, "learning_rate": 1.4999999999999999e-05, "loss": 165.6332, "step": 30},
    {"epoch": 0.07, "learning_rate": 1.9999999999999998e-05, "loss": 161.1489, "step": 40},
    {"epoch": 0.09, "learning_rate": 2.4999999999999998e-05, "loss": 176.11, "step": 50},
    {"epoch": 0.1, "learning_rate": 2.9999999999999997e-05, "loss": 153.7462, "step": 60},
    {"epoch": 0.12, "learning_rate": 3.5e-05, "loss": 170.9414, "step": 70},
    {"epoch": 0.14, "learning_rate": 3.9999999999999996e-05, "loss": 160.0549, "step": 80},
    {"epoch": 0.16, "learning_rate": 4.4999999999999996e-05, "loss": 151.4735, "step": 90},
    {"epoch": 0.17, "learning_rate": 4.9999999999999996e-05, "loss": 167.8096, "step": 100},
    {"epoch": 0.19, "learning_rate": 5.499999999999999e-05, "loss": 141.9121, "step": 110},
    {"epoch": 0.21, "learning_rate": 5.9999999999999995e-05, "loss": 153.8951, "step": 120},
    {"epoch": 0.23, "learning_rate": 6.5e-05, "loss": 131.0289, "step": 130},
    {"epoch": 0.24, "learning_rate": 7e-05, "loss": 101.1117, "step": 140},
    {"epoch": 0.26, "learning_rate": 7.5e-05, "loss": 100.5863, "step": 150},
    {"epoch": 0.28, "learning_rate": 7.999999999999999e-05, "loss": 80.6457, "step": 160},
    {"epoch": 0.3, "learning_rate": 8.499999999999999e-05, "loss": 80.4102, "step": 170},
    {"epoch": 0.31, "learning_rate": 8.999999999999999e-05, "loss": 70.8443, "step": 180},
    {"epoch": 0.33, "learning_rate": 9.499999999999999e-05, "loss": 61.107, "step": 190},
    {"epoch": 0.35, "learning_rate": 9.999999999999999e-05, "loss": 56.9596, "step": 200},
    {"epoch": 0.37, "learning_rate": 0.00010499999999999999, "loss": 40.8098, "step": 210},
    {"epoch": 0.38, "learning_rate": 0.00010999999999999998, "loss": 36.5124, "step": 220},
    {"epoch": 0.4, "learning_rate": 0.000115, "loss": 28.1064, "step": 230},
    {"epoch": 0.42, "learning_rate": 0.00011999999999999999, "loss": 20.9962, "step": 240},
    {"epoch": 0.44, "learning_rate": 0.000125, "loss": 16.4393, "step": 250},
    {"epoch": 0.45, "learning_rate": 0.00013, "loss": 10.9059, "step": 260},
    {"epoch": 0.47, "learning_rate": 0.000135, "loss": 8.8954, "step": 270},
    {"epoch": 0.49, "learning_rate": 0.00014, "loss": 7.7207, "step": 280},
    {"epoch": 0.51, "learning_rate": 0.000145, "loss": 7.0725, "step": 290},
    {"epoch": 0.52, "learning_rate": 0.00015, "loss": 6.9189, "step": 300},
    {"epoch": 0.54, "learning_rate": 0.000155, "loss": 6.9459, "step": 310},
    {"epoch": 0.56, "learning_rate": 0.00015999999999999999, "loss": 6.8881, "step": 320},
    {"epoch": 0.58, "learning_rate": 0.000165, "loss": 6.8779, "step": 330},
    {"epoch": 0.59, "learning_rate": 0.00016999999999999999, "loss": 6.897, "step": 340},
    {"epoch": 0.61, "learning_rate": 0.000175, "loss": 6.7802, "step": 350},
    {"epoch": 0.63, "learning_rate": 0.00017999999999999998, "loss": 6.8897, "step": 360},
    {"epoch": 0.65, "learning_rate": 0.000185, "loss": 6.8261, "step": 370},
    {"epoch": 0.66, "learning_rate": 0.00018999999999999998, "loss": 6.7646, "step": 380},
    {"epoch": 0.68, "learning_rate": 0.000195, "loss": 6.7816, "step": 390},
    {"epoch": 0.7, "learning_rate": 0.00019999999999999998, "loss": 6.7041, "step": 400},
    {"epoch": 0.72, "learning_rate": 0.000205, "loss": 6.7975, "step": 410},
    {"epoch": 0.73, "learning_rate": 0.00020999999999999998, "loss": 6.6509, "step": 420},
    {"epoch": 0.75, "learning_rate": 0.000215, "loss": 6.6573, "step": 430},
    {"epoch": 0.77, "learning_rate": 0.00021999999999999995, "loss": 6.6817, "step": 440},
    {"epoch": 0.79, "learning_rate": 0.000225, "loss": 6.5168, "step": 450},
    {"epoch": 0.8, "learning_rate": 0.00023, "loss": 6.7404, "step": 460},
    {"epoch": 0.82, "learning_rate": 0.00023499999999999997, "loss": 6.605, "step": 470},
    {"epoch": 0.84, "learning_rate": 0.00023999999999999998, "loss": 6.5657, "step": 480},
    {"epoch": 0.86, "learning_rate": 0.000245, "loss": 6.6316, "step": 490},
    {"epoch": 0.87, "learning_rate": 0.00025, "loss": 6.4709, "step": 500},
    {"epoch": 0.89, "learning_rate": 0.00025499999999999996, "loss": 6.6643, "step": 510},
    {"epoch": 0.91, "learning_rate": 0.00026, "loss": 6.5056, "step": 520},
    {"epoch": 0.93, "learning_rate": 0.000265, "loss": 6.5331, "step": 530},
    {"epoch": 0.94, "learning_rate": 0.00027, "loss": 6.5842, "step": 540},
    {"epoch": 0.96, "learning_rate": 0.00027499999999999996, "loss": 6.4195, "step": 550},
    {"epoch": 0.98, "learning_rate": 0.00028, "loss": 6.6377, "step": 560},
    {"epoch": 1.0, "learning_rate": 0.000285, "loss": 6.4403, "step": 570},
    {"epoch": 1.01, "learning_rate": 0.00029, "loss": 6.5407, "step": 580},
    {"epoch": 1.03, "learning_rate": 0.00029499999999999996, "loss": 6.5123, "step": 590},
    {"epoch": 1.05, "learning_rate": 0.0003, "loss": 6.4086, "step": 600},
    {"epoch": 1.05, "eval_cer": 1.0, "eval_loss": 6.509830951690674, "eval_runtime": 1134.0889, "eval_samples_per_second": 7.724, "eval_wer": 1.0, "step": 600},
    {"epoch": 1.07, "learning_rate": 0.00029986535008976655, "loss": 6.5638, "step": 610},
    {"epoch": 1.08, "learning_rate": 0.0002997307001795332, "loss": 6.4348, "step": 620},
    {"epoch": 1.1, "learning_rate": 0.0002995960502692998, "loss": 6.5179, "step": 630},
    {"epoch": 1.12, "learning_rate": 0.0002994614003590664, "loss": 6.4513, "step": 640},
    {"epoch": 1.14, "learning_rate": 0.000299326750448833, "loss": 6.4153, "step": 650},
    {"epoch": 1.15, "learning_rate": 0.0002991921005385996, "loss": 6.5478, "step": 660},
    {"epoch": 1.17, "learning_rate": 0.0002990574506283662, "loss": 6.3301, "step": 670},
    {"epoch": 1.19, "learning_rate": 0.0002989228007181328, "loss": 6.5422, "step": 680},
    {"epoch": 1.21, "learning_rate": 0.00029878815080789945, "loss": 6.4761, "step": 690},
    {"epoch": 1.22, "learning_rate": 0.0002986535008976661, "loss": 6.3739, "step": 700},
    {"epoch": 1.24, "learning_rate": 0.00029851885098743265, "loss": 6.5702, "step": 710},
    {"epoch": 1.26, "learning_rate": 0.0002983842010771993, "loss": 6.3541, "step": 720},
    {"epoch": 1.28, "learning_rate": 0.00029824955116696585, "loss": 6.5389, "step": 730},
    {"epoch": 1.29, "learning_rate": 0.0002981149012567325, "loss": 6.4416, "step": 740},
    {"epoch": 1.31, "learning_rate": 0.00029798025134649905, "loss": 6.3941, "step": 750},
    {"epoch": 1.33, "learning_rate": 0.0002978456014362657, "loss": 6.784, "step": 760},
    {"epoch": 1.35, "learning_rate": 0.0002977109515260323, "loss": 6.3669, "step": 770},
    {"epoch": 1.36, "learning_rate": 0.0002975763016157989, "loss": 6.5267, "step": 780},
    {"epoch": 1.38, "learning_rate": 0.0002974416517055655, "loss": 6.4389, "step": 790},
    {"epoch": 1.4, "learning_rate": 0.0002973070017953321, "loss": 6.3461, "step": 800},
    {"epoch": 1.42, "learning_rate": 0.0002971723518850987, "loss": 6.5165, "step": 810},
    {"epoch": 1.43, "learning_rate": 0.00029703770197486533, "loss": 6.3583, "step": 820},
    {"epoch": 1.45, "learning_rate": 0.00029690305206463196, "loss": 6.5063, "step": 830},
    {"epoch": 1.47, "learning_rate": 0.00029676840215439853, "loss": 6.4102, "step": 840},
    {"epoch": 1.49, "learning_rate": 0.00029663375224416516, "loss": 6.3466, "step": 850},
    {"epoch": 1.5, "learning_rate": 0.00029649910233393173, "loss": 6.5043, "step": 860},
    {"epoch": 1.52, "learning_rate": 0.00029636445242369836, "loss": 6.3248, "step": 870},
    {"epoch": 1.54, "learning_rate": 0.000296229802513465, "loss": 6.4586, "step": 880},
    {"epoch": 1.56, "learning_rate": 0.00029609515260323155, "loss": 6.4547, "step": 890},
    {"epoch": 1.57, "learning_rate": 0.0002959605026929982, "loss": 6.3167, "step": 900},
    {"epoch": 1.59, "learning_rate": 0.00029582585278276475, "loss": 6.4654, "step": 910},
    {"epoch": 1.61, "learning_rate": 0.0002956912028725314, "loss": 6.2638, "step": 920},
    {"epoch": 1.63, "learning_rate": 0.000295556552962298, "loss": 6.4081, "step": 930},
    {"epoch": 1.64, "learning_rate": 0.00029542190305206463, "loss": 6.367, "step": 940},
    {"epoch": 1.66, "learning_rate": 0.0002952872531418312, "loss": 6.3051, "step": 950},
    {"epoch": 1.68, "learning_rate": 0.00029515260323159783, "loss": 6.481, "step": 960},
    {"epoch": 1.7, "learning_rate": 0.0002950179533213644, "loss": 6.2663, "step": 970},
    {"epoch": 1.71, "learning_rate": 0.00029488330341113103, "loss": 6.4301, "step": 980},
    {"epoch": 1.73, "learning_rate": 0.00029474865350089766, "loss": 6.2978, "step": 990},
    {"epoch": 1.75, "learning_rate": 0.0002946140035906643, "loss": 6.2719, "step": 1000},
    {"epoch": 1.77, "learning_rate": 0.00029447935368043086, "loss": 6.41, "step": 1010},
    {"epoch": 1.78, "learning_rate": 0.0002943447037701975, "loss": 6.2289, "step": 1020},
    {"epoch": 1.8, "learning_rate": 0.00029421005385996406, "loss": 6.3948, "step": 1030},
    {"epoch": 1.82, "learning_rate": 0.0002940754039497307, "loss": 6.3203, "step": 1040},
    {"epoch": 1.84, "learning_rate": 0.00029394075403949726, "loss": 6.1822, "step": 1050},
    {"epoch": 1.85, "learning_rate": 0.0002938061041292639, "loss": 6.3675, "step": 1060},
    {"epoch": 1.87, "learning_rate": 0.0002936714542190305, "loss": 6.2043, "step": 1070},
    {"epoch": 1.89, "learning_rate": 0.0002935368043087971, "loss": 6.3436, "step": 1080},
    {"epoch": 1.91, "learning_rate": 0.0002934021543985637, "loss": 6.2709, "step": 1090},
    {"epoch": 1.92, "learning_rate": 0.0002932675044883303, "loss": 6.171, "step": 1100},
    {"epoch": 1.94, "learning_rate": 0.0002931328545780969, "loss": 6.296, "step": 1110},
    {"epoch": 1.96, "learning_rate": 0.00029299820466786354, "loss": 6.1652, "step": 1120},
    {"epoch": 1.98, "learning_rate": 0.00029286355475763016, "loss": 6.2756, "step": 1130},
    {"epoch": 1.99, "learning_rate": 0.00029272890484739674, "loss": 6.1802, "step": 1140},
    {"epoch": 2.01, "learning_rate": 0.00029259425493716336, "loss": 6.1664, "step": 1150},
    {"epoch": 2.03, "learning_rate": 0.00029245960502692994, "loss": 6.1878, "step": 1160},
    {"epoch": 2.05, "learning_rate": 0.00029232495511669656, "loss": 5.9369, "step": 1170},
    {"epoch": 2.06, "learning_rate": 0.0002921903052064632, "loss": 6.1776, "step": 1180},
    {"epoch": 2.08, "learning_rate": 0.00029205565529622976, "loss": 6.0142, "step": 1190},
    {"epoch": 2.1, "learning_rate": 0.0002919210053859964, "loss": 6.1265, "step": 1200},
    {"epoch": 2.1, "eval_cer": 0.9626404073685787, "eval_loss": 5.982735633850098, "eval_runtime": 1169.314, "eval_samples_per_second": 7.492, "eval_wer": 1.0, "step": 1200},
    {"epoch": 2.12, "learning_rate": 0.00029178635547576296, "loss": 5.9301, "step": 1210},
    {"epoch": 2.13, "learning_rate": 0.0002916517055655296, "loss": 5.9003, "step": 1220},
    {"epoch": 2.15, "learning_rate": 0.0002915170556552962, "loss": 5.7485, "step": 1230},
    {"epoch": 2.17, "learning_rate": 0.00029138240574506284, "loss": 5.6099, "step": 1240},
    {"epoch": 2.19, "learning_rate": 0.0002912477558348294, "loss": 5.5064, "step": 1250},
    {"epoch": 2.2, "learning_rate": 0.00029111310592459604, "loss": 5.3511, "step": 1260},
    {"epoch": 2.22, "learning_rate": 0.0002909784560143626, "loss": 5.1143, "step": 1270},
    {"epoch": 2.24, "learning_rate": 0.00029084380610412924, "loss": 4.9744, "step": 1280},
    {"epoch": 2.26, "learning_rate": 0.00029070915619389587, "loss": 4.6601, "step": 1290},
    {"epoch": 2.27, "learning_rate": 0.00029057450628366244, "loss": 4.3969, "step": 1300},
    {"epoch": 2.29, "learning_rate": 0.00029043985637342907, "loss": 4.2051, "step": 1310},
    {"epoch": 2.31, "learning_rate": 0.00029030520646319564, "loss": 4.1824, "step": 1320},
    {"epoch": 2.33, "learning_rate": 0.00029017055655296227, "loss": 4.0095, "step": 1330},
    {"epoch": 2.34, "learning_rate": 0.00029003590664272884, "loss": 3.8963, "step": 1340},
    {"epoch": 2.36, "learning_rate": 0.00028990125673249547, "loss": 3.7867, "step": 1350},
    {"epoch": 2.38, "learning_rate": 0.0002897666068222621, "loss": 3.7578, "step": 1360},
    {"epoch": 2.4, "learning_rate": 0.0002896319569120287, "loss": 3.7065, "step": 1370},
    {"epoch": 2.41, "learning_rate": 0.0002894973070017953, "loss": 3.5965, "step": 1380},
    {"epoch": 2.43, "learning_rate": 0.0002893626570915619, "loss": 3.5597, "step": 1390},
    {"epoch": 2.45, "learning_rate": 0.0002892280071813285, "loss": 3.5877, "step": 1400},
    {"epoch": 2.47, "learning_rate": 0.0002890933572710951, "loss": 3.5316, "step": 1410},
    {"epoch": 2.48, "learning_rate": 0.00028895870736086175, "loss": 3.5905, "step": 1420},
    {"epoch": 2.5, "learning_rate": 0.00028882405745062837, "loss": 3.5515, "step": 1430},
    {"epoch": 2.52, "learning_rate": 0.00028868940754039495, "loss": 3.4082, "step": 1440},
    {"epoch": 2.53, "learning_rate": 0.00028855475763016157, "loss": 3.5074, "step": 1450},
    {"epoch": 2.55, "learning_rate": 0.00028842010771992814, "loss": 3.5773, "step": 1460},
    {"epoch": 2.57, "learning_rate": 0.00028828545780969477, "loss": 3.426, "step": 1470},
    {"epoch": 2.59, "learning_rate": 0.0002881508078994614, "loss": 3.3261, "step": 1480},
    {"epoch": 2.6, "learning_rate": 0.00028801615798922797, "loss": 3.3198, "step": 1490},
    {"epoch": 2.62, "learning_rate": 0.0002878815080789946, "loss": 3.3809, "step": 1500},
    {"epoch": 2.64, "learning_rate": 0.00028774685816876117, "loss": 3.3378, "step": 1510},
    {"epoch": 2.66, "learning_rate": 0.0002876122082585278, "loss": 3.2242, "step": 1520},
    {"epoch": 2.67, "learning_rate": 0.0002874775583482944, "loss": 3.2431, "step": 1530},
    {"epoch": 2.69, "learning_rate": 0.00028734290843806105, "loss": 3.2502, "step": 1540},
    {"epoch": 2.71, "learning_rate": 0.0002872082585278276, "loss": 3.2542, "step": 1550},
    {"epoch": 2.73, "learning_rate": 0.00028707360861759425, "loss": 3.1088, "step": 1560},
    {"epoch": 2.74, "learning_rate": 0.0002869389587073608, "loss": 3.0092, "step": 1570},
    {"epoch": 2.76, "learning_rate": 0.00028680430879712745, "loss": 3.0566, "step": 1580},
    {"epoch": 2.78, "learning_rate": 0.000286669658886894, "loss": 3.0401, "step": 1590},
    {"epoch": 2.8, "learning_rate": 0.00028653500897666065, "loss": 2.9808, "step": 1600},
    {"epoch": 2.81, "learning_rate": 0.0002864003590664273, "loss": 3.0996, "step": 1610},
    {"epoch": 2.83, "learning_rate": 0.00028626570915619385, "loss": 3.0149, "step": 1620},
    {"epoch": 2.85, "learning_rate": 0.0002861310592459605, "loss": 3.0153, "step": 1630},
    {"epoch": 2.87, "learning_rate": 0.00028599640933572705, "loss": 3.0267, "step": 1640},
    {"epoch": 2.88, "learning_rate": 0.0002858617594254937, "loss": 2.8632, "step": 1650},
    {"epoch": 2.9, "learning_rate": 0.0002857271095152603, "loss": 2.8897, "step": 1660},
    {"epoch": 2.92, "learning_rate": 0.00028559245960502693, "loss": 2.8773, "step": 1670},
    {"epoch": 2.94, "learning_rate": 0.0002854578096947935, "loss": 2.8656, "step": 1680},
    {"epoch": 2.95, "learning_rate": 0.00028532315978456013, "loss": 2.8248, "step": 1690},
    {"epoch": 2.97, "learning_rate": 0.0002851885098743267, "loss": 2.9358, "step": 1700},
    {"epoch": 2.99, "learning_rate": 0.0002850538599640933, "loss": 2.9389, "step": 1710},
    {"epoch": 3.01, "learning_rate": 0.00028491921005385995, "loss": 2.7758, "step": 1720},
    {"epoch": 3.02, "learning_rate": 0.0002847845601436266, "loss": 2.8451, "step": 1730},
    {"epoch": 3.04, "learning_rate": 0.00028464991023339315, "loss": 2.7879, "step": 1740},
    {"epoch": 3.06, "learning_rate": 0.0002845152603231597, "loss": 2.7708, "step": 1750},
    {"epoch": 3.08, "learning_rate": 0.00028438061041292635, "loss": 2.7697, "step": 1760},
    {"epoch": 3.09, "learning_rate": 0.000284245960502693, "loss": 2.6994, "step": 1770},
    {"epoch": 3.11, "learning_rate": 0.0002841113105924596, "loss": 2.646, "step": 1780},
    {"epoch": 3.13, "learning_rate": 0.0002839766606822262, "loss": 2.7175, "step": 1790},
    {"epoch": 3.15, "learning_rate": 0.0002838420107719928, "loss": 2.5818, "step": 1800},
    {"epoch": 3.15, "eval_cer1": 0.5706086897808987, "eval_cer2": 0.5588515176374077, "eval_cer3": 0.5401176086246324, "eval_cer4": 0.5407772304324029, "eval_cer5": 0.5651804670912951, "eval_loss": 2.5096516609191895, "eval_runtime": 1141.0448, "eval_samples_per_second": 7.677, "step": 1800},
    {"epoch": 3.16, "learning_rate": 0.0002837073608617594, "loss": 2.7269, "step": 1810},
    {"epoch": 3.18, "learning_rate": 0.000283572710951526, "loss": 2.8614, "step": 1820},
    {"epoch": 3.2, "learning_rate": 0.00028343806104129263, "loss": 2.6653, "step": 1830},
    {"epoch": 3.22, "learning_rate": 0.00028330341113105926, "loss": 2.689, "step": 1840},
    {"epoch": 3.23, "learning_rate": 0.00028316876122082583, "loss": 2.5852, "step": 1850},
    {"epoch": 3.25, "learning_rate": 0.00028303411131059246, "loss": 2.6783, "step": 1860},
    {"epoch": 3.27, "learning_rate": 0.00028289946140035903, "loss": 2.5618, "step": 1870},
    {"epoch": 3.29, "learning_rate": 0.00028276481149012566, "loss": 2.6193, "step": 1880},
    {"epoch": 3.3, "learning_rate": 0.00028263016157989223, "loss": 2.5681, "step": 1890},
    {"epoch": 3.32, "learning_rate": 0.00028249551166965886, "loss": 2.5623, "step": 1900},
    {"epoch": 3.32, "eval_cer1": 0.5584551500624557, "eval_cer2": 0.5466119770303528, "eval_cer3": 0.522999019928128, "eval_cer4": 0.524195885250652, "eval_cer5": 0.5435244161358811, "eval_loss": 2.379859447479248, "eval_runtime": 1137.4958, "eval_samples_per_second": 7.701, "step": 1900},
    {"epoch": 3.34, "learning_rate": 0.0002823608617594255, "loss": 2.5353, "step": 1910},
    {"epoch": 3.36, "learning_rate": 0.00028222621184919206, "loss": 2.5668, "step": 1920},
    {"epoch": 3.37, "learning_rate": 0.0002820915619389587, "loss": 2.4749, "step": 1930},
    {"epoch": 3.39, "learning_rate": 0.00028195691202872526, "loss": 2.4638, "step": 1940},
    {"epoch": 3.41, "learning_rate": 0.0002818222621184919, "loss": 2.53, "step": 1950},
    {"epoch": 3.43, "learning_rate": 0.0002816876122082585, "loss": 2.3977, "step": 1960},
    {"epoch": 3.44, "learning_rate": 0.00028155296229802514, "loss": 2.4476, "step": 1970},
    {"epoch": 3.46, "learning_rate": 0.0002814183123877917, "loss": 2.3519, "step": 1980},
    {"epoch": 3.48, "learning_rate": 0.00028128366247755834, "loss": 2.3922, "step": 1990},
    {"epoch": 3.5, "learning_rate": 0.0002811490125673249, "loss": 2.3761, "step": 2000},
    {"epoch": 3.51, "learning_rate": 0.00028101436265709154, "loss": 2.4301, "step": 2010},
    {"epoch": 3.53, "learning_rate": 0.00028087971274685816, "loss": 2.5032, "step": 2020},
    {"epoch": 3.55, "learning_rate": 0.00028074506283662473, "loss": 2.4313, "step": 2030},
    {"epoch": 3.57, "learning_rate": 0.00028061041292639136, "loss": 2.3814, "step": 2040},
    {"epoch": 3.58, "learning_rate": 0.00028047576301615793, "loss": 2.4016, "step": 2050},
    {"epoch": 3.6, "learning_rate": 0.00028034111310592456, "loss": 2.2827, "step": 2060},
    {"epoch": 3.62, "learning_rate": 0.0002802064631956912, "loss": 2.3379, "step": 2070},
    {"epoch": 3.64, "learning_rate": 0.0002800718132854578, "loss": 2.3315, "step": 2080},
    {"epoch": 3.65, "learning_rate": 0.0002799371633752244, "loss": 2.2943, "step": 2090},
    {"epoch": 3.67, "learning_rate": 0.000279802513464991, "loss": 2.4029, "step": 2100},
    {"epoch": 3.69, "learning_rate": 0.0002796678635547576, "loss": 2.3145, "step": 2110},
    {"epoch": 3.71, "learning_rate": 0.0002795332136445242, "loss": 2.2538, "step": 2120},
    {"epoch": 3.72, "learning_rate": 0.00027939856373429084, "loss": 2.2717, "step": 2130},
    {"epoch": 3.74, "learning_rate": 0.0002792639138240574, "loss": 2.1891, "step": 2140},
    {"epoch": 3.76, "learning_rate": 0.00027912926391382404, "loss": 2.2323, "step": 2150},
    {"epoch": 3.78, "learning_rate": 0.00027899461400359067, "loss": 2.2378, "step": 2160},
    {"epoch": 3.79, "learning_rate": 0.00027885996409335724, "loss": 2.2133, "step": 2170},
    {"epoch": 3.81, "learning_rate": 0.0002787253141831238, "loss": 2.1628, "step": 2180},
    {"epoch": 3.83, "learning_rate": 0.00027859066427289044, "loss": 2.1824, "step": 2190},
    {"epoch": 3.85, "learning_rate": 0.00027845601436265706, "loss": 2.1867, "step": 2200},
    {"epoch": 1.93, "learning_rate": 0.00028929521276595743, "loss": 2.1458, "step": 2210},
    {"epoch": 1.94, "learning_rate": 0.00028922872340425527, "loss": 2.2512, "step": 2220},
    {"epoch": 1.95, "learning_rate": 0.00028916223404255317, "loss": 2.2017, "step": 2230},
    {"epoch": 1.96, "learning_rate": 0.000289095744680851, "loss": 2.2122, "step": 2240},
    {"epoch": 1.97, "learning_rate": 0.0002890292553191489, "loss": 2.2943, "step": 2250},
    {"epoch": 1.98, "learning_rate": 0.00028896276595744675, "loss": 2.1862, "step": 2260},
    {"epoch": 1.99, "learning_rate": 0.00028889627659574465, "loss": 2.1958, "step": 2270},
    {"epoch": 1.99, "learning_rate": 0.00028882978723404254, "loss": 2.1499, "step": 2280},
    {"epoch": 2.0, "learning_rate": 0.0002887632978723404, "loss": 2.4003, "step": 2290},
    {"epoch": 2.01, "learning_rate": 0.0002886968085106383, "loss": 2.205, "step": 2300},
    {"epoch": 2.02, "learning_rate": 0.0002886303191489361, "loss": 2.2114, "step": 2310},
    {"epoch": 2.03, "learning_rate": 0.000288563829787234, "loss": 2.2381, "step": 2320},
    {"epoch": 2.04, "learning_rate": 0.00028849734042553187, "loss": 2.0985, "step": 2330},
    {"epoch": 2.05, "learning_rate": 0.00028843085106382976, "loss": 2.3753, "step": 2340},
    {"epoch": 2.06, "learning_rate": 0.00028836436170212766, "loss": 2.1873, "step": 2350},
    {"epoch": 2.06, "learning_rate": 0.0002882978723404255, "loss": 2.2122, "step": 2360},
    {"epoch": 2.07, "learning_rate": 0.0002882313829787234, "loss": 2.0525, "step": 2370},
    {"epoch": 2.08, "learning_rate": 0.00028816489361702124, "loss": 2.1569, "step": 2380},
    {"epoch": 2.09, "learning_rate": 0.00028809840425531914, "loss": 2.2276, "step": 2390},
    {"epoch": 2.1, "learning_rate": 0.000288031914893617, "loss": 2.0076, "step": 2400},
    {"epoch": 2.1, "eval_cer1": 0.48924749333243306, "eval_cer2": 0.47238720262510253, "eval_cer3": 0.4526951976478275, "eval_cer4": 0.45085160500982, "eval_cer5": 0.46989384288747343, "eval_loss": 1.9238340854644775, "eval_runtime": 1120.0867, "eval_samples_per_second": 7.821, "step": 2400},
    {"epoch": 2.11, "learning_rate": 0.0002879654255319149, "loss": 1.9779, "step": 2410},
    {"epoch": 2.12, "learning_rate": 0.0002878989361702127, "loss": 2.1389, "step": 2420},
    {"epoch": 2.13, "learning_rate": 0.0002878324468085106, "loss": 2.2141, "step": 2430},
    {"epoch": 2.13, "learning_rate": 0.0002877659574468085, "loss": 2.251, "step": 2440},
    {"epoch": 2.14, "learning_rate": 0.00028769946808510636, "loss": 2.2231, "step": 2450},
    {"epoch": 2.15, "learning_rate": 0.00028763297872340426, "loss": 2.155, "step": 2460},
    {"epoch": 2.16, "learning_rate": 0.0002875664893617021, "loss": 2.1766, "step": 2470},
    {"epoch": 2.17, "learning_rate": 0.0002875, "loss": 2.079, "step": 2480},
    {"epoch": 2.18, "learning_rate": 0.00028743351063829784, "loss": 2.1769, "step": 2490},
    {"epoch": 2.19, "learning_rate": 0.00028736702127659573, "loss": 2.131, "step": 2500},
    {"epoch": 2.2, "learning_rate": 0.00028730053191489363, "loss": 2.1497, "step": 2510},
    {"epoch": 2.2, "learning_rate": 0.0002872340425531915, "loss": 2.1463, "step": 2520},
    {"epoch": 2.21, "learning_rate": 0.00028716755319148937, "loss": 2.1748, "step": 2530},
    {"epoch": 2.22, "learning_rate": 0.0002871010638297872, "loss": 2.2521, "step": 2540},
    {"epoch": 2.23, "learning_rate": 0.00028703457446808506, "loss": 2.0849, "step": 2550},
    {"epoch": 2.24, "learning_rate": 0.00028696808510638295, "loss": 2.0427, "step": 2560},
    {"epoch": 2.25, "learning_rate": 0.0002869015957446808, "loss": 2.0855, "step": 2570},
    {"epoch": 2.26, "learning_rate": 0.0002868351063829787, "loss": 2.0552, "step": 2580},
    {"epoch": 2.27, "learning_rate": 0.00028676861702127654, "loss": 2.1035, "step": 2590},
    {"epoch": 2.27, "learning_rate": 0.00028670212765957443, "loss": 2.1035, "step": 2600},
    {"epoch": 2.28, "learning_rate": 0.00028663563829787233, "loss": 2.1345, "step": 2610},
    {"epoch": 2.29, "learning_rate": 0.0002865691489361702, "loss": 2.1135, "step": 2620},
    {"epoch": 2.3, "learning_rate": 0.00028650265957446807, "loss": 2.1364, "step": 2630},
    {"epoch": 2.31, "learning_rate": 0.0002864361702127659, "loss": 2.3248, "step": 2640},
    {"epoch": 2.32, "learning_rate": 0.0002863696808510638, "loss": 2.3336, "step": 2650},
    {"epoch": 2.33, "learning_rate": 0.00028630319148936165, "loss": 2.1158, "step": 2660},
    {"epoch": 2.34, "learning_rate": 0.00028623670212765955, "loss": 2.1155, "step": 2670},
    {"epoch": 2.34, "learning_rate": 0.0002861702127659574, "loss": 2.1989, "step": 2680},
    {"epoch": 2.35, "learning_rate": 0.0002861037234042553, "loss": 2.2268, "step": 2690},
    {"epoch": 2.36, "learning_rate": 0.0002860372340425532, "loss": 2.0541, "step": 2700},
    {"epoch": 2.37, "learning_rate": 0.00028597074468085103, "loss": 1.9792, "step": 2710},
    {"epoch": 2.38, "learning_rate": 0.0002859042553191489, "loss": 2.2246, "step": 2720},
    {"epoch": 2.39, "learning_rate": 0.00028583776595744677, "loss": 1.9866, "step": 2730},
    {"epoch": 2.4, "learning_rate": 0.00028577127659574467, "loss": 2.1589, "step": 2740},
    {"epoch": 2.41, "learning_rate": 0.0002857047872340425, "loss": 2.0232, "step": 2750},
    {"epoch": 2.41, "learning_rate": 0.0002856382978723404, "loss": 2.0295, "step": 2760},
    {"epoch": 2.42, "learning_rate": 0.0002855718085106383, "loss": 2.0344, "step": 2770},
    {"epoch": 2.43, "learning_rate": 0.00028550531914893615, "loss": 2.0738, "step": 2780},
    {"epoch": 2.44, "learning_rate": 0.00028543882978723404, "loss": 2.2399, "step": 2790},
    {"epoch": 2.45, "learning_rate": 0.0002853723404255319, "loss": 2.1269, "step": 2800},
    {"epoch": 2.46, "learning_rate": 0.0002853058510638298, "loss": 2.1242, "step": 2810},
    {"epoch": 2.47, "learning_rate": 0.0002852393617021276, "loss": 2.1683, "step": 2820},
    {"epoch": 2.48, "learning_rate": 0.0002851728723404255, "loss": 1.9675, "step": 2830},
    {"epoch": 2.48, "learning_rate": 0.00028510638297872336, "loss": 2.0978, "step": 2840},
    {"epoch": 2.49, "learning_rate": 0.00028503989361702126, "loss": 2.1067, "step": 2850},
    {"epoch": 2.5, "learning_rate": 0.00028497340425531916, "loss": 1.9776, "step": 2860},
    {"epoch": 2.51, "learning_rate": 0.000284906914893617, "loss": 1.9753, "step": 2870},
    {"epoch": 2.52, "learning_rate": 0.0002848404255319149, "loss": 2.0909, "step": 2880},
    {"epoch": 2.53, "learning_rate": 0.00028477393617021274, "loss": 1.9942, "step": 2890},
    {"epoch": 2.54, "learning_rate": 0.0002847074468085106, "loss": 1.9181, "step": 2900},
    {"epoch": 2.55, "learning_rate": 0.0002846409574468085, "loss": 2.1184, "step": 2910},
    {"epoch": 2.55, "learning_rate": 0.0002845744680851063, "loss": 2.0608, "step": 2920},
    {"epoch": 2.56, "learning_rate": 0.0002845079787234042, "loss": 2.1055, "step": 2930},
    {"epoch": 2.57, "learning_rate": 0.0002844414893617021, "loss": 2.1383, "step": 2940},
    {"epoch": 2.58, "learning_rate": 0.00028437499999999996, "loss": 1.97, "step": 2950},
    {"epoch": 2.59, "learning_rate": 0.00028430851063829786, "loss": 2.0785, "step": 2960},
    {"epoch": 2.6, "learning_rate": 0.0002842420212765957, "loss": 2.1369, "step": 2970},
    {"epoch": 2.61, "learning_rate": 0.0002841755319148936, "loss": 2.0299, "step": 2980},
    {"epoch": 2.62, "learning_rate": 0.00028410904255319144, "loss": 2.0835, "step": 2990},
    {"epoch": 2.62, "learning_rate": 0.00028404255319148934, "loss": 1.9336, "step": 3000},
    {"epoch": 2.62, "eval_cer1": 0.47790418959521963, "eval_cer2": 0.46027891714520097, "eval_cer3": 0.4367200261352499, "eval_cer4": 0.43565472165877844, "eval_cer5": 0.4569851380042463, "eval_loss": 1.8061408996582031, "eval_runtime": 1152.9636, "eval_samples_per_second": 7.598, "step": 3000},
    {"epoch": 2.63, "learning_rate": 0.0002839760638297872, "loss": 1.9459, "step": 3010},
    {"epoch": 2.64, "learning_rate": 0.0002839095744680851, "loss": 1.8683, "step": 3020},
    {"epoch": 2.65, "learning_rate": 0.000283843085106383, "loss": 1.8963, "step": 3030},
    {"epoch": 2.66, "learning_rate": 0.0002837765957446808, "loss": 2.0346, "step": 3040},
    {"epoch": 2.67, "learning_rate": 0.0002837101063829787, "loss": 2.0598, "step": 3050},
    {"epoch": 2.68, "learning_rate": 0.00028364361702127656, "loss": 2.1834, "step": 3060},
    {"epoch": 2.69, "learning_rate": 0.00028357712765957445, "loss": 2.0357, "step": 3070},
    {"epoch": 2.69, "learning_rate": 0.0002835106382978723, "loss": 2.0925, "step": 3080},
    {"epoch": 2.7, "learning_rate": 0.0002834441489361702, "loss": 2.2516, "step": 3090},
    {"epoch": 2.71, "learning_rate": 0.0002833776595744681, "loss": 2.2491, "step": 3100},
    {"epoch": 2.72, "learning_rate": 0.00028331117021276593, "loss": 2.2018, "step": 3110},
    {"epoch": 2.73, "learning_rate": 0.00028324468085106383, "loss": 2.1119, "step": 3120},
    {"epoch": 2.74, "learning_rate": 0.00028317819148936167, "loss": 2.1143, "step": 3130},
    {"epoch": 2.75, "learning_rate": 0.00028311170212765957, "loss": 2.3422, "step": 3140},
    {"epoch": 2.76, "learning_rate": 0.0002830452127659574, "loss": 2.1345, "step": 3150},
    {"epoch": 2.76, "learning_rate": 0.0002829787234042553, "loss": 2.2023, "step": 3160},
    {"epoch": 2.77, "learning_rate": 0.00028291223404255315, "loss": 2.0969, "step": 3170},
    {"epoch": 2.78, "learning_rate": 0.00028284574468085105, "loss": 2.1492, "step": 3180},
    {"epoch": 2.79, "learning_rate": 0.00028277925531914894, "loss": 2.1605, "step": 3190},
    {"epoch": 2.8, "learning_rate": 0.0002827127659574468, "loss": 2.0944, "step": 3200},
    {"epoch": 2.81, "learning_rate": 0.0002826462765957447, "loss": 1.9815, "step": 3210},
    {"epoch": 2.82, "learning_rate": 0.00028257978723404253, "loss": 2.0554, "step": 3220},
    {"epoch": 2.83, "learning_rate": 0.0002825132978723404, "loss": 2.007, "step": 3230},
    {"epoch": 2.83, "learning_rate": 0.00028244680851063827, "loss": 2.0277, "step": 3240},
    {"epoch": 2.84, "learning_rate": 0.0002823803191489361, "loss": 1.9326, "step": 3250},
    {"epoch": 2.85, "learning_rate": 0.000282313829787234, "loss": 1.8932, "step": 3260},
    {"epoch": 2.86, "learning_rate": 0.00028224734042553185, "loss": 2.0032, "step": 3270},
    {"epoch": 2.87, "learning_rate": 0.00028218085106382975, "loss": 2.1045, "step": 3280},
    {"epoch": 2.88, "learning_rate": 0.00028211436170212764, "loss": 1.976, "step": 3290},
    {"epoch": 2.89, "learning_rate": 0.0002820478723404255, "loss": 1.8952, "step": 3300},
    {"epoch": 2.9, "learning_rate": 0.0002819813829787234, "loss": 1.988, "step": 3310},
    {"epoch": 2.9, "learning_rate": 0.0002819148936170212, "loss": 2.1143, "step": 3320},
    {"epoch": 2.91, "learning_rate": 0.0002818484042553191, "loss": 1.9619, "step": 3330},
    {"epoch": 2.92, "learning_rate": 0.00028178191489361697, "loss": 2.0878, "step": 3340},
    {"epoch": 2.93, "learning_rate": 0.00028171542553191486, "loss": 1.9068, "step": 3350},
    {"epoch": 2.94, "learning_rate": 0.00028164893617021276, "loss": 2.0037, "step": 3360},
    {"epoch": 2.95, "learning_rate": 0.0002815824468085106, "loss": 1.9381, "step": 3370},
    {"epoch": 2.96, "learning_rate": 0.0002815159574468085, "loss": 1.9552, "step": 3380},
    {"epoch": 2.97, "learning_rate": 0.00028144946808510634, "loss": 1.9375, "step": 3390},
    {"epoch": 2.97, "learning_rate": 0.00028138297872340424, "loss": 2.0038, "step": 3400},
    {"epoch": 2.98, "learning_rate": 0.0002813164893617021, "loss": 1.9518, "step": 3410},
    {"epoch": 2.99, "learning_rate": 0.00028125, "loss": 2.2452, "step": 3420},
    {"epoch": 3.0, "learning_rate": 0.0002811835106382979, "loss": 2.4543, "step": 3430},
    {"epoch": 3.01, "learning_rate": 0.0002811170212765957, "loss": 2.0192, "step": 3440},
    {"epoch": 3.02, "learning_rate": 0.0002810505319148936, "loss": 2.1015, "step": 3450},
    {"epoch": 3.03, "learning_rate": 0.00028098404255319146, "loss": 1.971, "step": 3460},
    {"epoch": 3.04, "learning_rate": 0.00028091755319148936, "loss": 1.9138, "step": 3470},
    {"epoch": 3.04, "learning_rate": 0.0002808510638297872, "loss": 1.938, "step": 3480},
    {"epoch": 3.05, "learning_rate": 0.0002807845744680851, "loss": 1.9073, "step": 3490},
    {"epoch": 3.06, "learning_rate": 0.00028071808510638294, "loss": 1.8454, "step": 3500},
    {"epoch": 3.07, "learning_rate": 0.00028065159574468083, "loss": 1.9872, "step": 3510},
    {"epoch": 3.08, "learning_rate": 0.00028058510638297873, "loss": 1.8779, "step": 3520},
    {"epoch": 3.09, "learning_rate": 0.0002805186170212766, "loss": 1.9469, "step": 3530},
    {"epoch": 3.1, "learning_rate": 0.00028045212765957447, "loss": 2.0581, "step": 3540},
    {"epoch": 3.11, "learning_rate": 0.0002803856382978723, "loss": 1.8872, "step": 3550},
    {"epoch": 3.11, "learning_rate": 0.0002803191489361702, "loss": 2.0156, "step": 3560},
    {"epoch": 3.12, "learning_rate": 0.00028025265957446805, "loss": 2.1001, "step": 3570},
    {"epoch": 3.13, "learning_rate": 0.00028018617021276595, "loss": 2.1155, "step": 3580},
    {"epoch": 3.14, "learning_rate": 0.00028011968085106385, "loss": 2.121, "step": 3590},
    {"epoch": 3.15, "learning_rate": 0.00028005319148936164, "loss": 2.0027, "step": 3600},
    {"epoch": 3.15, "eval_cer1": 0.47597987913979944, "eval_cer2": 0.45689909762100084, "eval_cer3": 0.4369160405096374, "eval_cer4": 0.43597668952638524, "eval_cer5": 0.4563906581740977, "eval_loss": 1.800094723701477, "eval_runtime": 1141.7387, "eval_samples_per_second": 7.673, "step": 3600},
    {"epoch": 3.16, "learning_rate": 0.00027998670212765953, "loss": 1.9812, "step": 3610},
    {"epoch": 3.17, "learning_rate": 0.00027992021276595743, "loss": 2.0124, "step": 3620},
    {"epoch": 3.18, "learning_rate": 0.0002798537234042553, "loss": 1.9499, "step": 3630},
    {"epoch": 3.18, "learning_rate": 0.00027978723404255317, "loss": 1.9615, "step": 3640},
    {"epoch": 3.19, "learning_rate": 0.000279720744680851, "loss": 1.9805, "step": 3650},
    {"epoch": 3.2, "learning_rate": 0.0002796542553191489, "loss": 1.9327, "step": 3660},
    {"epoch": 3.21, "learning_rate": 0.00027958776595744675, "loss": 2.004, "step": 3670},
    {"epoch": 3.22, "learning_rate": 0.00027952127659574465, "loss": 2.0279, "step": 3680},
    {"epoch": 3.23, "learning_rate": 0.00027945478723404255, "loss": 1.9121, "step": 3690},
    {"epoch": 3.24, "learning_rate": 0.0002793882978723404, "loss": 1.9119, "step": 3700},
    {"epoch": 3.25, "learning_rate": 0.0002793218085106383, "loss": 2.0259, "step": 3710},
    {"epoch": 3.25, "learning_rate": 0.00027925531914893613, "loss": 1.9752, "step": 3720},
    {"epoch": 3.26, "learning_rate": 0.000279188829787234, "loss": 1.9925, "step": 3730},
    {"epoch": 3.27, "learning_rate": 0.00027912234042553187, "loss": 1.9936, "step": 3740},
    {"epoch": 3.28, "learning_rate": 0.00027905585106382977, "loss": 1.8995, "step": 3750},
    {"epoch": 3.29, "learning_rate": 0.0002789893617021276, "loss": 1.9581, "step": 3760},
    {"epoch": 3.3, "learning_rate": 0.0002789228723404255, "loss": 1.8888, "step": 3770},
    {"epoch": 3.31, "learning_rate": 0.0002788563829787234, "loss": 1.9329, "step": 3780},
    {"epoch": 3.32, "learning_rate": 0.00027878989361702124, "loss": 2.0032, "step": 3790},
    {"epoch": 3.32, "learning_rate": 0.00027872340425531914, "loss": 1.9863, "step": 3800},
    {"epoch": 3.33, "learning_rate": 0.000278656914893617, "loss": 1.9392, "step": 3810},
    {"epoch": 3.34, "learning_rate": 0.0002785904255319149, "loss": 1.8759, "step": 3820},
    {"epoch": 3.35, "learning_rate": 0.0002785239361702127, "loss": 1.9832, "step": 3830},
    {"epoch": 3.36, "learning_rate": 0.0002784574468085106, "loss": 1.9111, "step": 3840},
    {"epoch": 3.37, "learning_rate": 0.0002783909574468085, "loss": 1.9362, "step": 3850},
    {"epoch": 3.38, "learning_rate": 0.00027832446808510636, "loss": 2.1686, "step": 3860},
    {"epoch": 3.39, "learning_rate": 0.00027825797872340426, "loss": 1.8924, "step": 3870},
    {"epoch": 3.39, "learning_rate": 0.0002781914893617021, "loss": 2.0049, "step": 3880},
    {"epoch": 3.4, "learning_rate": 0.000278125, "loss": 1.871, "step": 3890},
    {"epoch": 3.41, "learning_rate": 0.00027805851063829784, "loss": 1.7214, "step": 3900},
    {"epoch": 3.42, "learning_rate": 0.00027799202127659574, "loss": 1.8852, "step": 3910},
    {"epoch": 3.43, "learning_rate": 0.0002779255319148936, "loss": 1.8057, "step": 3920},
    {"epoch": 3.44, "learning_rate": 0.0002778590425531915, "loss": 2.0098, "step": 3930},
    {"epoch": 3.45, "learning_rate": 0.0002777925531914894, "loss": 1.8596, "step": 3940},
    {"epoch": 3.46, "learning_rate": 0.0002777260638297872, "loss": 1.881, "step": 3950},
    {"epoch": 3.46, "learning_rate": 0.00027765957446808506, "loss": 2.0048, "step": 3960},
    {"epoch": 3.47, "learning_rate": 0.00027759308510638296, "loss": 1.9547, "step": 3970},
    {"epoch": 3.48, "learning_rate": 0.0002775265957446808, "loss": 2.0153, "step": 3980},
    {"epoch": 3.49, "learning_rate": 0.0002774601063829787, "loss": 1.8253, "step": 3990},
    {"epoch": 3.5, "learning_rate": 0.00027739361702127654, "loss": 1.9285, "step": 4000},
    {"epoch": 3.51, "learning_rate": 0.00027732712765957444, "loss": 1.9543, "step": 4010},
    {"epoch": 3.52, "learning_rate": 0.00027726063829787233, "loss": 1.8083, "step": 4020},
    {"epoch": 3.53, "learning_rate": 0.0002771941489361702, "loss": 1.8892, "step": 4030},
    {"epoch": 3.53, "learning_rate": 0.00027712765957446807, "loss": 1.8685, "step": 4040},
    {"epoch": 3.54, "learning_rate": 0.0002770611702127659, "loss": 1.7633, "step": 4050},
    {"epoch": 3.55, "learning_rate": 0.0002769946808510638, "loss": 1.801, "step": 4060},
    {"epoch": 3.56, "learning_rate": 0.00027692819148936166, "loss": 1.8555, "step": 4070},
    {"epoch": 3.57, "learning_rate": 0.00027686170212765955, "loss": 1.8881, "step": 4080},
    {"epoch": 3.58, "learning_rate": 0.0002767952127659574, "loss": 1.8753, "step": 4090},
    {"epoch": 3.59, "learning_rate": 0.0002767287234042553, "loss": 1.8913, "step": 4100},
    {"epoch": 3.6, "learning_rate": 0.0002766622340425532, "loss": 2.0024, "step": 4110},
    {"epoch": 3.6, "learning_rate": 0.00027659574468085103, "loss": 2.0692, "step": 4120},
    {"epoch": 3.61, "learning_rate": 0.00027652925531914893, "loss": 1.9269, "step": 4130},
    {"epoch": 3.62, "learning_rate": 0.00027646276595744677, "loss": 1.9242, "step": 4140},
    {"epoch": 3.63, "learning_rate": 0.00027639627659574467, "loss": 1.8783, "step": 4150},
    {"epoch": 3.64, "learning_rate": 0.0002763297872340425, "loss": 1.7259, "step": 4160},
    {"epoch": 3.65, "learning_rate": 0.0002762632978723404, "loss": 1.7952, "step": 4170},
    {"epoch": 3.66, "learning_rate": 0.0002761968085106383, "loss": 1.7268, "step": 4180},
    {"epoch": 3.67, "learning_rate": 0.00027613031914893615, "loss": 1.804, "step": 4190},
    {"epoch": 3.67, "learning_rate": 0.00027606382978723404, "loss": 1.7527, "step": 4200},
    {"epoch": 3.67, "eval_cer1": 0.45643293609263696, "eval_cer2": 0.4330434782608696, "eval_cer3": 0.41613851682456715, "eval_cer4": 0.41276280627193407, "eval_cer5": 0.4343099787685775, "eval_loss": 1.6673153638839722, "eval_runtime": 1162.9114, "eval_samples_per_second": 7.533, "step": 4200},
    {"epoch": 3.68, "learning_rate": 0.0002759973404255319, "loss": 1.8746, "step": 4210},
    {"epoch": 3.69, "learning_rate": 0.0002759308510638298, "loss": 1.8192, "step": 4220},
    {"epoch": 3.7, "learning_rate": 0.0002758643617021276, "loss": 1.9139, "step": 4230},
    {"epoch": 3.71, "learning_rate": 0.0002757978723404255, "loss": 1.8454, "step": 4240},
    {"epoch": 3.72, "learning_rate": 0.00027573138297872337, "loss": 1.8008, "step": 4250},
    {"epoch": 3.73, "learning_rate": 0.00027566489361702126, "loss": 1.8388, "step": 4260},
    {"epoch": 3.74, "learning_rate": 0.00027559840425531916, "loss": 1.6761, "step": 4270},
    {"epoch": 3.74, "learning_rate": 0.000275531914893617, "loss": 1.7665, "step": 4280},
    {"epoch": 3.75, "learning_rate": 0.0002754654255319149, "loss": 1.7748, "step": 4290},
    {"epoch": 3.76, "learning_rate": 0.00027539893617021274, "loss": 1.7312, "step": 4300},
    {"epoch": 3.77, "learning_rate": 0.0002753324468085106, "loss": 1.849, "step": 4310},
    {"epoch": 3.78, "learning_rate": 0.0002752659574468085, "loss": 1.8165, "step": 4320},
    {"epoch": 3.79, "learning_rate": 0.0002751994680851063, "loss": 1.8257, "step": 4330},
    {"epoch": 3.8, "learning_rate": 0.0002751329787234042, "loss": 1.7717, "step": 4340},
    {"epoch": 3.81, "learning_rate": 0.00027506648936170207, "loss": 1.8083, "step": 4350},
    {"epoch": 3.81, "learning_rate": 0.00027499999999999996, "loss": 1.8256, "step": 4360},
    {"epoch": 3.82, "learning_rate": 0.00027493351063829786, "loss": 1.716, "step": 4370},
    {"epoch": 3.83, "learning_rate": 0.0002748670212765957, "loss": 1.8734, "step": 4380},
    {"epoch": 3.84, "learning_rate": 0.0002748005319148936, "loss": 1.7568, "step": 4390},
    {"epoch": 3.85, "learning_rate": 0.00027473404255319144, "loss": 1.8443, "step": 4400},
    {"epoch": 3.86, "learning_rate": 0.00027466755319148934, "loss": 1.8433, "step": 4410},
    {"epoch": 3.87, "learning_rate": 0.0002746010638297872, "loss": 1.7142, "step": 4420},
    {"epoch": 3.88, "learning_rate": 0.0002745345744680851, "loss": 1.8259, "step": 4430},
    {"epoch": 3.88, "learning_rate": 0.000274468085106383, "loss": 1.7237, "step": 4440},
    {"epoch": 3.89, "learning_rate": 0.0002744015957446808, "loss": 1.7902, "step": 4450},
    {"epoch": 3.9, "learning_rate": 0.0002743351063829787, "loss": 1.7954, "step": 4460},
    {"epoch": 3.91, "learning_rate": 0.00027426861702127656, "loss": 1.7452, "step": 4470},
    {"epoch": 3.92, "learning_rate": 0.00027420212765957445, "loss": 1.7052, "step": 4480},
    {"epoch": 3.93, "learning_rate": 0.0002741356382978723, "loss": 1.8716, "step": 4490},
    {"epoch": 3.94, "learning_rate": 0.0002740691489361702, "loss": 1.9407, "step": 4500},
    {"epoch": 3.95, "learning_rate": 0.00027400265957446804, "loss": 1.7709, "step": 4510},
    {"epoch": 3.95, "learning_rate": 0.00027393617021276593, "loss": 1.8658, "step": 4520},
    {"epoch": 3.96, "learning_rate": 0.00027386968085106383, "loss": 1.9818, "step": 4530},
    {"epoch": 3.97, "learning_rate": 0.0002738031914893617, "loss": 1.8163, "step": 4540},
    {"epoch": 3.98, "learning_rate": 0.00027373670212765957, "loss": 1.7863, "step": 4550},
    {"epoch": 3.99, "learning_rate": 0.0002736702127659574, "loss": 1.8471, "step": 4560},
    {"epoch": 4.0, "learning_rate": 0.0002736037234042553, "loss": 1.8029, "step": 4570},
    {"epoch": 4.01, "learning_rate": 0.00027353723404255315, "loss": 1.8216, "step": 4580},
    {"epoch": 4.02, "learning_rate": 0.00027347074468085105, "loss": 1.6946, "step": 4590},
    {"epoch": 4.02, "learning_rate": 0.00027340425531914895, "loss": 1.8107, "step": 4600}
  ],
  "max_steps": 45720,
  "num_train_epochs": 40,
  "total_flos": 4.155029507216204e+19,
  "trial_name": null,
  "trial_params": null
}