{
  "best_metric": 80.09197164207703,
  "best_model_checkpoint": "./whisper-small-ha-adam-v5/checkpoint-2000",
  "epoch": 15.923566878980893,
  "eval_steps": 500,
  "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1592356687898089,
      "grad_norm": 20.107877731323242,
      "learning_rate": 2.5e-05,
      "loss": 3.0102,
      "step": 25
    },
    {
      "epoch": 0.3184713375796178,
      "grad_norm": 21.660022735595703,
      "learning_rate": 5e-05,
      "loss": 2.5653,
      "step": 50
    },
    {
      "epoch": 0.47770700636942676,
      "grad_norm": 14.246131896972656,
      "learning_rate": 5e-05,
      "loss": 2.0386,
      "step": 75
    },
    {
      "epoch": 0.6369426751592356,
      "grad_norm": 13.428327560424805,
      "learning_rate": 5e-05,
      "loss": 1.883,
      "step": 100
    },
    {
      "epoch": 0.7961783439490446,
      "grad_norm": 12.753376007080078,
      "learning_rate": 5e-05,
      "loss": 1.7273,
      "step": 125
    },
    {
      "epoch": 0.9554140127388535,
      "grad_norm": 13.684127807617188,
      "learning_rate": 5e-05,
      "loss": 1.6506,
      "step": 150
    },
    {
      "epoch": 1.1146496815286624,
      "grad_norm": 10.537659645080566,
      "learning_rate": 5e-05,
      "loss": 1.0013,
      "step": 175
    },
    {
      "epoch": 1.2738853503184713,
      "grad_norm": 9.208993911743164,
      "learning_rate": 5e-05,
      "loss": 0.7727,
      "step": 200
    },
    {
      "epoch": 1.4331210191082802,
      "grad_norm": 10.912196159362793,
      "learning_rate": 5e-05,
      "loss": 0.7837,
      "step": 225
    },
    {
      "epoch": 1.5923566878980893,
      "grad_norm": 9.990174293518066,
      "learning_rate": 5e-05,
      "loss": 0.7913,
      "step": 250
    },
    {
      "epoch": 1.7515923566878981,
      "grad_norm": 11.005951881408691,
      "learning_rate": 5e-05,
      "loss": 0.7732,
      "step": 275
    },
    {
      "epoch": 1.910828025477707,
      "grad_norm": 9.276304244995117,
      "learning_rate": 5e-05,
      "loss": 0.8036,
      "step": 300
    },
    {
      "epoch": 2.070063694267516,
      "grad_norm": 6.240780353546143,
      "learning_rate": 5e-05,
      "loss": 0.5545,
      "step": 325
    },
    {
      "epoch": 2.229299363057325,
      "grad_norm": 15.549118995666504,
      "learning_rate": 5e-05,
      "loss": 0.2379,
      "step": 350
    },
    {
      "epoch": 2.388535031847134,
      "grad_norm": 6.316082954406738,
      "learning_rate": 5e-05,
      "loss": 0.2277,
      "step": 375
    },
    {
      "epoch": 2.5477707006369426,
      "grad_norm": 8.334115028381348,
      "learning_rate": 5e-05,
      "loss": 0.248,
      "step": 400
    },
    {
      "epoch": 2.7070063694267517,
      "grad_norm": 8.026554107666016,
      "learning_rate": 5e-05,
      "loss": 0.2683,
      "step": 425
    },
    {
      "epoch": 2.8662420382165603,
      "grad_norm": 7.539698600769043,
      "learning_rate": 5e-05,
      "loss": 0.2704,
      "step": 450
    },
    {
      "epoch": 3.0254777070063694,
      "grad_norm": 3.8269407749176025,
      "learning_rate": 5e-05,
      "loss": 0.2389,
      "step": 475
    },
    {
      "epoch": 3.1847133757961785,
      "grad_norm": 5.021791458129883,
      "learning_rate": 5e-05,
      "loss": 0.0983,
      "step": 500
    },
    {
      "epoch": 3.1847133757961785,
      "eval_loss": 1.8208503723144531,
      "eval_runtime": 234.2259,
      "eval_samples_per_second": 2.818,
      "eval_steps_per_second": 0.179,
      "eval_wer": 83.21517532094272,
      "eval_wer_ortho": 85.52734375,
      "step": 500
    },
    {
      "epoch": 3.343949044585987,
      "grad_norm": 4.637480735778809,
      "learning_rate": 5e-05,
      "loss": 0.1165,
      "step": 525
    },
    {
      "epoch": 3.5031847133757963,
      "grad_norm": 4.222353458404541,
      "learning_rate": 5e-05,
      "loss": 0.1142,
      "step": 550
    },
    {
      "epoch": 3.662420382165605,
      "grad_norm": 7.10737943649292,
      "learning_rate": 5e-05,
      "loss": 0.1279,
      "step": 575
    },
    {
      "epoch": 3.821656050955414,
      "grad_norm": 5.412852764129639,
      "learning_rate": 5e-05,
      "loss": 0.1173,
      "step": 600
    },
    {
      "epoch": 3.980891719745223,
      "grad_norm": 6.706114768981934,
      "learning_rate": 5e-05,
      "loss": 0.1257,
      "step": 625
    },
    {
      "epoch": 4.140127388535032,
      "grad_norm": 2.5801165103912354,
      "learning_rate": 5e-05,
      "loss": 0.0762,
      "step": 650
    },
    {
      "epoch": 4.2993630573248405,
      "grad_norm": 4.530727863311768,
      "learning_rate": 5e-05,
      "loss": 0.0808,
      "step": 675
    },
    {
      "epoch": 4.45859872611465,
      "grad_norm": 3.3920371532440186,
      "learning_rate": 5e-05,
      "loss": 0.0668,
      "step": 700
    },
    {
      "epoch": 4.617834394904459,
      "grad_norm": 5.061642169952393,
      "learning_rate": 5e-05,
      "loss": 0.0865,
      "step": 725
    },
    {
      "epoch": 4.777070063694268,
      "grad_norm": 4.725163459777832,
      "learning_rate": 5e-05,
      "loss": 0.0859,
      "step": 750
    },
    {
      "epoch": 4.936305732484076,
      "grad_norm": 6.57818603515625,
      "learning_rate": 5e-05,
      "loss": 0.0999,
      "step": 775
    },
    {
      "epoch": 5.095541401273885,
      "grad_norm": 2.8120875358581543,
      "learning_rate": 5e-05,
      "loss": 0.0699,
      "step": 800
    },
    {
      "epoch": 5.254777070063694,
      "grad_norm": 4.141892433166504,
      "learning_rate": 5e-05,
      "loss": 0.0558,
      "step": 825
    },
    {
      "epoch": 5.414012738853503,
      "grad_norm": 3.9606029987335205,
      "learning_rate": 5e-05,
      "loss": 0.0733,
      "step": 850
    },
    {
      "epoch": 5.573248407643312,
      "grad_norm": 5.46377420425415,
      "learning_rate": 5e-05,
      "loss": 0.0645,
      "step": 875
    },
    {
      "epoch": 5.732484076433121,
      "grad_norm": 4.627284526824951,
      "learning_rate": 5e-05,
      "loss": 0.0665,
      "step": 900
    },
    {
      "epoch": 5.89171974522293,
      "grad_norm": 5.00495719909668,
      "learning_rate": 5e-05,
      "loss": 0.0675,
      "step": 925
    },
    {
      "epoch": 6.050955414012739,
      "grad_norm": 2.5908775329589844,
      "learning_rate": 5e-05,
      "loss": 0.0596,
      "step": 950
    },
    {
      "epoch": 6.210191082802548,
      "grad_norm": 5.327170372009277,
      "learning_rate": 5e-05,
      "loss": 0.0487,
      "step": 975
    },
    {
      "epoch": 6.369426751592357,
      "grad_norm": 2.1913540363311768,
      "learning_rate": 5e-05,
      "loss": 0.0514,
      "step": 1000
    },
    {
      "epoch": 6.369426751592357,
      "eval_loss": 1.975398063659668,
      "eval_runtime": 246.0885,
      "eval_samples_per_second": 2.682,
      "eval_steps_per_second": 0.171,
      "eval_wer": 88.50354474037172,
      "eval_wer_ortho": 89.62890625,
      "step": 1000
    },
    {
      "epoch": 6.528662420382165,
      "grad_norm": 4.72745943069458,
      "learning_rate": 5e-05,
      "loss": 0.0627,
      "step": 1025
    },
    {
      "epoch": 6.687898089171974,
      "grad_norm": 4.439643383026123,
      "learning_rate": 5e-05,
      "loss": 0.0579,
      "step": 1050
    },
    {
      "epoch": 6.8471337579617835,
      "grad_norm": 4.479223251342773,
      "learning_rate": 5e-05,
      "loss": 0.0551,
      "step": 1075
    },
    {
      "epoch": 7.006369426751593,
      "grad_norm": 5.471689224243164,
      "learning_rate": 5e-05,
      "loss": 0.0637,
      "step": 1100
    },
    {
      "epoch": 7.165605095541402,
      "grad_norm": 1.6073918342590332,
      "learning_rate": 5e-05,
      "loss": 0.0521,
      "step": 1125
    },
    {
      "epoch": 7.32484076433121,
      "grad_norm": 2.2465476989746094,
      "learning_rate": 5e-05,
      "loss": 0.0414,
      "step": 1150
    },
    {
      "epoch": 7.484076433121019,
      "grad_norm": 2.386169910430908,
      "learning_rate": 5e-05,
      "loss": 0.0617,
      "step": 1175
    },
    {
      "epoch": 7.643312101910828,
      "grad_norm": 2.7703945636749268,
      "learning_rate": 5e-05,
      "loss": 0.0511,
      "step": 1200
    },
    {
      "epoch": 7.802547770700637,
      "grad_norm": 3.860624074935913,
      "learning_rate": 5e-05,
      "loss": 0.0543,
      "step": 1225
    },
    {
      "epoch": 7.961783439490446,
      "grad_norm": 7.631902694702148,
      "learning_rate": 5e-05,
      "loss": 0.0536,
      "step": 1250
    },
    {
      "epoch": 8.121019108280255,
      "grad_norm": 1.4489890336990356,
      "learning_rate": 5e-05,
      "loss": 0.031,
      "step": 1275
    },
    {
      "epoch": 8.280254777070065,
      "grad_norm": 1.750957727432251,
      "learning_rate": 5e-05,
      "loss": 0.0309,
      "step": 1300
    },
    {
      "epoch": 8.439490445859873,
      "grad_norm": 2.7531442642211914,
      "learning_rate": 5e-05,
      "loss": 0.0414,
      "step": 1325
    },
    {
      "epoch": 8.598726114649681,
      "grad_norm": 2.628211736679077,
      "learning_rate": 5e-05,
      "loss": 0.0349,
      "step": 1350
    },
    {
      "epoch": 8.757961783439491,
      "grad_norm": 3.8813140392303467,
      "learning_rate": 5e-05,
      "loss": 0.0465,
      "step": 1375
    },
    {
      "epoch": 8.9171974522293,
      "grad_norm": 1.4133429527282715,
      "learning_rate": 5e-05,
      "loss": 0.0395,
      "step": 1400
    },
    {
      "epoch": 9.07643312101911,
      "grad_norm": 2.883612871170044,
      "learning_rate": 5e-05,
      "loss": 0.0421,
      "step": 1425
    },
    {
      "epoch": 9.235668789808917,
      "grad_norm": 4.271716594696045,
      "learning_rate": 5e-05,
      "loss": 0.0289,
      "step": 1450
    },
    {
      "epoch": 9.394904458598726,
      "grad_norm": 2.3349599838256836,
      "learning_rate": 5e-05,
      "loss": 0.0311,
      "step": 1475
    },
    {
      "epoch": 9.554140127388536,
      "grad_norm": 1.0621289014816284,
      "learning_rate": 5e-05,
      "loss": 0.0393,
      "step": 1500
    },
    {
      "epoch": 9.554140127388536,
      "eval_loss": 2.148721694946289,
      "eval_runtime": 240.3237,
      "eval_samples_per_second": 2.746,
      "eval_steps_per_second": 0.175,
      "eval_wer": 88.82927763939452,
      "eval_wer_ortho": 89.5703125,
      "step": 1500
    },
    {
      "epoch": 9.713375796178344,
      "grad_norm": 1.477952003479004,
      "learning_rate": 5e-05,
      "loss": 0.0428,
      "step": 1525
    },
    {
      "epoch": 9.872611464968152,
      "grad_norm": 2.882534980773926,
      "learning_rate": 5e-05,
      "loss": 0.0382,
      "step": 1550
    },
    {
      "epoch": 10.031847133757962,
      "grad_norm": 2.2467527389526367,
      "learning_rate": 5e-05,
      "loss": 0.0386,
      "step": 1575
    },
    {
      "epoch": 10.19108280254777,
      "grad_norm": 2.468972682952881,
      "learning_rate": 5e-05,
      "loss": 0.0332,
      "step": 1600
    },
    {
      "epoch": 10.35031847133758,
      "grad_norm": 2.6261913776397705,
      "learning_rate": 5e-05,
      "loss": 0.0291,
      "step": 1625
    },
    {
      "epoch": 10.509554140127388,
      "grad_norm": 2.022120714187622,
      "learning_rate": 5e-05,
      "loss": 0.0335,
      "step": 1650
    },
    {
      "epoch": 10.668789808917197,
      "grad_norm": 1.5274375677108765,
      "learning_rate": 5e-05,
      "loss": 0.0327,
      "step": 1675
    },
    {
      "epoch": 10.828025477707007,
      "grad_norm": 4.571503639221191,
      "learning_rate": 5e-05,
      "loss": 0.0369,
      "step": 1700
    },
    {
      "epoch": 10.987261146496815,
      "grad_norm": 6.347606182098389,
      "learning_rate": 5e-05,
      "loss": 0.0393,
      "step": 1725
    },
    {
      "epoch": 11.146496815286625,
      "grad_norm": 2.409583568572998,
      "learning_rate": 5e-05,
      "loss": 0.0231,
      "step": 1750
    },
    {
      "epoch": 11.305732484076433,
      "grad_norm": 2.1078784465789795,
      "learning_rate": 5e-05,
      "loss": 0.032,
      "step": 1775
    },
    {
      "epoch": 11.464968152866241,
      "grad_norm": 0.528281569480896,
      "learning_rate": 5e-05,
      "loss": 0.034,
      "step": 1800
    },
    {
      "epoch": 11.624203821656051,
      "grad_norm": 2.900444984436035,
      "learning_rate": 5e-05,
      "loss": 0.0433,
      "step": 1825
    },
    {
      "epoch": 11.78343949044586,
      "grad_norm": 2.4435691833496094,
      "learning_rate": 5e-05,
      "loss": 0.0232,
      "step": 1850
    },
    {
      "epoch": 11.94267515923567,
      "grad_norm": 3.484557628631592,
      "learning_rate": 5e-05,
      "loss": 0.0328,
      "step": 1875
    },
    {
      "epoch": 12.101910828025478,
      "grad_norm": 5.183062553405762,
      "learning_rate": 5e-05,
      "loss": 0.0261,
      "step": 1900
    },
    {
      "epoch": 12.261146496815286,
      "grad_norm": 6.2796630859375,
      "learning_rate": 5e-05,
      "loss": 0.0363,
      "step": 1925
    },
    {
      "epoch": 12.420382165605096,
      "grad_norm": 2.45373797416687,
      "learning_rate": 5e-05,
      "loss": 0.0323,
      "step": 1950
    },
    {
      "epoch": 12.579617834394904,
      "grad_norm": 5.370733261108398,
      "learning_rate": 5e-05,
      "loss": 0.0317,
      "step": 1975
    },
    {
      "epoch": 12.738853503184714,
      "grad_norm": 2.645756959915161,
      "learning_rate": 5e-05,
      "loss": 0.0313,
      "step": 2000
    },
    {
      "epoch": 12.738853503184714,
      "eval_loss": 2.1685144901275635,
      "eval_runtime": 237.2073,
      "eval_samples_per_second": 2.782,
      "eval_steps_per_second": 0.177,
      "eval_wer": 80.09197164207703,
      "eval_wer_ortho": 81.9921875,
      "step": 2000
    },
    {
      "epoch": 12.898089171974522,
      "grad_norm": 2.6092240810394287,
      "learning_rate": 5e-05,
      "loss": 0.0373,
      "step": 2025
    },
    {
      "epoch": 13.05732484076433,
      "grad_norm": 3.123652935028076,
      "learning_rate": 5e-05,
      "loss": 0.0293,
      "step": 2050
    },
    {
      "epoch": 13.21656050955414,
      "grad_norm": 2.5134246349334717,
      "learning_rate": 5e-05,
      "loss": 0.0268,
      "step": 2075
    },
    {
      "epoch": 13.375796178343949,
      "grad_norm": 1.4545310735702515,
      "learning_rate": 5e-05,
      "loss": 0.0294,
      "step": 2100
    },
    {
      "epoch": 13.535031847133759,
      "grad_norm": 2.473706007003784,
      "learning_rate": 5e-05,
      "loss": 0.0307,
      "step": 2125
    },
    {
      "epoch": 13.694267515923567,
      "grad_norm": 2.8176300525665283,
      "learning_rate": 5e-05,
      "loss": 0.0279,
      "step": 2150
    },
    {
      "epoch": 13.853503184713375,
      "grad_norm": 38.75226974487305,
      "learning_rate": 5e-05,
      "loss": 0.0454,
      "step": 2175
    },
    {
      "epoch": 14.012738853503185,
      "grad_norm": 0.736247181892395,
      "learning_rate": 5e-05,
      "loss": 0.0247,
      "step": 2200
    },
    {
      "epoch": 14.171974522292993,
      "grad_norm": 2.7903378009796143,
      "learning_rate": 5e-05,
      "loss": 0.0289,
      "step": 2225
    },
    {
      "epoch": 14.331210191082803,
      "grad_norm": 2.184035301208496,
      "learning_rate": 5e-05,
      "loss": 0.0216,
      "step": 2250
    },
    {
      "epoch": 14.490445859872612,
      "grad_norm": 2.464597702026367,
      "learning_rate": 5e-05,
      "loss": 0.0275,
      "step": 2275
    },
    {
      "epoch": 14.64968152866242,
      "grad_norm": 4.4987335205078125,
      "learning_rate": 5e-05,
      "loss": 0.0374,
      "step": 2300
    },
    {
      "epoch": 14.80891719745223,
      "grad_norm": 2.5459258556365967,
      "learning_rate": 5e-05,
      "loss": 0.031,
      "step": 2325
    },
    {
      "epoch": 14.968152866242038,
      "grad_norm": 2.8609278202056885,
      "learning_rate": 5e-05,
      "loss": 0.0338,
      "step": 2350
    },
    {
      "epoch": 15.127388535031848,
      "grad_norm": 0.5692533850669861,
      "learning_rate": 5e-05,
      "loss": 0.0154,
      "step": 2375
    },
    {
      "epoch": 15.286624203821656,
      "grad_norm": 1.785417914390564,
      "learning_rate": 5e-05,
      "loss": 0.0285,
      "step": 2400
    },
    {
      "epoch": 15.445859872611464,
      "grad_norm": 3.1533737182617188,
      "learning_rate": 5e-05,
      "loss": 0.027,
      "step": 2425
    },
    {
      "epoch": 15.605095541401274,
      "grad_norm": 0.5182532072067261,
      "learning_rate": 5e-05,
      "loss": 0.0293,
      "step": 2450
    },
    {
      "epoch": 15.764331210191083,
      "grad_norm": 2.0394535064697266,
      "learning_rate": 5e-05,
      "loss": 0.0316,
      "step": 2475
    },
    {
      "epoch": 15.923566878980893,
      "grad_norm": 0.6453192234039307,
      "learning_rate": 5e-05,
      "loss": 0.0213,
      "step": 2500
    },
    {
      "epoch": 15.923566878980893,
      "eval_loss": 2.2915444374084473,
      "eval_runtime": 239.3393,
      "eval_samples_per_second": 2.758,
      "eval_steps_per_second": 0.175,
      "eval_wer": 80.80091971642076,
      "eval_wer_ortho": 82.578125,
      "step": 2500
    }
  ],
  "logging_steps": 25,
  "max_steps": 2826,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 18,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.15261008887808e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}