{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 30.0,
"eval_steps": 500,
"global_step": 37620,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.39872408293460926,
"grad_norm": 0.7819421887397766,
"learning_rate": 4.800637958532696e-05,
"loss": 8.5954,
"step": 500
},
{
"epoch": 0.7974481658692185,
"grad_norm": 1.4368996620178223,
"learning_rate": 4.6012759170653905e-05,
"loss": 7.6895,
"step": 1000
},
{
"epoch": 1.1961722488038278,
"grad_norm": 1.0771726369857788,
"learning_rate": 4.401913875598087e-05,
"loss": 7.5338,
"step": 1500
},
{
"epoch": 1.594896331738437,
"grad_norm": 1.4085040092468262,
"learning_rate": 4.2025518341307815e-05,
"loss": 7.4062,
"step": 2000
},
{
"epoch": 1.9936204146730463,
"grad_norm": 1.2863935232162476,
"learning_rate": 4.003189792663477e-05,
"loss": 7.3026,
"step": 2500
},
{
"epoch": 2.3923444976076556,
"grad_norm": 1.343314528465271,
"learning_rate": 3.8038277511961725e-05,
"loss": 7.2067,
"step": 3000
},
{
"epoch": 2.7910685805422646,
"grad_norm": 1.8292430639266968,
"learning_rate": 3.604465709728867e-05,
"loss": 7.1446,
"step": 3500
},
{
"epoch": 3.189792663476874,
"grad_norm": 1.3264261484146118,
"learning_rate": 3.4051036682615634e-05,
"loss": 7.075,
"step": 4000
},
{
"epoch": 3.588516746411483,
"grad_norm": 1.511952519416809,
"learning_rate": 3.205741626794259e-05,
"loss": 7.0362,
"step": 4500
},
{
"epoch": 3.9872408293460926,
"grad_norm": 1.9909762144088745,
"learning_rate": 3.0063795853269537e-05,
"loss": 6.9639,
"step": 5000
},
{
"epoch": 4.385964912280702,
"grad_norm": 1.8488012552261353,
"learning_rate": 2.8070175438596492e-05,
"loss": 6.9264,
"step": 5500
},
{
"epoch": 4.784688995215311,
"grad_norm": 1.4738715887069702,
"learning_rate": 2.6076555023923443e-05,
"loss": 6.9039,
"step": 6000
},
{
"epoch": 5.18341307814992,
"grad_norm": 1.7991809844970703,
"learning_rate": 2.4082934609250398e-05,
"loss": 6.8603,
"step": 6500
},
{
"epoch": 5.582137161084529,
"grad_norm": 1.811631441116333,
"learning_rate": 2.2089314194577353e-05,
"loss": 6.8354,
"step": 7000
},
{
"epoch": 5.980861244019139,
"grad_norm": 1.732555627822876,
"learning_rate": 2.0095693779904308e-05,
"loss": 6.8048,
"step": 7500
},
{
"epoch": 6.379585326953748,
"grad_norm": 1.7755110263824463,
"learning_rate": 1.8102073365231263e-05,
"loss": 6.7829,
"step": 8000
},
{
"epoch": 6.778309409888357,
"grad_norm": 2.225064277648926,
"learning_rate": 1.611244019138756e-05,
"loss": 6.7659,
"step": 8500
},
{
"epoch": 7.177033492822966,
"grad_norm": 1.872410774230957,
"learning_rate": 1.4118819776714515e-05,
"loss": 6.7374,
"step": 9000
},
{
"epoch": 7.575757575757576,
"grad_norm": 1.7238253355026245,
"learning_rate": 1.2125199362041468e-05,
"loss": 6.7349,
"step": 9500
},
{
"epoch": 7.974481658692185,
"grad_norm": 1.8047776222229004,
"learning_rate": 1.0131578947368421e-05,
"loss": 6.7204,
"step": 10000
},
{
"epoch": 8.373205741626794,
"grad_norm": 2.0625622272491455,
"learning_rate": 8.145933014354067e-06,
"loss": 6.6957,
"step": 10500
},
{
"epoch": 8.771929824561404,
"grad_norm": 1.8150039911270142,
"learning_rate": 6.152312599681021e-06,
"loss": 6.7014,
"step": 11000
},
{
"epoch": 9.170653907496012,
"grad_norm": 2.004660129547119,
"learning_rate": 4.158692185007974e-06,
"loss": 6.6855,
"step": 11500
},
{
"epoch": 9.569377990430622,
"grad_norm": 2.1859395503997803,
"learning_rate": 2.1650717703349284e-06,
"loss": 6.6813,
"step": 12000
},
{
"epoch": 9.96810207336523,
"grad_norm": 2.1167070865631104,
"learning_rate": 1.7145135566188198e-07,
"loss": 6.6833,
"step": 12500
},
{
"epoch": 10.0,
"step": 12540,
"total_flos": 5910924298141440.0,
"train_loss": 7.017849675472083,
"train_runtime": 3142.2268,
"train_samples_per_second": 510.596,
"train_steps_per_second": 3.991
},
{
"epoch": 10.36682615629984,
"grad_norm": 2.343043327331543,
"learning_rate": 4.87772461456672e-05,
"loss": 6.6945,
"step": 13000
},
{
"epoch": 10.76555023923445,
"grad_norm": 2.0774621963500977,
"learning_rate": 4.7448165869218504e-05,
"loss": 6.6465,
"step": 13500
},
{
"epoch": 11.164274322169058,
"grad_norm": 2.5551674365997314,
"learning_rate": 4.611908559276981e-05,
"loss": 6.6086,
"step": 14000
},
{
"epoch": 11.562998405103668,
"grad_norm": 2.335573196411133,
"learning_rate": 4.479000531632111e-05,
"loss": 6.5677,
"step": 14500
},
{
"epoch": 11.961722488038278,
"grad_norm": 2.7488040924072266,
"learning_rate": 4.346092503987241e-05,
"loss": 6.5439,
"step": 15000
},
{
"epoch": 12.360446570972886,
"grad_norm": 2.1827917098999023,
"learning_rate": 4.213184476342371e-05,
"loss": 6.4975,
"step": 15500
},
{
"epoch": 12.759170653907496,
"grad_norm": 2.1983468532562256,
"learning_rate": 4.080276448697501e-05,
"loss": 6.4953,
"step": 16000
},
{
"epoch": 13.157894736842104,
"grad_norm": 2.4948813915252686,
"learning_rate": 3.9473684210526316e-05,
"loss": 6.4546,
"step": 16500
},
{
"epoch": 13.556618819776714,
"grad_norm": 2.0236194133758545,
"learning_rate": 3.814460393407762e-05,
"loss": 6.432,
"step": 17000
},
{
"epoch": 13.955342902711324,
"grad_norm": 2.3203213214874268,
"learning_rate": 3.681552365762892e-05,
"loss": 6.4038,
"step": 17500
},
{
"epoch": 14.354066985645932,
"grad_norm": 2.347102165222168,
"learning_rate": 3.5486443381180226e-05,
"loss": 6.3872,
"step": 18000
},
{
"epoch": 14.752791068580542,
"grad_norm": 2.5316317081451416,
"learning_rate": 3.415736310473153e-05,
"loss": 6.3687,
"step": 18500
},
{
"epoch": 15.0,
"step": 18810,
"total_flos": 8866386447212160.0,
"train_loss": 2.1665261722384206,
"train_runtime": 1599.9695,
"train_samples_per_second": 1504.163,
"train_steps_per_second": 11.756
},
{
"epoch": 15.151515151515152,
"grad_norm": 2.2735719680786133,
"learning_rate": 4.974747474747475e-05,
"loss": 6.3524,
"step": 19000
},
{
"epoch": 15.55023923444976,
"grad_norm": 3.0628674030303955,
"learning_rate": 4.9082934609250406e-05,
"loss": 6.3525,
"step": 19500
},
{
"epoch": 15.94896331738437,
"grad_norm": 2.572157382965088,
"learning_rate": 4.841839447102605e-05,
"loss": 6.3283,
"step": 20000
},
{
"epoch": 16.34768740031898,
"grad_norm": 2.8556103706359863,
"learning_rate": 4.775385433280171e-05,
"loss": 6.3006,
"step": 20500
},
{
"epoch": 16.74641148325359,
"grad_norm": 2.7908451557159424,
"learning_rate": 4.7089314194577354e-05,
"loss": 6.2868,
"step": 21000
},
{
"epoch": 17.1451355661882,
"grad_norm": 2.637225389480591,
"learning_rate": 4.6424774056353006e-05,
"loss": 6.2645,
"step": 21500
},
{
"epoch": 17.54385964912281,
"grad_norm": 2.9248273372650146,
"learning_rate": 4.576023391812866e-05,
"loss": 6.24,
"step": 22000
},
{
"epoch": 17.942583732057415,
"grad_norm": 2.293976306915283,
"learning_rate": 4.509569377990431e-05,
"loss": 6.2331,
"step": 22500
},
{
"epoch": 18.341307814992025,
"grad_norm": 2.7412922382354736,
"learning_rate": 4.443115364167996e-05,
"loss": 6.1999,
"step": 23000
},
{
"epoch": 18.740031897926634,
"grad_norm": 3.194049119949341,
"learning_rate": 4.376661350345561e-05,
"loss": 6.1953,
"step": 23500
},
{
"epoch": 19.138755980861244,
"grad_norm": 2.366976022720337,
"learning_rate": 4.310207336523126e-05,
"loss": 6.1713,
"step": 24000
},
{
"epoch": 19.537480063795854,
"grad_norm": 2.459841728210449,
"learning_rate": 4.2437533227006915e-05,
"loss": 6.1535,
"step": 24500
},
{
"epoch": 19.93620414673046,
"grad_norm": 2.5043203830718994,
"learning_rate": 4.177432216905902e-05,
"loss": 6.1481,
"step": 25000
},
{
"epoch": 20.33492822966507,
"grad_norm": 2.6015625,
"learning_rate": 4.1109782030834664e-05,
"loss": 6.1228,
"step": 25500
},
{
"epoch": 20.73365231259968,
"grad_norm": 3.39577579498291,
"learning_rate": 4.0445241892610315e-05,
"loss": 6.1113,
"step": 26000
},
{
"epoch": 21.13237639553429,
"grad_norm": 2.7234418392181396,
"learning_rate": 3.978070175438597e-05,
"loss": 6.1074,
"step": 26500
},
{
"epoch": 21.5311004784689,
"grad_norm": 2.6176741123199463,
"learning_rate": 3.911749069643807e-05,
"loss": 6.0859,
"step": 27000
},
{
"epoch": 21.92982456140351,
"grad_norm": 2.5121538639068604,
"learning_rate": 3.845427963849017e-05,
"loss": 6.0804,
"step": 27500
},
{
"epoch": 22.328548644338117,
"grad_norm": 2.72497296333313,
"learning_rate": 3.778973950026582e-05,
"loss": 6.0623,
"step": 28000
},
{
"epoch": 22.727272727272727,
"grad_norm": 2.9392683506011963,
"learning_rate": 3.712519936204147e-05,
"loss": 6.0536,
"step": 28500
},
{
"epoch": 23.125996810207337,
"grad_norm": 2.5510990619659424,
"learning_rate": 3.646065922381712e-05,
"loss": 6.0508,
"step": 29000
},
{
"epoch": 23.524720893141946,
"grad_norm": 2.9767627716064453,
"learning_rate": 3.5796119085592774e-05,
"loss": 6.029,
"step": 29500
},
{
"epoch": 23.923444976076556,
"grad_norm": 2.644033908843994,
"learning_rate": 3.513157894736842e-05,
"loss": 6.0329,
"step": 30000
},
{
"epoch": 24.322169059011163,
"grad_norm": 2.623633861541748,
"learning_rate": 3.446836788942052e-05,
"loss": 6.003,
"step": 30500
},
{
"epoch": 24.720893141945773,
"grad_norm": 2.6472909450531006,
"learning_rate": 3.3803827751196174e-05,
"loss": 6.0,
"step": 31000
},
{
"epoch": 25.119617224880383,
"grad_norm": 3.0670645236968994,
"learning_rate": 3.3139287612971825e-05,
"loss": 5.9948,
"step": 31500
},
{
"epoch": 25.518341307814993,
"grad_norm": 2.6866748332977295,
"learning_rate": 3.247474747474748e-05,
"loss": 5.9883,
"step": 32000
},
{
"epoch": 25.917065390749602,
"grad_norm": 2.8670027256011963,
"learning_rate": 3.181020733652313e-05,
"loss": 5.9801,
"step": 32500
},
{
"epoch": 26.31578947368421,
"grad_norm": 2.797853708267212,
"learning_rate": 3.114566719829878e-05,
"loss": 5.9665,
"step": 33000
},
{
"epoch": 26.71451355661882,
"grad_norm": 3.076382637023926,
"learning_rate": 3.0481127060074432e-05,
"loss": 5.9655,
"step": 33500
},
{
"epoch": 27.11323763955343,
"grad_norm": 2.7438435554504395,
"learning_rate": 2.981791600212653e-05,
"loss": 5.9445,
"step": 34000
},
{
"epoch": 27.51196172248804,
"grad_norm": 3.119704484939575,
"learning_rate": 2.9153375863902184e-05,
"loss": 5.9319,
"step": 34500
},
{
"epoch": 27.91068580542265,
"grad_norm": 2.594749927520752,
"learning_rate": 2.8488835725677832e-05,
"loss": 5.9327,
"step": 35000
},
{
"epoch": 28.30940988835726,
"grad_norm": 2.6697604656219482,
"learning_rate": 2.7824295587453487e-05,
"loss": 5.9261,
"step": 35500
},
{
"epoch": 28.708133971291865,
"grad_norm": 3.4312615394592285,
"learning_rate": 2.7159755449229135e-05,
"loss": 5.9146,
"step": 36000
},
{
"epoch": 29.106858054226475,
"grad_norm": 3.0306286811828613,
"learning_rate": 2.6495215311004783e-05,
"loss": 5.9243,
"step": 36500
},
{
"epoch": 29.505582137161085,
"grad_norm": 2.841744899749756,
"learning_rate": 2.583067517278044e-05,
"loss": 5.9046,
"step": 37000
},
{
"epoch": 29.904306220095695,
"grad_norm": 2.9730348587036133,
"learning_rate": 2.5166135034556087e-05,
"loss": 5.8976,
"step": 37500
},
{
"epoch": 30.0,
"step": 37620,
"total_flos": 1.773277289442432e+16,
"train_loss": 3.0397992164007466,
"train_runtime": 4647.9256,
"train_samples_per_second": 1035.565,
"train_steps_per_second": 8.094
}
],
"logging_steps": 500,
"max_steps": 37620,
"num_input_tokens_seen": 0,
"num_train_epochs": 30,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.773277289442432e+16,
"train_batch_size": 128,
"trial_name": null,
"trial_params": null
}