{
"best_global_step": 1500,
"best_metric": 0.4347842335700989,
"best_model_checkpoint": "./speecht5_finetuned_Darija/checkpoint-1500",
"epoch": 4.178583885594699,
"eval_steps": 100,
"global_step": 1500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06975933031042902,
"grad_norm": 4.014983654022217,
"learning_rate": 2.4e-05,
"loss": 1.2037,
"step": 25
},
{
"epoch": 0.13951866062085805,
"grad_norm": 4.588967800140381,
"learning_rate": 4.8e-05,
"loss": 0.8946,
"step": 50
},
{
"epoch": 0.20927799093128707,
"grad_norm": 4.026744842529297,
"learning_rate": 7.3e-05,
"loss": 0.6685,
"step": 75
},
{
"epoch": 0.2790373212417161,
"grad_norm": 3.659444808959961,
"learning_rate": 9.8e-05,
"loss": 0.6168,
"step": 100
},
{
"epoch": 0.2790373212417161,
"eval_loss": 0.5272690057754517,
"eval_runtime": 41.6965,
"eval_samples_per_second": 30.578,
"eval_steps_per_second": 15.301,
"step": 100
},
{
"epoch": 0.3487966515521451,
"grad_norm": 6.453115940093994,
"learning_rate": 9.878947368421053e-05,
"loss": 0.5847,
"step": 125
},
{
"epoch": 0.41855598186257414,
"grad_norm": 3.9114222526550293,
"learning_rate": 9.747368421052632e-05,
"loss": 0.5667,
"step": 150
},
{
"epoch": 0.4883153121730031,
"grad_norm": 5.056523323059082,
"learning_rate": 9.615789473684212e-05,
"loss": 0.5662,
"step": 175
},
{
"epoch": 0.5580746424834322,
"grad_norm": 5.494399070739746,
"learning_rate": 9.48421052631579e-05,
"loss": 0.5699,
"step": 200
},
{
"epoch": 0.5580746424834322,
"eval_loss": 0.5231854915618896,
"eval_runtime": 37.0744,
"eval_samples_per_second": 34.39,
"eval_steps_per_second": 17.209,
"step": 200
},
{
"epoch": 0.6278339727938612,
"grad_norm": 2.748530864715576,
"learning_rate": 9.352631578947368e-05,
"loss": 0.5448,
"step": 225
},
{
"epoch": 0.6975933031042902,
"grad_norm": 3.9691007137298584,
"learning_rate": 9.221052631578948e-05,
"loss": 0.5358,
"step": 250
},
{
"epoch": 0.7673526334147193,
"grad_norm": 3.5358879566192627,
"learning_rate": 9.089473684210526e-05,
"loss": 0.5245,
"step": 275
},
{
"epoch": 0.8371119637251483,
"grad_norm": 2.21895432472229,
"learning_rate": 8.957894736842106e-05,
"loss": 0.5231,
"step": 300
},
{
"epoch": 0.8371119637251483,
"eval_loss": 0.47762706875801086,
"eval_runtime": 36.3582,
"eval_samples_per_second": 35.068,
"eval_steps_per_second": 17.548,
"step": 300
},
{
"epoch": 0.9068712940355773,
"grad_norm": 3.315195322036743,
"learning_rate": 8.826315789473684e-05,
"loss": 0.5349,
"step": 325
},
{
"epoch": 0.9766306243460062,
"grad_norm": 2.936798572540283,
"learning_rate": 8.694736842105264e-05,
"loss": 0.5248,
"step": 350
},
{
"epoch": 1.0446459713986747,
"grad_norm": 2.950599431991577,
"learning_rate": 8.563157894736843e-05,
"loss": 0.5101,
"step": 375
},
{
"epoch": 1.1144053017091036,
"grad_norm": 4.972070693969727,
"learning_rate": 8.431578947368422e-05,
"loss": 0.515,
"step": 400
},
{
"epoch": 1.1144053017091036,
"eval_loss": 0.49294987320899963,
"eval_runtime": 35.8818,
"eval_samples_per_second": 35.533,
"eval_steps_per_second": 17.781,
"step": 400
},
{
"epoch": 1.1841646320195327,
"grad_norm": 2.6673648357391357,
"learning_rate": 8.3e-05,
"loss": 0.5115,
"step": 425
},
{
"epoch": 1.2539239623299616,
"grad_norm": 3.388873338699341,
"learning_rate": 8.16842105263158e-05,
"loss": 0.5064,
"step": 450
},
{
"epoch": 1.3236832926403905,
"grad_norm": 2.4961979389190674,
"learning_rate": 8.036842105263158e-05,
"loss": 0.5028,
"step": 475
},
{
"epoch": 1.3934426229508197,
"grad_norm": 2.7970707416534424,
"learning_rate": 7.905263157894737e-05,
"loss": 0.4975,
"step": 500
},
{
"epoch": 1.3934426229508197,
"eval_loss": 0.4633351266384125,
"eval_runtime": 36.5245,
"eval_samples_per_second": 34.908,
"eval_steps_per_second": 17.468,
"step": 500
},
{
"epoch": 1.4632019532612488,
"grad_norm": 2.774756908416748,
"learning_rate": 7.773684210526317e-05,
"loss": 0.498,
"step": 525
},
{
"epoch": 1.5329612835716777,
"grad_norm": 5.114898204803467,
"learning_rate": 7.642105263157895e-05,
"loss": 0.4974,
"step": 550
},
{
"epoch": 1.6027206138821066,
"grad_norm": 3.1757712364196777,
"learning_rate": 7.510526315789475e-05,
"loss": 0.5068,
"step": 575
},
{
"epoch": 1.6724799441925358,
"grad_norm": 2.1318249702453613,
"learning_rate": 7.378947368421053e-05,
"loss": 0.5003,
"step": 600
},
{
"epoch": 1.6724799441925358,
"eval_loss": 0.45842912793159485,
"eval_runtime": 36.8677,
"eval_samples_per_second": 34.583,
"eval_steps_per_second": 17.305,
"step": 600
},
{
"epoch": 1.742239274502965,
"grad_norm": 3.224851131439209,
"learning_rate": 7.247368421052631e-05,
"loss": 0.4954,
"step": 625
},
{
"epoch": 1.8119986048133938,
"grad_norm": 2.2048707008361816,
"learning_rate": 7.115789473684211e-05,
"loss": 0.4954,
"step": 650
},
{
"epoch": 1.8817579351238227,
"grad_norm": 3.310940742492676,
"learning_rate": 6.98421052631579e-05,
"loss": 0.4898,
"step": 675
},
{
"epoch": 1.9515172654342519,
"grad_norm": 3.6913022994995117,
"learning_rate": 6.852631578947369e-05,
"loss": 0.4881,
"step": 700
},
{
"epoch": 1.9515172654342519,
"eval_loss": 0.4606294631958008,
"eval_runtime": 36.611,
"eval_samples_per_second": 34.826,
"eval_steps_per_second": 17.426,
"step": 700
},
{
"epoch": 2.01953261248692,
"grad_norm": 3.2030959129333496,
"learning_rate": 6.721052631578948e-05,
"loss": 0.4727,
"step": 725
},
{
"epoch": 2.0892919427973493,
"grad_norm": 3.458202362060547,
"learning_rate": 6.589473684210526e-05,
"loss": 0.4809,
"step": 750
},
{
"epoch": 2.159051273107778,
"grad_norm": 1.9468854665756226,
"learning_rate": 6.457894736842106e-05,
"loss": 0.4749,
"step": 775
},
{
"epoch": 2.228810603418207,
"grad_norm": 3.895932912826538,
"learning_rate": 6.331578947368422e-05,
"loss": 0.4803,
"step": 800
},
{
"epoch": 2.228810603418207,
"eval_loss": 0.4526459872722626,
"eval_runtime": 37.4188,
"eval_samples_per_second": 34.074,
"eval_steps_per_second": 17.05,
"step": 800
},
{
"epoch": 2.2985699337286363,
"grad_norm": 3.0414257049560547,
"learning_rate": 6.2e-05,
"loss": 0.4774,
"step": 825
},
{
"epoch": 2.3683292640390654,
"grad_norm": 2.6127874851226807,
"learning_rate": 6.0684210526315785e-05,
"loss": 0.4745,
"step": 850
},
{
"epoch": 2.438088594349494,
"grad_norm": 3.0023088455200195,
"learning_rate": 5.936842105263158e-05,
"loss": 0.4791,
"step": 875
},
{
"epoch": 2.5078479246599232,
"grad_norm": 2.730001926422119,
"learning_rate": 5.805263157894737e-05,
"loss": 0.4785,
"step": 900
},
{
"epoch": 2.5078479246599232,
"eval_loss": 0.448452353477478,
"eval_runtime": 37.2061,
"eval_samples_per_second": 34.269,
"eval_steps_per_second": 17.148,
"step": 900
},
{
"epoch": 2.5776072549703524,
"grad_norm": 2.2317161560058594,
"learning_rate": 5.6736842105263166e-05,
"loss": 0.4732,
"step": 925
},
{
"epoch": 2.647366585280781,
"grad_norm": 3.1052346229553223,
"learning_rate": 5.542105263157895e-05,
"loss": 0.4801,
"step": 950
},
{
"epoch": 2.71712591559121,
"grad_norm": 2.309736967086792,
"learning_rate": 5.410526315789474e-05,
"loss": 0.4789,
"step": 975
},
{
"epoch": 2.7868852459016393,
"grad_norm": 2.5499184131622314,
"learning_rate": 5.2789473684210534e-05,
"loss": 0.4733,
"step": 1000
},
{
"epoch": 2.7868852459016393,
"eval_loss": 0.44482412934303284,
"eval_runtime": 36.9591,
"eval_samples_per_second": 34.498,
"eval_steps_per_second": 17.262,
"step": 1000
},
{
"epoch": 2.8566445762120685,
"grad_norm": 3.4246134757995605,
"learning_rate": 5.1473684210526317e-05,
"loss": 0.4778,
"step": 1025
},
{
"epoch": 2.9264039065224976,
"grad_norm": 2.3219287395477295,
"learning_rate": 5.0157894736842106e-05,
"loss": 0.4748,
"step": 1050
},
{
"epoch": 2.9961632368329263,
"grad_norm": 4.338850021362305,
"learning_rate": 4.8842105263157895e-05,
"loss": 0.4738,
"step": 1075
},
{
"epoch": 3.0641785838855946,
"grad_norm": 2.8604490756988525,
"learning_rate": 4.7526315789473684e-05,
"loss": 0.4542,
"step": 1100
},
{
"epoch": 3.0641785838855946,
"eval_loss": 0.4392930865287781,
"eval_runtime": 37.1499,
"eval_samples_per_second": 34.32,
"eval_steps_per_second": 17.174,
"step": 1100
},
{
"epoch": 3.1339379141960237,
"grad_norm": 3.0904340744018555,
"learning_rate": 4.6210526315789473e-05,
"loss": 0.4644,
"step": 1125
},
{
"epoch": 3.203697244506453,
"grad_norm": 3.41607403755188,
"learning_rate": 4.489473684210527e-05,
"loss": 0.4678,
"step": 1150
},
{
"epoch": 3.2734565748168816,
"grad_norm": 2.6169519424438477,
"learning_rate": 4.357894736842105e-05,
"loss": 0.4682,
"step": 1175
},
{
"epoch": 3.3432159051273107,
"grad_norm": 3.6565799713134766,
"learning_rate": 4.226315789473684e-05,
"loss": 0.4607,
"step": 1200
},
{
"epoch": 3.3432159051273107,
"eval_loss": 0.43838903307914734,
"eval_runtime": 37.1276,
"eval_samples_per_second": 34.341,
"eval_steps_per_second": 17.184,
"step": 1200
},
{
"epoch": 3.41297523543774,
"grad_norm": 2.939362049102783,
"learning_rate": 4.094736842105264e-05,
"loss": 0.4586,
"step": 1225
},
{
"epoch": 3.482734565748169,
"grad_norm": 3.203904390335083,
"learning_rate": 3.9631578947368426e-05,
"loss": 0.46,
"step": 1250
},
{
"epoch": 3.552493896058598,
"grad_norm": 3.043788433074951,
"learning_rate": 3.831578947368421e-05,
"loss": 0.4544,
"step": 1275
},
{
"epoch": 3.622253226369027,
"grad_norm": 4.401975631713867,
"learning_rate": 3.7e-05,
"loss": 0.4569,
"step": 1300
},
{
"epoch": 3.622253226369027,
"eval_loss": 0.4503220319747925,
"eval_runtime": 37.3024,
"eval_samples_per_second": 34.18,
"eval_steps_per_second": 17.103,
"step": 1300
},
{
"epoch": 3.692012556679456,
"grad_norm": 2.873894453048706,
"learning_rate": 3.5684210526315794e-05,
"loss": 0.4582,
"step": 1325
},
{
"epoch": 3.7617718869898846,
"grad_norm": 2.5376503467559814,
"learning_rate": 3.436842105263158e-05,
"loss": 0.4611,
"step": 1350
},
{
"epoch": 3.8315312173003138,
"grad_norm": 2.759460687637329,
"learning_rate": 3.3052631578947366e-05,
"loss": 0.4649,
"step": 1375
},
{
"epoch": 3.901290547610743,
"grad_norm": 2.2127342224121094,
"learning_rate": 3.173684210526316e-05,
"loss": 0.4582,
"step": 1400
},
{
"epoch": 3.901290547610743,
"eval_loss": 0.43552732467651367,
"eval_runtime": 37.4108,
"eval_samples_per_second": 34.081,
"eval_steps_per_second": 17.054,
"step": 1400
},
{
"epoch": 3.971049877921172,
"grad_norm": 3.02252459526062,
"learning_rate": 3.042105263157895e-05,
"loss": 0.4639,
"step": 1425
},
{
"epoch": 4.03906522497384,
"grad_norm": 3.38741397857666,
"learning_rate": 2.910526315789474e-05,
"loss": 0.4457,
"step": 1450
},
{
"epoch": 4.1088245552842695,
"grad_norm": 2.798316717147827,
"learning_rate": 2.7789473684210526e-05,
"loss": 0.4571,
"step": 1475
},
{
"epoch": 4.178583885594699,
"grad_norm": 2.803051471710205,
"learning_rate": 2.647368421052632e-05,
"loss": 0.451,
"step": 1500
},
{
"epoch": 4.178583885594699,
"eval_loss": 0.4347842335700989,
"eval_runtime": 37.2652,
"eval_samples_per_second": 34.214,
"eval_steps_per_second": 17.121,
"step": 1500
}
],
"logging_steps": 25,
"max_steps": 2000,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1844132330585952.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}