checkpoints/checkpoint-423/trainer_state.json
{
"best_metric": 1.0,
"best_model_checkpoint": "/kaggle/working/new_downloads/checkpoint-397",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 423,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06297229219143577,
"grad_norm": 1.4191151857376099,
"learning_rate": 6.25e-05,
"loss": 4.3195,
"step": 25
},
{
"epoch": 0.12594458438287154,
"grad_norm": 1.5384018421173096,
"learning_rate": 0.000125,
"loss": 4.2512,
"step": 50
},
{
"epoch": 0.1889168765743073,
"grad_norm": 1.5718939304351807,
"learning_rate": 0.00018749999999999998,
"loss": 4.2373,
"step": 75
},
{
"epoch": 0.2518891687657431,
"grad_norm": 1.4780595302581787,
"learning_rate": 0.00025,
"loss": 4.2214,
"step": 100
},
{
"epoch": 0.3148614609571788,
"grad_norm": 1.543885588645935,
"learning_rate": 0.00029980818414322247,
"loss": 4.2333,
"step": 125
},
{
"epoch": 0.3778337531486146,
"grad_norm": 1.5833216905593872,
"learning_rate": 0.000298849104859335,
"loss": 4.2375,
"step": 150
},
{
"epoch": 0.44080604534005036,
"grad_norm": 1.8211495876312256,
"learning_rate": 0.00029789002557544756,
"loss": 4.2227,
"step": 175
},
{
"epoch": 0.5037783375314862,
"grad_norm": 1.5552014112472534,
"learning_rate": 0.0002969309462915601,
"loss": 4.2426,
"step": 200
},
{
"epoch": 0.5667506297229219,
"grad_norm": 1.5943366289138794,
"learning_rate": 0.0002959718670076726,
"loss": 4.2254,
"step": 225
},
{
"epoch": 0.6297229219143576,
"grad_norm": 1.9039855003356934,
"learning_rate": 0.0002950127877237851,
"loss": 4.2428,
"step": 250
},
{
"epoch": 0.6926952141057935,
"grad_norm": 1.624612808227539,
"learning_rate": 0.0002940537084398977,
"loss": 4.2183,
"step": 275
},
{
"epoch": 0.7556675062972292,
"grad_norm": 1.5801411867141724,
"learning_rate": 0.0002930946291560102,
"loss": 4.2032,
"step": 300
},
{
"epoch": 0.818639798488665,
"grad_norm": 1.6559851169586182,
"learning_rate": 0.0002921355498721227,
"loss": 4.1869,
"step": 325
},
{
"epoch": 0.8816120906801007,
"grad_norm": 1.6399421691894531,
"learning_rate": 0.00029117647058823524,
"loss": 4.2117,
"step": 350
},
{
"epoch": 0.9445843828715366,
"grad_norm": 1.6710602045059204,
"learning_rate": 0.0002902173913043478,
"loss": 4.2257,
"step": 375
},
{
"epoch": 1.0,
"eval_cer": 1.0,
"eval_loss": 4.691751956939697,
"eval_runtime": 1056.28,
"eval_samples_per_second": 2.082,
"eval_steps_per_second": 0.017,
"eval_wer": 1.0,
"step": 397
},
{
"epoch": 2.8368794326241136,
"grad_norm": 1.1363143920898438,
"learning_rate": 0.00026209386281588447,
"loss": 3.967,
"step": 400
},
{
"epoch": 3.0,
"eval_cer": 1.0,
"eval_loss": 4.709719657897949,
"eval_runtime": 540.9535,
"eval_samples_per_second": 4.065,
"eval_steps_per_second": 0.017,
"eval_wer": 1.0,
"step": 423
}
],
"logging_steps": 25,
"max_steps": 2820,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.8935455694848e+18,
"train_batch_size": 360,
"trial_name": null,
"trial_params": null
}
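
The JSON above follows the standard Hugging Face Trainer state layout: run-level fields (best_metric, best_model_checkpoint, max_steps, train_batch_size, ...) plus a log_history list that interleaves training-loss entries with per-epoch evaluation entries. Below is a minimal sketch for inspecting such a file, assuming only the Python standard library; the path is taken from the header above, so adjust it to wherever your checkpoint directory actually lives.

import json

# Load the trainer state written by the Hugging Face Trainer at checkpoint time.
with open("checkpoints/checkpoint-423/trainer_state.json") as f:
    state = json.load(f)

print("best checkpoint:", state["best_model_checkpoint"])
print("best metric:    ", state["best_metric"])

# log_history mixes training logs (have "loss") and evaluation logs (have "eval_loss");
# split them so each can be summarized separately.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for e in train_logs:
    print(f"step {e['step']:>4}  epoch {e['epoch']:.3f}  loss {e['loss']:.4f}")

for e in eval_logs:
    print(f"step {e['step']:>4}  eval_loss {e['eval_loss']:.4f}  "
          f"WER {e['eval_wer']:.2f}  CER {e['eval_cer']:.2f}")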