{
  "best_metric": 0.9496769905090332,
  "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e4l58-l/checkpoint-9500",
  "epoch": 1.7498618530116044,
  "eval_steps": 500,
  "global_step": 9500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09209799226376865,
      "grad_norm": 0.0014697719598188996,
      "learning_rate": 4.884877509670289e-08,
      "loss": 0.4588,
      "step": 500
    },
    {
      "epoch": 0.09209799226376865,
      "eval_loss": 1.4194883108139038,
      "eval_runtime": 78.7703,
      "eval_samples_per_second": 15.31,
      "eval_steps_per_second": 1.917,
      "step": 500
    },
    {
      "epoch": 0.1841959845275373,
      "grad_norm": 301.76043701171875,
      "learning_rate": 4.769755019340578e-08,
      "loss": 0.4255,
      "step": 1000
    },
    {
      "epoch": 0.1841959845275373,
      "eval_loss": 1.3416553735733032,
      "eval_runtime": 77.0051,
      "eval_samples_per_second": 15.661,
      "eval_steps_per_second": 1.961,
      "step": 1000
    },
    {
      "epoch": 0.27629397679130596,
      "grad_norm": 35.28853225708008,
      "learning_rate": 4.654632529010867e-08,
      "loss": 0.3724,
      "step": 1500
    },
    {
      "epoch": 0.27629397679130596,
      "eval_loss": 1.2872973680496216,
      "eval_runtime": 77.5439,
      "eval_samples_per_second": 15.552,
      "eval_steps_per_second": 1.947,
      "step": 1500
    },
    {
      "epoch": 0.3683919690550746,
      "grad_norm": 338.9349060058594,
      "learning_rate": 4.539510038681156e-08,
      "loss": 0.3251,
      "step": 2000
    },
    {
      "epoch": 0.3683919690550746,
      "eval_loss": 1.234910249710083,
      "eval_runtime": 78.7715,
      "eval_samples_per_second": 15.31,
      "eval_steps_per_second": 1.917,
      "step": 2000
    },
    {
      "epoch": 0.46048996131884323,
      "grad_norm": 325.85101318359375,
      "learning_rate": 4.4243875483514457e-08,
      "loss": 0.3308,
      "step": 2500
    },
    {
      "epoch": 0.46048996131884323,
      "eval_loss": 1.1944907903671265,
      "eval_runtime": 79.0397,
      "eval_samples_per_second": 15.258,
      "eval_steps_per_second": 1.91,
      "step": 2500
    },
    {
      "epoch": 0.5525879535826119,
      "grad_norm": 100.85855102539062,
      "learning_rate": 4.309265058021735e-08,
      "loss": 0.3017,
      "step": 3000
    },
    {
      "epoch": 0.5525879535826119,
      "eval_loss": 1.1593009233474731,
      "eval_runtime": 77.6354,
      "eval_samples_per_second": 15.534,
      "eval_steps_per_second": 1.945,
      "step": 3000
    },
    {
      "epoch": 0.6446859458463805,
      "grad_norm": 8.865598678588867,
      "learning_rate": 4.194142567692024e-08,
      "loss": 0.2962,
      "step": 3500
    },
    {
      "epoch": 0.6446859458463805,
      "eval_loss": 1.1259427070617676,
      "eval_runtime": 78.2715,
      "eval_samples_per_second": 15.408,
      "eval_steps_per_second": 1.929,
      "step": 3500
    },
    {
      "epoch": 0.7367839381101492,
      "grad_norm": 4.393447399139404,
      "learning_rate": 4.079020077362314e-08,
      "loss": 0.2919,
      "step": 4000
    },
    {
      "epoch": 0.7367839381101492,
      "eval_loss": 1.0954023599624634,
      "eval_runtime": 78.5366,
      "eval_samples_per_second": 15.356,
      "eval_steps_per_second": 1.923,
      "step": 4000
    },
    {
      "epoch": 0.8288819303739179,
      "grad_norm": 1.6036031246185303,
      "learning_rate": 3.9638975870326026e-08,
      "loss": 0.307,
      "step": 4500
    },
    {
      "epoch": 0.8288819303739179,
      "eval_loss": 1.0728861093521118,
      "eval_runtime": 78.4032,
      "eval_samples_per_second": 15.382,
      "eval_steps_per_second": 1.926,
      "step": 4500
    },
    {
      "epoch": 0.9209799226376865,
      "grad_norm": 11.990758895874023,
      "learning_rate": 3.8487750967028915e-08,
      "loss": 0.2764,
      "step": 5000
    },
    {
      "epoch": 0.9209799226376865,
      "eval_loss": 1.0523593425750732,
      "eval_runtime": 78.3726,
      "eval_samples_per_second": 15.388,
      "eval_steps_per_second": 1.927,
      "step": 5000
    },
    {
      "epoch": 1.013077914901455,
      "grad_norm": 43.865562438964844,
      "learning_rate": 3.733652606373181e-08,
      "loss": 0.2456,
      "step": 5500
    },
    {
      "epoch": 1.013077914901455,
      "eval_loss": 1.0375442504882812,
      "eval_runtime": 78.2473,
      "eval_samples_per_second": 15.413,
      "eval_steps_per_second": 1.93,
      "step": 5500
    },
    {
      "epoch": 1.1051759071652238,
      "grad_norm": 105.91036987304688,
      "learning_rate": 3.61853011604347e-08,
      "loss": 0.2642,
      "step": 6000
    },
    {
      "epoch": 1.1051759071652238,
      "eval_loss": 1.0233356952667236,
      "eval_runtime": 78.3135,
      "eval_samples_per_second": 15.4,
      "eval_steps_per_second": 1.928,
      "step": 6000
    },
    {
      "epoch": 1.1972738994289924,
      "grad_norm": 221.4896240234375,
      "learning_rate": 3.503407625713759e-08,
      "loss": 0.2066,
      "step": 6500
    },
    {
      "epoch": 1.1972738994289924,
      "eval_loss": 1.0104238986968994,
      "eval_runtime": 78.6485,
      "eval_samples_per_second": 15.334,
      "eval_steps_per_second": 1.92,
      "step": 6500
    },
    {
      "epoch": 1.289371891692761,
      "grad_norm": 270.53253173828125,
      "learning_rate": 3.3882851353840485e-08,
      "loss": 0.2376,
      "step": 7000
    },
    {
      "epoch": 1.289371891692761,
      "eval_loss": 0.998353898525238,
      "eval_runtime": 78.5502,
      "eval_samples_per_second": 15.353,
      "eval_steps_per_second": 1.922,
      "step": 7000
    },
    {
      "epoch": 1.3814698839565298,
      "grad_norm": 402.9339904785156,
      "learning_rate": 3.2731626450543374e-08,
      "loss": 0.1931,
      "step": 7500
    },
    {
      "epoch": 1.3814698839565298,
      "eval_loss": 0.9887453317642212,
      "eval_runtime": 78.4833,
      "eval_samples_per_second": 15.366,
      "eval_steps_per_second": 1.924,
      "step": 7500
    },
    {
      "epoch": 1.4735678762202984,
      "grad_norm": 0.006355441175401211,
      "learning_rate": 3.158040154724626e-08,
      "loss": 0.2163,
      "step": 8000
    },
    {
      "epoch": 1.4735678762202984,
      "eval_loss": 0.9767189621925354,
      "eval_runtime": 78.5499,
      "eval_samples_per_second": 15.353,
      "eval_steps_per_second": 1.922,
      "step": 8000
    },
    {
      "epoch": 1.565665868484067,
      "grad_norm": 0.12982100248336792,
      "learning_rate": 3.042917664394916e-08,
      "loss": 0.1903,
      "step": 8500
    },
    {
      "epoch": 1.565665868484067,
      "eval_loss": 0.9664921760559082,
      "eval_runtime": 78.6832,
      "eval_samples_per_second": 15.327,
      "eval_steps_per_second": 1.919,
      "step": 8500
    },
    {
      "epoch": 1.6577638607478358,
      "grad_norm": 173.31788635253906,
      "learning_rate": 2.927795174065205e-08,
      "loss": 0.2069,
      "step": 9000
    },
    {
      "epoch": 1.6577638607478358,
      "eval_loss": 0.9571623206138611,
      "eval_runtime": 78.544,
      "eval_samples_per_second": 15.354,
      "eval_steps_per_second": 1.922,
      "step": 9000
    },
    {
      "epoch": 1.7498618530116044,
      "grad_norm": 358.7403259277344,
      "learning_rate": 2.8126726837354947e-08,
      "loss": 0.2093,
      "step": 9500
    },
    {
      "epoch": 1.7498618530116044,
      "eval_loss": 0.9496769905090332,
      "eval_runtime": 78.3124,
      "eval_samples_per_second": 15.4,
      "eval_steps_per_second": 1.928,
      "step": 9500
    }
  ],
  "logging_steps": 500,
  "max_steps": 21716,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3418243974496890.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}