|
{
  "best_metric": 0.7458393573760986,
  "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e3l57-l/checkpoint-4000",
  "epoch": 0.42662116040955633,
  "eval_steps": 500,
  "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05332764505119454,
      "grad_norm": 414.62841796875,
      "learning_rate": 4.911120591581342e-07,
      "loss": 0.3812,
      "step": 500
    },
    {
      "epoch": 0.05332764505119454,
      "eval_loss": 1.1163015365600586,
      "eval_runtime": 61.8962,
      "eval_samples_per_second": 15.946,
      "eval_steps_per_second": 2.003,
      "step": 500
    },
    {
      "epoch": 0.10665529010238908,
      "grad_norm": 34.08418655395508,
      "learning_rate": 4.822241183162685e-07,
      "loss": 0.2683,
      "step": 1000
    },
    {
      "epoch": 0.10665529010238908,
      "eval_loss": 0.9684447646141052,
      "eval_runtime": 61.821,
      "eval_samples_per_second": 15.965,
      "eval_steps_per_second": 2.006,
      "step": 1000
    },
    {
      "epoch": 0.1599829351535836,
      "grad_norm": 350.4574890136719,
      "learning_rate": 4.733361774744027e-07,
      "loss": 0.2119,
      "step": 1500
    },
    {
      "epoch": 0.1599829351535836,
      "eval_loss": 0.9099885821342468,
      "eval_runtime": 61.9606,
      "eval_samples_per_second": 15.929,
      "eval_steps_per_second": 2.001,
      "step": 1500
    },
    {
      "epoch": 0.21331058020477817,
      "grad_norm": 0.01787523180246353,
      "learning_rate": 4.6444823663253695e-07,
      "loss": 0.1889,
      "step": 2000
    },
    {
      "epoch": 0.21331058020477817,
      "eval_loss": 0.8620074987411499,
      "eval_runtime": 63.2824,
      "eval_samples_per_second": 15.597,
      "eval_steps_per_second": 1.959,
      "step": 2000
    },
    {
      "epoch": 0.2666382252559727,
      "grad_norm": 118.66907501220703,
      "learning_rate": 4.5556029579067116e-07,
      "loss": 0.2071,
      "step": 2500
    },
    {
      "epoch": 0.2666382252559727,
      "eval_loss": 0.791786253452301,
      "eval_runtime": 63.7932,
      "eval_samples_per_second": 15.472,
      "eval_steps_per_second": 1.944,
      "step": 2500
    },
    {
      "epoch": 0.3199658703071672,
      "grad_norm": 7.21087323185543e-09,
      "learning_rate": 4.4667235494880547e-07,
      "loss": 0.1588,
      "step": 3000
    },
    {
      "epoch": 0.3199658703071672,
      "eval_loss": 0.7657254338264465,
      "eval_runtime": 62.4738,
      "eval_samples_per_second": 15.799,
      "eval_steps_per_second": 1.985,
      "step": 3000
    },
    {
      "epoch": 0.37329351535836175,
      "grad_norm": 0.0014836661284789443,
      "learning_rate": 4.377844141069397e-07,
      "loss": 0.1718,
      "step": 3500
    },
    {
      "epoch": 0.37329351535836175,
      "eval_loss": 0.7610095739364624,
      "eval_runtime": 63.9118,
      "eval_samples_per_second": 15.443,
      "eval_steps_per_second": 1.94,
      "step": 3500
    },
    {
      "epoch": 0.42662116040955633,
      "grad_norm": 3.2739710807800293,
      "learning_rate": 4.2889647326507393e-07,
      "loss": 0.1113,
      "step": 4000
    },
    {
      "epoch": 0.42662116040955633,
      "eval_loss": 0.7458393573760986,
      "eval_runtime": 62.1803,
      "eval_samples_per_second": 15.873,
      "eval_steps_per_second": 1.994,
      "step": 4000
    }
  ],
  "logging_steps": 500,
  "max_steps": 28128,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1439752921742520.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|