{
  "best_metric": 0.2626972794532776,
  "best_model_checkpoint": "./drive/Shareddrives/CS198-Drones/[v4] Training Output/vit-tiny-patch16-224_rice-leaf-disease-augmented-v4_fft/checkpoint-448",
  "epoch": 15.0,
  "eval_steps": 64,
  "global_step": 1920,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5,
      "grad_norm": 9.17007064819336,
      "learning_rate": 7.265625e-06,
      "loss": 2.0564,
      "step": 64
    },
    {
      "epoch": 0.5,
      "eval_accuracy": 0.4899328859060403,
      "eval_loss": 1.4541155099868774,
      "eval_runtime": 8.722,
      "eval_samples_per_second": 34.167,
      "eval_steps_per_second": 0.573,
      "step": 64
    },
    {
      "epoch": 1.0,
      "grad_norm": 13.332218170166016,
      "learning_rate": 1.4765625e-05,
      "loss": 1.0767,
      "step": 128
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7651006711409396,
      "eval_loss": 0.6909474730491638,
      "eval_runtime": 8.6475,
      "eval_samples_per_second": 34.461,
      "eval_steps_per_second": 0.578,
      "step": 128
    },
    {
      "epoch": 1.5,
      "grad_norm": 9.764442443847656,
      "learning_rate": 2.2265625e-05,
      "loss": 0.4917,
      "step": 192
    },
    {
      "epoch": 1.5,
      "eval_accuracy": 0.8322147651006712,
      "eval_loss": 0.4307171106338501,
      "eval_runtime": 8.6026,
      "eval_samples_per_second": 34.641,
      "eval_steps_per_second": 0.581,
      "step": 192
    },
    {
      "epoch": 2.0,
      "grad_norm": 9.665606498718262,
      "learning_rate": 2.9765625e-05,
      "loss": 0.285,
      "step": 256
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9026845637583892,
      "eval_loss": 0.2932307720184326,
      "eval_runtime": 7.7763,
      "eval_samples_per_second": 38.322,
      "eval_steps_per_second": 0.643,
      "step": 256
    },
    {
      "epoch": 2.5,
      "grad_norm": 8.143477439880371,
      "learning_rate": 2.9084596206825315e-05,
      "loss": 0.0902,
      "step": 320
    },
    {
      "epoch": 2.5,
      "eval_accuracy": 0.8993288590604027,
      "eval_loss": 0.31344401836395264,
      "eval_runtime": 7.522,
      "eval_samples_per_second": 39.617,
      "eval_steps_per_second": 0.665,
      "step": 320
    },
    {
      "epoch": 3.0,
      "grad_norm": 2.6106486320495605,
      "learning_rate": 2.633961484257573e-05,
      "loss": 0.0588,
      "step": 384
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9161073825503355,
      "eval_loss": 0.3075862526893616,
      "eval_runtime": 8.3334,
      "eval_samples_per_second": 35.76,
      "eval_steps_per_second": 0.6,
      "step": 384
    },
    {
      "epoch": 3.5,
      "grad_norm": 1.9543571472167969,
      "learning_rate": 2.212085192038453e-05,
      "loss": 0.0155,
      "step": 448
    },
    {
      "epoch": 3.5,
      "eval_accuracy": 0.9395973154362416,
      "eval_loss": 0.2626972794532776,
      "eval_runtime": 8.3127,
      "eval_samples_per_second": 35.849,
      "eval_steps_per_second": 0.601,
      "step": 448
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.38745102286338806,
      "learning_rate": 1.6976609572058592e-05,
      "loss": 0.0066,
      "step": 512
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9295302013422819,
      "eval_loss": 0.299156129360199,
      "eval_runtime": 7.2879,
      "eval_samples_per_second": 40.89,
      "eval_steps_per_second": 0.686,
      "step": 512
    },
    {
      "epoch": 4.5,
      "grad_norm": 0.10160534083843231,
      "learning_rate": 1.1575472190259976e-05,
      "loss": 0.0017,
      "step": 576
    },
    {
      "epoch": 4.5,
      "eval_accuracy": 0.9228187919463087,
      "eval_loss": 0.2935960590839386,
      "eval_runtime": 8.4642,
      "eval_samples_per_second": 35.207,
      "eval_steps_per_second": 0.591,
      "step": 576
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.13512970507144928,
      "learning_rate": 6.619412176671753e-06,
      "loss": 0.0009,
      "step": 640
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9228187919463087,
      "eval_loss": 0.29606831073760986,
      "eval_runtime": 8.1466,
      "eval_samples_per_second": 36.58,
      "eval_steps_per_second": 0.614,
      "step": 640
    },
    {
      "epoch": 5.5,
      "grad_norm": 0.08882313966751099,
      "learning_rate": 2.7525563336129812e-06,
      "loss": 0.0006,
      "step": 704
    },
    {
      "epoch": 5.5,
      "eval_accuracy": 0.9228187919463087,
      "eval_loss": 0.30046388506889343,
      "eval_runtime": 7.478,
      "eval_samples_per_second": 39.85,
      "eval_steps_per_second": 0.669,
      "step": 704
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.08427230268716812,
      "learning_rate": 4.774703044353035e-07,
      "loss": 0.0005,
      "step": 768
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9228187919463087,
      "eval_loss": 0.3003771901130676,
      "eval_runtime": 8.3653,
      "eval_samples_per_second": 35.623,
      "eval_steps_per_second": 0.598,
      "step": 768
    },
    {
      "epoch": 6.5,
      "grad_norm": 0.03683856502175331,
      "learning_rate": 2.9910158634081504e-05,
      "loss": 0.0005,
      "step": 832
    },
    {
      "epoch": 6.5,
      "eval_accuracy": 0.9261744966442953,
      "eval_loss": 0.2867479920387268,
      "eval_runtime": 7.215,
      "eval_samples_per_second": 41.303,
      "eval_steps_per_second": 0.693,
      "step": 832
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.04806307703256607,
      "learning_rate": 2.8359951312200077e-05,
      "loss": 0.0004,
      "step": 896
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9295302013422819,
      "eval_loss": 0.2976870834827423,
      "eval_runtime": 8.3401,
      "eval_samples_per_second": 35.731,
      "eval_steps_per_second": 0.6,
      "step": 896
    },
    {
      "epoch": 7.5,
      "grad_norm": 0.033956822007894516,
      "learning_rate": 2.5073384322705278e-05,
      "loss": 0.0003,
      "step": 960
    },
    {
      "epoch": 7.5,
      "eval_accuracy": 0.9295302013422819,
      "eval_loss": 0.2943709194660187,
      "eval_runtime": 7.6673,
      "eval_samples_per_second": 38.867,
      "eval_steps_per_second": 0.652,
      "step": 960
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.031262967735528946,
      "learning_rate": 2.0477604608884026e-05,
      "loss": 0.0002,
      "step": 1024
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9295302013422819,
      "eval_loss": 0.30740392208099365,
      "eval_runtime": 8.2818,
      "eval_samples_per_second": 35.982,
      "eval_steps_per_second": 0.604,
      "step": 1024
    },
    {
      "epoch": 8.5,
      "grad_norm": 0.01630540005862713,
      "learning_rate": 1.516991423792483e-05,
      "loss": 0.0002,
      "step": 1088
    },
    {
      "epoch": 8.5,
      "eval_accuracy": 0.9328859060402684,
      "eval_loss": 0.3053071200847626,
      "eval_runtime": 8.2512,
      "eval_samples_per_second": 36.116,
      "eval_steps_per_second": 0.606,
      "step": 1088
    },
    {
      "epoch": 9.0,
      "grad_norm": 0.016130153089761734,
      "learning_rate": 9.840140535762432e-06,
      "loss": 0.0002,
      "step": 1152
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.9295302013422819,
      "eval_loss": 0.3097546696662903,
      "eval_runtime": 8.418,
      "eval_samples_per_second": 35.4,
      "eval_steps_per_second": 0.594,
      "step": 1152
    },
    {
      "epoch": 9.5,
      "grad_norm": 0.014978409744799137,
      "learning_rate": 5.180980944002794e-06,
      "loss": 0.0001,
      "step": 1216
    },
    {
      "epoch": 9.5,
      "eval_accuracy": 0.9295302013422819,
      "eval_loss": 0.310248464345932,
      "eval_runtime": 7.2498,
      "eval_samples_per_second": 41.105,
      "eval_steps_per_second": 0.69,
      "step": 1216
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.013572459109127522,
      "learning_rate": 1.7979748550475833e-06,
      "loss": 0.0001,
      "step": 1280
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.9261744966442953,
      "eval_loss": 0.3105408847332001,
      "eval_runtime": 8.3297,
      "eval_samples_per_second": 35.776,
      "eval_steps_per_second": 0.6,
      "step": 1280
    },
    {
      "epoch": 10.5,
      "grad_norm": 0.010590254329144955,
      "learning_rate": 1.3080316225364152e-07,
      "loss": 0.0001,
      "step": 1344
    },
    {
      "epoch": 10.5,
      "eval_accuracy": 0.9261744966442953,
      "eval_loss": 0.3105214238166809,
      "eval_runtime": 7.2656,
      "eval_samples_per_second": 41.015,
      "eval_steps_per_second": 0.688,
      "step": 1344
    },
    {
      "epoch": 11.0,
      "grad_norm": 0.011299003846943378,
      "learning_rate": 2.9603855973006482e-05,
      "loss": 0.0001,
      "step": 1408
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.9261744966442953,
      "eval_loss": 0.32019779086112976,
      "eval_runtime": 8.1649,
      "eval_samples_per_second": 36.498,
      "eval_steps_per_second": 0.612,
      "step": 1408
    },
    {
      "epoch": 11.5,
      "grad_norm": 0.016378453001379967,
      "learning_rate": 2.7440488243452587e-05,
      "loss": 0.0001,
      "step": 1472
    },
    {
      "epoch": 11.5,
      "eval_accuracy": 0.9295302013422819,
      "eval_loss": 0.3183320462703705,
      "eval_runtime": 8.0678,
      "eval_samples_per_second": 36.937,
      "eval_steps_per_second": 0.62,
      "step": 1472
    },
    {
      "epoch": 12.0,
      "grad_norm": 0.007818573154509068,
      "learning_rate": 2.3660261176123762e-05,
      "loss": 0.0001,
      "step": 1536
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.9328859060402684,
      "eval_loss": 0.3130977153778076,
      "eval_runtime": 8.2508,
      "eval_samples_per_second": 36.118,
      "eval_steps_per_second": 0.606,
      "step": 1536
    },
    {
      "epoch": 12.5,
      "grad_norm": 0.00784409698098898,
      "learning_rate": 1.875448148769462e-05,
      "loss": 0.0001,
      "step": 1600
    },
    {
      "epoch": 12.5,
      "eval_accuracy": 0.9295302013422819,
      "eval_loss": 0.3157329261302948,
      "eval_runtime": 7.1968,
      "eval_samples_per_second": 41.407,
      "eval_steps_per_second": 0.695,
      "step": 1600
    },
    {
      "epoch": 13.0,
      "grad_norm": 0.006332057528197765,
      "learning_rate": 1.3360741171588585e-05,
      "loss": 0.0001,
      "step": 1664
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.9228187919463087,
      "eval_loss": 0.3237887918949127,
      "eval_runtime": 8.2082,
      "eval_samples_per_second": 36.305,
      "eval_steps_per_second": 0.609,
      "step": 1664
    },
    {
      "epoch": 13.5,
      "grad_norm": 0.006929404567927122,
      "learning_rate": 8.180051251245103e-06,
      "loss": 0.0001,
      "step": 1728
    },
    {
      "epoch": 13.5,
      "eval_accuracy": 0.9228187919463087,
      "eval_loss": 0.3219589591026306,
      "eval_runtime": 7.2368,
      "eval_samples_per_second": 41.179,
      "eval_steps_per_second": 0.691,
      "step": 1728
    },
    {
      "epoch": 14.0,
      "grad_norm": 0.005624026525765657,
      "learning_rate": 3.885733119675617e-06,
      "loss": 0.0001,
      "step": 1792
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.9228187919463087,
      "eval_loss": 0.3266230821609497,
      "eval_runtime": 7.1645,
      "eval_samples_per_second": 41.594,
      "eval_steps_per_second": 0.698,
      "step": 1792
    },
    {
      "epoch": 14.5,
      "grad_norm": 0.006068665534257889,
      "learning_rate": 1.0359086314671929e-06,
      "loss": 0.0001,
      "step": 1856
    },
    {
      "epoch": 14.5,
      "eval_accuracy": 0.9228187919463087,
      "eval_loss": 0.32735249400138855,
      "eval_runtime": 7.4237,
      "eval_samples_per_second": 40.142,
      "eval_steps_per_second": 0.674,
      "step": 1856
    },
    {
      "epoch": 15.0,
      "grad_norm": 0.007146658841520548,
      "learning_rate": 9.62392481628771e-10,
      "loss": 0.0001,
      "step": 1920
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.9228187919463087,
      "eval_loss": 0.32691627740859985,
      "eval_runtime": 7.1631,
      "eval_samples_per_second": 41.602,
      "eval_steps_per_second": 0.698,
      "step": 1920
    },
    {
      "epoch": 15.0,
      "step": 1920,
      "total_flos": 6.132781352484864e+17,
      "train_loss": 0.1362585227402936,
      "train_runtime": 4195.4959,
      "train_samples_per_second": 29.289,
      "train_steps_per_second": 0.458
    }
  ],
  "logging_steps": 64,
  "max_steps": 1920,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 64,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.132781352484864e+17,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}