{
  "best_metric": 0.22316619753837585,
  "best_model_checkpoint": "miner_id_24/checkpoint-75",
  "epoch": 0.9799918334013883,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013066557778685178,
      "grad_norm": 1.6932507753372192,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 1.2152,
      "step": 1
    },
    {
      "epoch": 0.013066557778685178,
      "eval_loss": 1.5535104274749756,
      "eval_runtime": 1.5142,
      "eval_samples_per_second": 33.021,
      "eval_steps_per_second": 8.585,
      "step": 1
    },
    {
      "epoch": 0.026133115557370357,
      "grad_norm": 2.5225236415863037,
      "learning_rate": 6.666666666666667e-05,
      "loss": 1.3169,
      "step": 2
    },
    {
      "epoch": 0.039199673336055535,
      "grad_norm": 2.8033957481384277,
      "learning_rate": 0.0001,
      "loss": 1.3364,
      "step": 3
    },
    {
      "epoch": 0.05226623111474071,
      "grad_norm": 2.008617877960205,
      "learning_rate": 9.99571699711836e-05,
      "loss": 1.2427,
      "step": 4
    },
    {
      "epoch": 0.06533278889342589,
      "grad_norm": 2.0534799098968506,
      "learning_rate": 9.982876141412856e-05,
      "loss": 1.3013,
      "step": 5
    },
    {
      "epoch": 0.07839934667211107,
      "grad_norm": 2.1992785930633545,
      "learning_rate": 9.961501876182148e-05,
      "loss": 1.2599,
      "step": 6
    },
    {
      "epoch": 0.09146590445079625,
      "grad_norm": 2.2852184772491455,
      "learning_rate": 9.931634888554937e-05,
      "loss": 1.1142,
      "step": 7
    },
    {
      "epoch": 0.10453246222948143,
      "grad_norm": 1.6609108448028564,
      "learning_rate": 9.893332032039701e-05,
      "loss": 1.521,
      "step": 8
    },
    {
      "epoch": 0.1175990200081666,
      "grad_norm": 2.5087709426879883,
      "learning_rate": 9.846666218300807e-05,
      "loss": 1.3596,
      "step": 9
    },
    {
      "epoch": 0.13066557778685178,
      "grad_norm": 2.0769166946411133,
      "learning_rate": 9.791726278367022e-05,
      "loss": 1.2476,
      "step": 10
    },
    {
      "epoch": 0.14373213556553696,
      "grad_norm": 2.1793136596679688,
      "learning_rate": 9.728616793536588e-05,
      "loss": 1.3515,
      "step": 11
    },
    {
      "epoch": 0.15679869334422214,
      "grad_norm": 3.35315203666687,
      "learning_rate": 9.657457896300791e-05,
      "loss": 1.6377,
      "step": 12
    },
    {
      "epoch": 0.16986525112290732,
      "grad_norm": 1.5212485790252686,
      "learning_rate": 9.578385041664925e-05,
      "loss": 0.9341,
      "step": 13
    },
    {
      "epoch": 0.1829318089015925,
      "grad_norm": 1.2225775718688965,
      "learning_rate": 9.491548749301997e-05,
      "loss": 0.9495,
      "step": 14
    },
    {
      "epoch": 0.19599836668027767,
      "grad_norm": 2.4777283668518066,
      "learning_rate": 9.397114317029975e-05,
      "loss": 0.7017,
      "step": 15
    },
    {
      "epoch": 0.20906492445896285,
      "grad_norm": 1.527767300605774,
      "learning_rate": 9.295261506157986e-05,
      "loss": 0.7389,
      "step": 16
    },
    {
      "epoch": 0.22213148223764803,
      "grad_norm": 1.3528121709823608,
      "learning_rate": 9.186184199300464e-05,
      "loss": 0.8243,
      "step": 17
    },
    {
      "epoch": 0.2351980400163332,
      "grad_norm": 1.3979260921478271,
      "learning_rate": 9.070090031310558e-05,
      "loss": 0.7762,
      "step": 18
    },
    {
      "epoch": 0.2482645977950184,
      "grad_norm": 2.6196300983428955,
      "learning_rate": 8.947199994035401e-05,
      "loss": 0.6698,
      "step": 19
    },
    {
      "epoch": 0.26133115557370357,
      "grad_norm": 1.5927413702011108,
      "learning_rate": 8.817748015645558e-05,
      "loss": 0.8977,
      "step": 20
    },
    {
      "epoch": 0.2743977133523887,
      "grad_norm": 1.7954849004745483,
      "learning_rate": 8.681980515339464e-05,
      "loss": 1.12,
      "step": 21
    },
    {
      "epoch": 0.2874642711310739,
      "grad_norm": 2.6149048805236816,
      "learning_rate": 8.540155934270471e-05,
      "loss": 0.9328,
      "step": 22
    },
    {
      "epoch": 0.3005308289097591,
      "grad_norm": 2.0966615676879883,
      "learning_rate": 8.392544243589427e-05,
      "loss": 0.8724,
      "step": 23
    },
    {
      "epoch": 0.3135973866884443,
      "grad_norm": 2.034907579421997,
      "learning_rate": 8.239426430539243e-05,
      "loss": 1.0333,
      "step": 24
    },
    {
      "epoch": 0.32666394446712943,
      "grad_norm": 3.575545310974121,
      "learning_rate": 8.081093963579707e-05,
      "loss": 1.5723,
      "step": 25
    },
    {
      "epoch": 0.32666394446712943,
      "eval_loss": 0.5558741092681885,
      "eval_runtime": 1.5449,
      "eval_samples_per_second": 32.364,
      "eval_steps_per_second": 8.415,
      "step": 25
    },
    {
      "epoch": 0.33973050224581464,
      "grad_norm": 1.975691318511963,
      "learning_rate": 7.917848237560709e-05,
      "loss": 0.5943,
      "step": 26
    },
    {
      "epoch": 0.3527970600244998,
      "grad_norm": 1.5834639072418213,
      "learning_rate": 7.75e-05,
      "loss": 0.5643,
      "step": 27
    },
    {
      "epoch": 0.365863617803185,
      "grad_norm": 1.6365801095962524,
      "learning_rate": 7.577868759557654e-05,
      "loss": 0.4526,
      "step": 28
    },
    {
      "epoch": 0.37893017558187014,
      "grad_norm": 1.463999629020691,
      "learning_rate": 7.401782177833148e-05,
      "loss": 0.4972,
      "step": 29
    },
    {
      "epoch": 0.39199673336055535,
      "grad_norm": 1.2677172422409058,
      "learning_rate": 7.222075445642904e-05,
      "loss": 0.5389,
      "step": 30
    },
    {
      "epoch": 0.4050632911392405,
      "grad_norm": 2.432539224624634,
      "learning_rate": 7.03909064496551e-05,
      "loss": 0.4597,
      "step": 31
    },
    {
      "epoch": 0.4181298489179257,
      "grad_norm": 2.4917757511138916,
      "learning_rate": 6.853176097769229e-05,
      "loss": 0.4929,
      "step": 32
    },
    {
      "epoch": 0.43119640669661086,
      "grad_norm": 1.3739546537399292,
      "learning_rate": 6.664685702961344e-05,
      "loss": 0.9248,
      "step": 33
    },
    {
      "epoch": 0.44426296447529606,
      "grad_norm": 1.6721895933151245,
      "learning_rate": 6.473978262721463e-05,
      "loss": 0.6351,
      "step": 34
    },
    {
      "epoch": 0.4573295222539812,
      "grad_norm": 1.3729504346847534,
      "learning_rate": 6.281416799501188e-05,
      "loss": 0.5844,
      "step": 35
    },
    {
      "epoch": 0.4703960800326664,
      "grad_norm": 1.3768748044967651,
      "learning_rate": 6.087367864990233e-05,
      "loss": 0.6757,
      "step": 36
    },
    {
      "epoch": 0.48346263781135157,
      "grad_norm": 2.0230472087860107,
      "learning_rate": 5.8922008423644624e-05,
      "loss": 1.0381,
      "step": 37
    },
    {
      "epoch": 0.4965291955900368,
      "grad_norm": 1.3043603897094727,
      "learning_rate": 5.696287243144013e-05,
      "loss": 0.4569,
      "step": 38
    },
    {
      "epoch": 0.509595753368722,
      "grad_norm": 0.9272847175598145,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.4936,
      "step": 39
    },
    {
      "epoch": 0.5226623111474071,
      "grad_norm": 1.6384899616241455,
      "learning_rate": 5.303712756855988e-05,
      "loss": 0.4535,
      "step": 40
    },
    {
      "epoch": 0.5357288689260923,
      "grad_norm": 1.546187400817871,
      "learning_rate": 5.107799157635538e-05,
      "loss": 0.3763,
      "step": 41
    },
    {
      "epoch": 0.5487954267047774,
      "grad_norm": 1.2767608165740967,
      "learning_rate": 4.912632135009769e-05,
      "loss": 0.4207,
      "step": 42
    },
    {
      "epoch": 0.5618619844834626,
      "grad_norm": 1.1065655946731567,
      "learning_rate": 4.718583200498814e-05,
      "loss": 0.3935,
      "step": 43
    },
    {
      "epoch": 0.5749285422621478,
      "grad_norm": 2.4505350589752197,
      "learning_rate": 4.526021737278538e-05,
      "loss": 0.2702,
      "step": 44
    },
    {
      "epoch": 0.587995100040833,
      "grad_norm": 1.1374635696411133,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 0.6616,
      "step": 45
    },
    {
      "epoch": 0.6010616578195181,
      "grad_norm": 1.3470594882965088,
      "learning_rate": 4.146823902230772e-05,
      "loss": 0.7169,
      "step": 46
    },
    {
      "epoch": 0.6141282155982033,
      "grad_norm": 1.4238591194152832,
      "learning_rate": 3.960909355034491e-05,
      "loss": 0.4089,
      "step": 47
    },
    {
      "epoch": 0.6271947733768886,
      "grad_norm": 1.1624187231063843,
      "learning_rate": 3.777924554357096e-05,
      "loss": 0.5297,
      "step": 48
    },
    {
      "epoch": 0.6402613311555737,
      "grad_norm": 1.2812923192977905,
      "learning_rate": 3.598217822166854e-05,
      "loss": 0.5576,
      "step": 49
    },
    {
      "epoch": 0.6533278889342589,
      "grad_norm": 3.029040575027466,
      "learning_rate": 3.422131240442349e-05,
      "loss": 0.9427,
      "step": 50
    },
    {
      "epoch": 0.6533278889342589,
      "eval_loss": 0.3033914864063263,
      "eval_runtime": 1.537,
      "eval_samples_per_second": 32.532,
      "eval_steps_per_second": 8.458,
      "step": 50
    },
    {
      "epoch": 0.666394446712944,
      "grad_norm": 0.9900059103965759,
      "learning_rate": 3.250000000000001e-05,
      "loss": 0.3231,
      "step": 51
    },
    {
      "epoch": 0.6794610044916293,
      "grad_norm": 0.8345087766647339,
      "learning_rate": 3.082151762439293e-05,
      "loss": 0.3644,
      "step": 52
    },
    {
      "epoch": 0.6925275622703144,
      "grad_norm": 0.8801731467247009,
      "learning_rate": 2.9189060364202943e-05,
      "loss": 0.2791,
      "step": 53
    },
    {
      "epoch": 0.7055941200489996,
      "grad_norm": 1.1393957138061523,
      "learning_rate": 2.760573569460757e-05,
      "loss": 0.2965,
      "step": 54
    },
    {
      "epoch": 0.7186606778276847,
      "grad_norm": 0.9782978892326355,
      "learning_rate": 2.6074557564105727e-05,
      "loss": 0.2393,
      "step": 55
    },
    {
      "epoch": 0.73172723560637,
      "grad_norm": 0.8556644916534424,
      "learning_rate": 2.459844065729529e-05,
      "loss": 0.2027,
      "step": 56
    },
    {
      "epoch": 0.7447937933850551,
      "grad_norm": 1.0292524099349976,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 0.275,
      "step": 57
    },
    {
      "epoch": 0.7578603511637403,
      "grad_norm": 1.2255464792251587,
      "learning_rate": 2.1822519843544424e-05,
      "loss": 0.6167,
      "step": 58
    },
    {
      "epoch": 0.7709269089424254,
      "grad_norm": 1.345024824142456,
      "learning_rate": 2.0528000059645997e-05,
      "loss": 0.51,
      "step": 59
    },
    {
      "epoch": 0.7839934667211107,
      "grad_norm": 2.0753092765808105,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 0.3936,
      "step": 60
    },
    {
      "epoch": 0.7970600244997958,
      "grad_norm": 1.9613157510757446,
      "learning_rate": 1.8138158006995364e-05,
      "loss": 0.4177,
      "step": 61
    },
    {
      "epoch": 0.810126582278481,
      "grad_norm": 1.782265543937683,
      "learning_rate": 1.7047384938420154e-05,
      "loss": 0.5354,
      "step": 62
    },
    {
      "epoch": 0.8231931400571662,
      "grad_norm": 0.880858302116394,
      "learning_rate": 1.602885682970026e-05,
      "loss": 0.227,
      "step": 63
    },
    {
      "epoch": 0.8362596978358514,
      "grad_norm": 0.643760621547699,
      "learning_rate": 1.5084512506980026e-05,
      "loss": 0.339,
      "step": 64
    },
    {
      "epoch": 0.8493262556145366,
      "grad_norm": 0.610495924949646,
      "learning_rate": 1.4216149583350754e-05,
      "loss": 0.2043,
      "step": 65
    },
    {
      "epoch": 0.8623928133932217,
      "grad_norm": 0.7079125642776489,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 0.2392,
      "step": 66
    },
    {
      "epoch": 0.8754593711719069,
      "grad_norm": 0.6936648488044739,
      "learning_rate": 1.2713832064634126e-05,
      "loss": 0.2807,
      "step": 67
    },
    {
      "epoch": 0.8885259289505921,
      "grad_norm": 0.7526812553405762,
      "learning_rate": 1.2082737216329794e-05,
      "loss": 0.2831,
      "step": 68
    },
    {
      "epoch": 0.9015924867292773,
      "grad_norm": 1.716686725616455,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 0.117,
      "step": 69
    },
    {
      "epoch": 0.9146590445079624,
      "grad_norm": 0.9851632714271545,
      "learning_rate": 1.1066679679603e-05,
      "loss": 0.4664,
      "step": 70
    },
    {
      "epoch": 0.9277256022866476,
      "grad_norm": 1.2416988611221313,
      "learning_rate": 1.0683651114450641e-05,
      "loss": 0.6231,
      "step": 71
    },
    {
      "epoch": 0.9407921600653328,
      "grad_norm": 0.8936472535133362,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 0.3633,
      "step": 72
    },
    {
      "epoch": 0.953858717844018,
      "grad_norm": 1.051101803779602,
      "learning_rate": 1.017123858587145e-05,
      "loss": 0.4201,
      "step": 73
    },
    {
      "epoch": 0.9669252756227031,
      "grad_norm": 1.0521873235702515,
      "learning_rate": 1.00428300288164e-05,
      "loss": 0.395,
      "step": 74
    },
    {
      "epoch": 0.9799918334013883,
      "grad_norm": 2.4216976165771484,
      "learning_rate": 1e-05,
      "loss": 0.6176,
      "step": 75
    },
    {
      "epoch": 0.9799918334013883,
      "eval_loss": 0.22316619753837585,
      "eval_runtime": 1.5353,
      "eval_samples_per_second": 32.566,
      "eval_steps_per_second": 8.467,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.42951368966144e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}