{
"best_metric": 0.3578774929046631,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.644468313641246,
"eval_steps": 50,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008592910848549946,
"grad_norm": 24.527587890625,
"learning_rate": 5e-06,
"loss": 26.7779,
"step": 1
},
{
"epoch": 0.008592910848549946,
"eval_loss": 1.7484409809112549,
"eval_runtime": 49.6443,
"eval_samples_per_second": 7.896,
"eval_steps_per_second": 1.974,
"step": 1
},
{
"epoch": 0.017185821697099892,
"grad_norm": 37.25108337402344,
"learning_rate": 1e-05,
"loss": 30.2279,
"step": 2
},
{
"epoch": 0.02577873254564984,
"grad_norm": 26.99140167236328,
"learning_rate": 1.5e-05,
"loss": 30.4207,
"step": 3
},
{
"epoch": 0.034371643394199784,
"grad_norm": 36.530818939208984,
"learning_rate": 2e-05,
"loss": 32.068,
"step": 4
},
{
"epoch": 0.04296455424274973,
"grad_norm": 21.84710693359375,
"learning_rate": 2.5e-05,
"loss": 26.86,
"step": 5
},
{
"epoch": 0.05155746509129968,
"grad_norm": 28.68981170654297,
"learning_rate": 3e-05,
"loss": 28.5753,
"step": 6
},
{
"epoch": 0.06015037593984962,
"grad_norm": 24.717876434326172,
"learning_rate": 3.5e-05,
"loss": 23.4737,
"step": 7
},
{
"epoch": 0.06874328678839957,
"grad_norm": 31.985191345214844,
"learning_rate": 4e-05,
"loss": 23.7119,
"step": 8
},
{
"epoch": 0.07733619763694952,
"grad_norm": 24.535207748413086,
"learning_rate": 4.5e-05,
"loss": 21.2995,
"step": 9
},
{
"epoch": 0.08592910848549946,
"grad_norm": 20.784114837646484,
"learning_rate": 5e-05,
"loss": 19.4062,
"step": 10
},
{
"epoch": 0.09452201933404941,
"grad_norm": 22.949960708618164,
"learning_rate": 5.500000000000001e-05,
"loss": 19.894,
"step": 11
},
{
"epoch": 0.10311493018259936,
"grad_norm": 18.90312957763672,
"learning_rate": 6e-05,
"loss": 15.6992,
"step": 12
},
{
"epoch": 0.11170784103114931,
"grad_norm": 12.863642692565918,
"learning_rate": 6.500000000000001e-05,
"loss": 13.7171,
"step": 13
},
{
"epoch": 0.12030075187969924,
"grad_norm": 10.37125015258789,
"learning_rate": 7e-05,
"loss": 13.0682,
"step": 14
},
{
"epoch": 0.1288936627282492,
"grad_norm": 11.138198852539062,
"learning_rate": 7.500000000000001e-05,
"loss": 12.1682,
"step": 15
},
{
"epoch": 0.13748657357679914,
"grad_norm": 14.989235877990723,
"learning_rate": 8e-05,
"loss": 11.8289,
"step": 16
},
{
"epoch": 0.1460794844253491,
"grad_norm": 11.183653831481934,
"learning_rate": 8.5e-05,
"loss": 9.9752,
"step": 17
},
{
"epoch": 0.15467239527389903,
"grad_norm": 10.320233345031738,
"learning_rate": 9e-05,
"loss": 8.4,
"step": 18
},
{
"epoch": 0.16326530612244897,
"grad_norm": 7.7508039474487305,
"learning_rate": 9.5e-05,
"loss": 8.694,
"step": 19
},
{
"epoch": 0.17185821697099893,
"grad_norm": 8.021259307861328,
"learning_rate": 0.0001,
"loss": 8.0313,
"step": 20
},
{
"epoch": 0.18045112781954886,
"grad_norm": 7.663169860839844,
"learning_rate": 9.991845519630678e-05,
"loss": 10.0139,
"step": 21
},
{
"epoch": 0.18904403866809882,
"grad_norm": 7.0549798011779785,
"learning_rate": 9.967408676742751e-05,
"loss": 8.3892,
"step": 22
},
{
"epoch": 0.19763694951664876,
"grad_norm": 6.753633975982666,
"learning_rate": 9.926769179238466e-05,
"loss": 7.8297,
"step": 23
},
{
"epoch": 0.20622986036519872,
"grad_norm": 7.454422950744629,
"learning_rate": 9.870059584711668e-05,
"loss": 7.9531,
"step": 24
},
{
"epoch": 0.21482277121374865,
"grad_norm": 4.801022052764893,
"learning_rate": 9.797464868072488e-05,
"loss": 7.3965,
"step": 25
},
{
"epoch": 0.22341568206229862,
"grad_norm": 5.871040344238281,
"learning_rate": 9.709221818197624e-05,
"loss": 8.6411,
"step": 26
},
{
"epoch": 0.23200859291084855,
"grad_norm": 6.431129455566406,
"learning_rate": 9.60561826557425e-05,
"loss": 6.2586,
"step": 27
},
{
"epoch": 0.24060150375939848,
"grad_norm": 4.8356032371521,
"learning_rate": 9.486992143456792e-05,
"loss": 6.9354,
"step": 28
},
{
"epoch": 0.24919441460794844,
"grad_norm": 4.676732540130615,
"learning_rate": 9.353730385598887e-05,
"loss": 7.0826,
"step": 29
},
{
"epoch": 0.2577873254564984,
"grad_norm": 4.927655220031738,
"learning_rate": 9.206267664155907e-05,
"loss": 6.9211,
"step": 30
},
{
"epoch": 0.2663802363050483,
"grad_norm": 5.393516540527344,
"learning_rate": 9.045084971874738e-05,
"loss": 6.5549,
"step": 31
},
{
"epoch": 0.2749731471535983,
"grad_norm": 5.347249984741211,
"learning_rate": 8.870708053195413e-05,
"loss": 7.0554,
"step": 32
},
{
"epoch": 0.28356605800214824,
"grad_norm": 5.3050312995910645,
"learning_rate": 8.683705689382024e-05,
"loss": 6.4794,
"step": 33
},
{
"epoch": 0.2921589688506982,
"grad_norm": 5.1706037521362305,
"learning_rate": 8.484687843276469e-05,
"loss": 6.7291,
"step": 34
},
{
"epoch": 0.3007518796992481,
"grad_norm": 5.202975749969482,
"learning_rate": 8.274303669726426e-05,
"loss": 6.4802,
"step": 35
},
{
"epoch": 0.30934479054779807,
"grad_norm": 5.066944599151611,
"learning_rate": 8.053239398177191e-05,
"loss": 5.5754,
"step": 36
},
{
"epoch": 0.317937701396348,
"grad_norm": 5.513393878936768,
"learning_rate": 7.822216094333847e-05,
"loss": 5.9398,
"step": 37
},
{
"epoch": 0.32653061224489793,
"grad_norm": 6.0009565353393555,
"learning_rate": 7.58198730819481e-05,
"loss": 6.3022,
"step": 38
},
{
"epoch": 0.3351235230934479,
"grad_norm": 5.408257007598877,
"learning_rate": 7.333336616128369e-05,
"loss": 5.3976,
"step": 39
},
{
"epoch": 0.34371643394199786,
"grad_norm": 6.3030195236206055,
"learning_rate": 7.077075065009433e-05,
"loss": 6.669,
"step": 40
},
{
"epoch": 0.3523093447905478,
"grad_norm": 4.973893642425537,
"learning_rate": 6.814038526753205e-05,
"loss": 6.0552,
"step": 41
},
{
"epoch": 0.3609022556390977,
"grad_norm": 5.095200061798096,
"learning_rate": 6.545084971874738e-05,
"loss": 5.2656,
"step": 42
},
{
"epoch": 0.3694951664876477,
"grad_norm": 5.441334247589111,
"learning_rate": 6.271091670967436e-05,
"loss": 6.0926,
"step": 43
},
{
"epoch": 0.37808807733619765,
"grad_norm": 5.907732009887695,
"learning_rate": 5.992952333228728e-05,
"loss": 6.2611,
"step": 44
},
{
"epoch": 0.3866809881847476,
"grad_norm": 5.2476348876953125,
"learning_rate": 5.7115741913664264e-05,
"loss": 5.114,
"step": 45
},
{
"epoch": 0.3952738990332975,
"grad_norm": 4.924515724182129,
"learning_rate": 5.427875042394199e-05,
"loss": 4.9915,
"step": 46
},
{
"epoch": 0.4038668098818475,
"grad_norm": 5.901587963104248,
"learning_rate": 5.142780253968481e-05,
"loss": 5.8679,
"step": 47
},
{
"epoch": 0.41245972073039744,
"grad_norm": 4.880833625793457,
"learning_rate": 4.85721974603152e-05,
"loss": 5.1338,
"step": 48
},
{
"epoch": 0.42105263157894735,
"grad_norm": 5.361891746520996,
"learning_rate": 4.5721249576058027e-05,
"loss": 5.8578,
"step": 49
},
{
"epoch": 0.4296455424274973,
"grad_norm": 5.202010631561279,
"learning_rate": 4.288425808633575e-05,
"loss": 5.4502,
"step": 50
},
{
"epoch": 0.4296455424274973,
"eval_loss": 0.3578774929046631,
"eval_runtime": 50.006,
"eval_samples_per_second": 7.839,
"eval_steps_per_second": 1.96,
"step": 50
},
{
"epoch": 0.43823845327604727,
"grad_norm": 5.766439914703369,
"learning_rate": 4.007047666771274e-05,
"loss": 4.9625,
"step": 51
},
{
"epoch": 0.44683136412459723,
"grad_norm": 5.521291255950928,
"learning_rate": 3.728908329032567e-05,
"loss": 4.9848,
"step": 52
},
{
"epoch": 0.45542427497314714,
"grad_norm": 6.785717487335205,
"learning_rate": 3.4549150281252636e-05,
"loss": 5.913,
"step": 53
},
{
"epoch": 0.4640171858216971,
"grad_norm": 6.411205768585205,
"learning_rate": 3.1859614732467954e-05,
"loss": 5.3654,
"step": 54
},
{
"epoch": 0.47261009667024706,
"grad_norm": 5.137513637542725,
"learning_rate": 2.9229249349905684e-05,
"loss": 4.7431,
"step": 55
},
{
"epoch": 0.48120300751879697,
"grad_norm": 6.2091755867004395,
"learning_rate": 2.6666633838716314e-05,
"loss": 4.7387,
"step": 56
},
{
"epoch": 0.4897959183673469,
"grad_norm": 6.7059173583984375,
"learning_rate": 2.418012691805191e-05,
"loss": 5.7861,
"step": 57
},
{
"epoch": 0.4983888292158969,
"grad_norm": 7.024590969085693,
"learning_rate": 2.1777839056661554e-05,
"loss": 5.8263,
"step": 58
},
{
"epoch": 0.5069817400644469,
"grad_norm": 6.100332736968994,
"learning_rate": 1.946760601822809e-05,
"loss": 4.9064,
"step": 59
},
{
"epoch": 0.5155746509129968,
"grad_norm": 5.492628574371338,
"learning_rate": 1.725696330273575e-05,
"loss": 4.977,
"step": 60
},
{
"epoch": 0.5241675617615468,
"grad_norm": 6.51470947265625,
"learning_rate": 1.5153121567235335e-05,
"loss": 4.8705,
"step": 61
},
{
"epoch": 0.5327604726100966,
"grad_norm": 5.699104309082031,
"learning_rate": 1.3162943106179749e-05,
"loss": 5.0406,
"step": 62
},
{
"epoch": 0.5413533834586466,
"grad_norm": 5.825130939483643,
"learning_rate": 1.1292919468045877e-05,
"loss": 5.1809,
"step": 63
},
{
"epoch": 0.5499462943071965,
"grad_norm": 6.401869773864746,
"learning_rate": 9.549150281252633e-06,
"loss": 6.0405,
"step": 64
},
{
"epoch": 0.5585392051557465,
"grad_norm": 6.119671821594238,
"learning_rate": 7.937323358440935e-06,
"loss": 5.5007,
"step": 65
},
{
"epoch": 0.5671321160042965,
"grad_norm": 6.092080593109131,
"learning_rate": 6.462696144011149e-06,
"loss": 4.8658,
"step": 66
},
{
"epoch": 0.5757250268528464,
"grad_norm": 5.349208831787109,
"learning_rate": 5.13007856543209e-06,
"loss": 4.3778,
"step": 67
},
{
"epoch": 0.5843179377013964,
"grad_norm": 5.576518535614014,
"learning_rate": 3.9438173442575e-06,
"loss": 4.7543,
"step": 68
},
{
"epoch": 0.5929108485499462,
"grad_norm": 5.412608623504639,
"learning_rate": 2.9077818180237693e-06,
"loss": 4.2394,
"step": 69
},
{
"epoch": 0.6015037593984962,
"grad_norm": 5.888413906097412,
"learning_rate": 2.0253513192751373e-06,
"loss": 5.4023,
"step": 70
},
{
"epoch": 0.6100966702470462,
"grad_norm": 5.344703197479248,
"learning_rate": 1.2994041528833266e-06,
"loss": 4.2625,
"step": 71
},
{
"epoch": 0.6186895810955961,
"grad_norm": 5.677237033843994,
"learning_rate": 7.323082076153509e-07,
"loss": 4.9872,
"step": 72
},
{
"epoch": 0.6272824919441461,
"grad_norm": 6.14519739151001,
"learning_rate": 3.2591323257248893e-07,
"loss": 4.9022,
"step": 73
},
{
"epoch": 0.635875402792696,
"grad_norm": 5.074471950531006,
"learning_rate": 8.15448036932176e-08,
"loss": 4.4077,
"step": 74
},
{
"epoch": 0.644468313641246,
"grad_norm": 5.900351047515869,
"learning_rate": 0.0,
"loss": 5.002,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.991525977489408e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}