{
"best_metric": 0.3578774929046631,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.4296455424274973,
"eval_steps": 50,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008592910848549946,
"grad_norm": 24.527587890625,
"learning_rate": 5e-06,
"loss": 26.7779,
"step": 1
},
{
"epoch": 0.008592910848549946,
"eval_loss": 1.7484409809112549,
"eval_runtime": 49.6443,
"eval_samples_per_second": 7.896,
"eval_steps_per_second": 1.974,
"step": 1
},
{
"epoch": 0.017185821697099892,
"grad_norm": 37.25108337402344,
"learning_rate": 1e-05,
"loss": 30.2279,
"step": 2
},
{
"epoch": 0.02577873254564984,
"grad_norm": 26.99140167236328,
"learning_rate": 1.5e-05,
"loss": 30.4207,
"step": 3
},
{
"epoch": 0.034371643394199784,
"grad_norm": 36.530818939208984,
"learning_rate": 2e-05,
"loss": 32.068,
"step": 4
},
{
"epoch": 0.04296455424274973,
"grad_norm": 21.84710693359375,
"learning_rate": 2.5e-05,
"loss": 26.86,
"step": 5
},
{
"epoch": 0.05155746509129968,
"grad_norm": 28.68981170654297,
"learning_rate": 3e-05,
"loss": 28.5753,
"step": 6
},
{
"epoch": 0.06015037593984962,
"grad_norm": 24.717876434326172,
"learning_rate": 3.5e-05,
"loss": 23.4737,
"step": 7
},
{
"epoch": 0.06874328678839957,
"grad_norm": 31.985191345214844,
"learning_rate": 4e-05,
"loss": 23.7119,
"step": 8
},
{
"epoch": 0.07733619763694952,
"grad_norm": 24.535207748413086,
"learning_rate": 4.5e-05,
"loss": 21.2995,
"step": 9
},
{
"epoch": 0.08592910848549946,
"grad_norm": 20.784114837646484,
"learning_rate": 5e-05,
"loss": 19.4062,
"step": 10
},
{
"epoch": 0.09452201933404941,
"grad_norm": 22.949960708618164,
"learning_rate": 5.500000000000001e-05,
"loss": 19.894,
"step": 11
},
{
"epoch": 0.10311493018259936,
"grad_norm": 18.90312957763672,
"learning_rate": 6e-05,
"loss": 15.6992,
"step": 12
},
{
"epoch": 0.11170784103114931,
"grad_norm": 12.863642692565918,
"learning_rate": 6.500000000000001e-05,
"loss": 13.7171,
"step": 13
},
{
"epoch": 0.12030075187969924,
"grad_norm": 10.37125015258789,
"learning_rate": 7e-05,
"loss": 13.0682,
"step": 14
},
{
"epoch": 0.1288936627282492,
"grad_norm": 11.138198852539062,
"learning_rate": 7.500000000000001e-05,
"loss": 12.1682,
"step": 15
},
{
"epoch": 0.13748657357679914,
"grad_norm": 14.989235877990723,
"learning_rate": 8e-05,
"loss": 11.8289,
"step": 16
},
{
"epoch": 0.1460794844253491,
"grad_norm": 11.183653831481934,
"learning_rate": 8.5e-05,
"loss": 9.9752,
"step": 17
},
{
"epoch": 0.15467239527389903,
"grad_norm": 10.320233345031738,
"learning_rate": 9e-05,
"loss": 8.4,
"step": 18
},
{
"epoch": 0.16326530612244897,
"grad_norm": 7.7508039474487305,
"learning_rate": 9.5e-05,
"loss": 8.694,
"step": 19
},
{
"epoch": 0.17185821697099893,
"grad_norm": 8.021259307861328,
"learning_rate": 0.0001,
"loss": 8.0313,
"step": 20
},
{
"epoch": 0.18045112781954886,
"grad_norm": 7.663169860839844,
"learning_rate": 9.991845519630678e-05,
"loss": 10.0139,
"step": 21
},
{
"epoch": 0.18904403866809882,
"grad_norm": 7.0549798011779785,
"learning_rate": 9.967408676742751e-05,
"loss": 8.3892,
"step": 22
},
{
"epoch": 0.19763694951664876,
"grad_norm": 6.753633975982666,
"learning_rate": 9.926769179238466e-05,
"loss": 7.8297,
"step": 23
},
{
"epoch": 0.20622986036519872,
"grad_norm": 7.454422950744629,
"learning_rate": 9.870059584711668e-05,
"loss": 7.9531,
"step": 24
},
{
"epoch": 0.21482277121374865,
"grad_norm": 4.801022052764893,
"learning_rate": 9.797464868072488e-05,
"loss": 7.3965,
"step": 25
},
{
"epoch": 0.22341568206229862,
"grad_norm": 5.871040344238281,
"learning_rate": 9.709221818197624e-05,
"loss": 8.6411,
"step": 26
},
{
"epoch": 0.23200859291084855,
"grad_norm": 6.431129455566406,
"learning_rate": 9.60561826557425e-05,
"loss": 6.2586,
"step": 27
},
{
"epoch": 0.24060150375939848,
"grad_norm": 4.8356032371521,
"learning_rate": 9.486992143456792e-05,
"loss": 6.9354,
"step": 28
},
{
"epoch": 0.24919441460794844,
"grad_norm": 4.676732540130615,
"learning_rate": 9.353730385598887e-05,
"loss": 7.0826,
"step": 29
},
{
"epoch": 0.2577873254564984,
"grad_norm": 4.927655220031738,
"learning_rate": 9.206267664155907e-05,
"loss": 6.9211,
"step": 30
},
{
"epoch": 0.2663802363050483,
"grad_norm": 5.393516540527344,
"learning_rate": 9.045084971874738e-05,
"loss": 6.5549,
"step": 31
},
{
"epoch": 0.2749731471535983,
"grad_norm": 5.347249984741211,
"learning_rate": 8.870708053195413e-05,
"loss": 7.0554,
"step": 32
},
{
"epoch": 0.28356605800214824,
"grad_norm": 5.3050312995910645,
"learning_rate": 8.683705689382024e-05,
"loss": 6.4794,
"step": 33
},
{
"epoch": 0.2921589688506982,
"grad_norm": 5.1706037521362305,
"learning_rate": 8.484687843276469e-05,
"loss": 6.7291,
"step": 34
},
{
"epoch": 0.3007518796992481,
"grad_norm": 5.202975749969482,
"learning_rate": 8.274303669726426e-05,
"loss": 6.4802,
"step": 35
},
{
"epoch": 0.30934479054779807,
"grad_norm": 5.066944599151611,
"learning_rate": 8.053239398177191e-05,
"loss": 5.5754,
"step": 36
},
{
"epoch": 0.317937701396348,
"grad_norm": 5.513393878936768,
"learning_rate": 7.822216094333847e-05,
"loss": 5.9398,
"step": 37
},
{
"epoch": 0.32653061224489793,
"grad_norm": 6.0009565353393555,
"learning_rate": 7.58198730819481e-05,
"loss": 6.3022,
"step": 38
},
{
"epoch": 0.3351235230934479,
"grad_norm": 5.408257007598877,
"learning_rate": 7.333336616128369e-05,
"loss": 5.3976,
"step": 39
},
{
"epoch": 0.34371643394199786,
"grad_norm": 6.3030195236206055,
"learning_rate": 7.077075065009433e-05,
"loss": 6.669,
"step": 40
},
{
"epoch": 0.3523093447905478,
"grad_norm": 4.973893642425537,
"learning_rate": 6.814038526753205e-05,
"loss": 6.0552,
"step": 41
},
{
"epoch": 0.3609022556390977,
"grad_norm": 5.095200061798096,
"learning_rate": 6.545084971874738e-05,
"loss": 5.2656,
"step": 42
},
{
"epoch": 0.3694951664876477,
"grad_norm": 5.441334247589111,
"learning_rate": 6.271091670967436e-05,
"loss": 6.0926,
"step": 43
},
{
"epoch": 0.37808807733619765,
"grad_norm": 5.907732009887695,
"learning_rate": 5.992952333228728e-05,
"loss": 6.2611,
"step": 44
},
{
"epoch": 0.3866809881847476,
"grad_norm": 5.2476348876953125,
"learning_rate": 5.7115741913664264e-05,
"loss": 5.114,
"step": 45
},
{
"epoch": 0.3952738990332975,
"grad_norm": 4.924515724182129,
"learning_rate": 5.427875042394199e-05,
"loss": 4.9915,
"step": 46
},
{
"epoch": 0.4038668098818475,
"grad_norm": 5.901587963104248,
"learning_rate": 5.142780253968481e-05,
"loss": 5.8679,
"step": 47
},
{
"epoch": 0.41245972073039744,
"grad_norm": 4.880833625793457,
"learning_rate": 4.85721974603152e-05,
"loss": 5.1338,
"step": 48
},
{
"epoch": 0.42105263157894735,
"grad_norm": 5.361891746520996,
"learning_rate": 4.5721249576058027e-05,
"loss": 5.8578,
"step": 49
},
{
"epoch": 0.4296455424274973,
"grad_norm": 5.202010631561279,
"learning_rate": 4.288425808633575e-05,
"loss": 5.4502,
"step": 50
},
{
"epoch": 0.4296455424274973,
"eval_loss": 0.3578774929046631,
"eval_runtime": 50.006,
"eval_samples_per_second": 7.839,
"eval_steps_per_second": 1.96,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.661017318326272e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}