Training in progress, step 50, checkpoint
5f6b89c
{
"best_metric": 0.6681258082389832,
"best_model_checkpoint": "miner_id_24/checkpoint-25",
"epoch": 0.10303967027305512,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0020607934054611026,
"grad_norm": 29.148273468017578,
"learning_rate": 0.0001,
"loss": 6.3214,
"step": 1
},
{
"epoch": 0.0020607934054611026,
"eval_loss": 9.2053804397583,
"eval_runtime": 15.2432,
"eval_samples_per_second": 13.449,
"eval_steps_per_second": 6.757,
"step": 1
},
{
"epoch": 0.004121586810922205,
"grad_norm": 36.22698974609375,
"learning_rate": 0.0002,
"loss": 6.6115,
"step": 2
},
{
"epoch": 0.0061823802163833074,
"grad_norm": 23.70931625366211,
"learning_rate": 0.00019978589232386035,
"loss": 5.1783,
"step": 3
},
{
"epoch": 0.00824317362184441,
"grad_norm": 20.946331024169922,
"learning_rate": 0.00019914448613738106,
"loss": 3.7375,
"step": 4
},
{
"epoch": 0.010303967027305513,
"grad_norm": 12.989495277404785,
"learning_rate": 0.00019807852804032305,
"loss": 2.4732,
"step": 5
},
{
"epoch": 0.012364760432766615,
"grad_norm": 16.429964065551758,
"learning_rate": 0.00019659258262890683,
"loss": 2.2351,
"step": 6
},
{
"epoch": 0.014425553838227717,
"grad_norm": 13.84890079498291,
"learning_rate": 0.0001946930129495106,
"loss": 0.9687,
"step": 7
},
{
"epoch": 0.01648634724368882,
"grad_norm": 34.05931091308594,
"learning_rate": 0.0001923879532511287,
"loss": 0.6392,
"step": 8
},
{
"epoch": 0.01854714064914992,
"grad_norm": 29.02243995666504,
"learning_rate": 0.00018968727415326884,
"loss": 1.246,
"step": 9
},
{
"epoch": 0.020607934054611025,
"grad_norm": 15.838531494140625,
"learning_rate": 0.00018660254037844388,
"loss": 0.8761,
"step": 10
},
{
"epoch": 0.02266872746007213,
"grad_norm": 14.478208541870117,
"learning_rate": 0.00018314696123025454,
"loss": 1.159,
"step": 11
},
{
"epoch": 0.02472952086553323,
"grad_norm": 10.877490997314453,
"learning_rate": 0.00017933533402912354,
"loss": 0.6689,
"step": 12
},
{
"epoch": 0.026790314270994334,
"grad_norm": 10.949230194091797,
"learning_rate": 0.00017518398074789775,
"loss": 0.3131,
"step": 13
},
{
"epoch": 0.028851107676455434,
"grad_norm": 7.338247776031494,
"learning_rate": 0.00017071067811865476,
"loss": 0.3484,
"step": 14
},
{
"epoch": 0.030911901081916538,
"grad_norm": 11.679816246032715,
"learning_rate": 0.00016593458151000688,
"loss": 0.4747,
"step": 15
},
{
"epoch": 0.03297269448737764,
"grad_norm": 7.882423400878906,
"learning_rate": 0.00016087614290087208,
"loss": 0.3737,
"step": 16
},
{
"epoch": 0.03503348789283874,
"grad_norm": 13.34092903137207,
"learning_rate": 0.00015555702330196023,
"loss": 0.4526,
"step": 17
},
{
"epoch": 0.03709428129829984,
"grad_norm": 9.12542724609375,
"learning_rate": 0.00015000000000000001,
"loss": 0.4328,
"step": 18
},
{
"epoch": 0.03915507470376095,
"grad_norm": 9.532864570617676,
"learning_rate": 0.00014422886902190014,
"loss": 0.4319,
"step": 19
},
{
"epoch": 0.04121586810922205,
"grad_norm": 4.404548645019531,
"learning_rate": 0.000138268343236509,
"loss": 0.2462,
"step": 20
},
{
"epoch": 0.04327666151468315,
"grad_norm": 3.681804895401001,
"learning_rate": 0.00013214394653031616,
"loss": 0.2135,
"step": 21
},
{
"epoch": 0.04533745492014426,
"grad_norm": 4.448928356170654,
"learning_rate": 0.00012588190451025207,
"loss": 0.2531,
"step": 22
},
{
"epoch": 0.04739824832560536,
"grad_norm": 13.782437324523926,
"learning_rate": 0.00011950903220161285,
"loss": 0.4945,
"step": 23
},
{
"epoch": 0.04945904173106646,
"grad_norm": 35.12315368652344,
"learning_rate": 0.00011305261922200519,
"loss": 0.9411,
"step": 24
},
{
"epoch": 0.05151983513652756,
"grad_norm": 62.50826644897461,
"learning_rate": 0.00010654031292301432,
"loss": 0.823,
"step": 25
},
{
"epoch": 0.05151983513652756,
"eval_loss": 0.6681258082389832,
"eval_runtime": 14.8471,
"eval_samples_per_second": 13.807,
"eval_steps_per_second": 6.937,
"step": 25
},
{
"epoch": 0.05358062854198867,
"grad_norm": 15.42173957824707,
"learning_rate": 0.0001,
"loss": 0.4441,
"step": 26
},
{
"epoch": 0.05564142194744977,
"grad_norm": 1.8112399578094482,
"learning_rate": 9.345968707698569e-05,
"loss": 0.0552,
"step": 27
},
{
"epoch": 0.05770221535291087,
"grad_norm": 0.7334427833557129,
"learning_rate": 8.694738077799488e-05,
"loss": 0.0083,
"step": 28
},
{
"epoch": 0.059763008758371976,
"grad_norm": 9.213266372680664,
"learning_rate": 8.049096779838719e-05,
"loss": 0.2998,
"step": 29
},
{
"epoch": 0.061823802163833076,
"grad_norm": 3.4060914516448975,
"learning_rate": 7.411809548974792e-05,
"loss": 0.0579,
"step": 30
},
{
"epoch": 0.06388459556929418,
"grad_norm": 0.07256096601486206,
"learning_rate": 6.785605346968386e-05,
"loss": 0.001,
"step": 31
},
{
"epoch": 0.06594538897475528,
"grad_norm": 0.05080750212073326,
"learning_rate": 6.173165676349103e-05,
"loss": 0.0008,
"step": 32
},
{
"epoch": 0.06800618238021638,
"grad_norm": 4.680781841278076,
"learning_rate": 5.577113097809989e-05,
"loss": 0.0752,
"step": 33
},
{
"epoch": 0.07006697578567748,
"grad_norm": 1.9387208223342896,
"learning_rate": 5.000000000000002e-05,
"loss": 0.0059,
"step": 34
},
{
"epoch": 0.07212776919113859,
"grad_norm": 0.2535548806190491,
"learning_rate": 4.444297669803981e-05,
"loss": 0.002,
"step": 35
},
{
"epoch": 0.07418856259659969,
"grad_norm": 0.1797764152288437,
"learning_rate": 3.9123857099127936e-05,
"loss": 0.0015,
"step": 36
},
{
"epoch": 0.0762493560020608,
"grad_norm": 0.1363029181957245,
"learning_rate": 3.406541848999312e-05,
"loss": 0.0014,
"step": 37
},
{
"epoch": 0.0783101494075219,
"grad_norm": 0.08102747797966003,
"learning_rate": 2.9289321881345254e-05,
"loss": 0.0009,
"step": 38
},
{
"epoch": 0.080370942812983,
"grad_norm": 0.08120626956224442,
"learning_rate": 2.4816019252102273e-05,
"loss": 0.0009,
"step": 39
},
{
"epoch": 0.0824317362184441,
"grad_norm": 0.06320202350616455,
"learning_rate": 2.0664665970876496e-05,
"loss": 0.0007,
"step": 40
},
{
"epoch": 0.0844925296239052,
"grad_norm": 0.06635958701372147,
"learning_rate": 1.6853038769745467e-05,
"loss": 0.0007,
"step": 41
},
{
"epoch": 0.0865533230293663,
"grad_norm": 0.04807131737470627,
"learning_rate": 1.339745962155613e-05,
"loss": 0.0006,
"step": 42
},
{
"epoch": 0.0886141164348274,
"grad_norm": 0.06771901249885559,
"learning_rate": 1.0312725846731175e-05,
"loss": 0.0007,
"step": 43
},
{
"epoch": 0.09067490984028852,
"grad_norm": 0.046476930379867554,
"learning_rate": 7.612046748871327e-06,
"loss": 0.0005,
"step": 44
},
{
"epoch": 0.09273570324574962,
"grad_norm": 13.438279151916504,
"learning_rate": 5.306987050489442e-06,
"loss": 0.0353,
"step": 45
},
{
"epoch": 0.09479649665121072,
"grad_norm": 20.22861099243164,
"learning_rate": 3.40741737109318e-06,
"loss": 0.05,
"step": 46
},
{
"epoch": 0.09685729005667182,
"grad_norm": 26.98645782470703,
"learning_rate": 1.921471959676957e-06,
"loss": 0.0715,
"step": 47
},
{
"epoch": 0.09891808346213292,
"grad_norm": 26.829103469848633,
"learning_rate": 8.555138626189618e-07,
"loss": 0.0614,
"step": 48
},
{
"epoch": 0.10097887686759402,
"grad_norm": 35.39474868774414,
"learning_rate": 2.141076761396521e-07,
"loss": 0.0936,
"step": 49
},
{
"epoch": 0.10303967027305512,
"grad_norm": 19.681806564331055,
"learning_rate": 0.0,
"loss": 0.041,
"step": 50
},
{
"epoch": 0.10303967027305512,
"eval_loss": 1.1938879489898682,
"eval_runtime": 14.8623,
"eval_samples_per_second": 13.793,
"eval_steps_per_second": 6.93,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 1
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3604771504128000.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
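
The JSON above is the trainer_state.json written by the Hugging Face Transformers Trainer alongside checkpoint-50. As a minimal sketch of how it can be inspected (assuming the file has been saved locally as trainer_state.json; the path is illustrative, not part of the checkpoint):

import json

# Load the trainer state shown above (the filename is an assumption for illustration).
with open("trainer_state.json") as f:
    state = json.load(f)

# Entries in log_history that carry a "loss" key are per-step training logs;
# entries that carry "eval_loss" are the evaluations run every eval_steps (25) steps.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss {state['best_metric']:.4f} at {state['best_model_checkpoint']}")
for e in eval_log:
    print(f"step {e['step']:>3}: eval_loss {e['eval_loss']:.4f}")

On this run the script would report the best eval_loss of 0.6681 at checkpoint-25; the eval_loss then rises to 1.1939 at step 50, which is consistent with the EarlyStoppingCallback (patience 1) incrementing its counter and TrainerControl setting should_training_stop to true.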