|
{
  "best_metric": 0.0040573906153440475,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.008424954715868403,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00016849909431736806,
      "grad_norm": 0.5649989247322083,
      "learning_rate": 0.0001,
      "loss": 0.4213,
      "step": 1
    },
    {
      "epoch": 0.00016849909431736806,
      "eval_loss": 0.5909122824668884,
      "eval_runtime": 544.2099,
      "eval_samples_per_second": 18.366,
      "eval_steps_per_second": 2.297,
      "step": 1
    },
    {
      "epoch": 0.0003369981886347361,
      "grad_norm": 0.6741442680358887,
      "learning_rate": 0.0002,
      "loss": 0.5083,
      "step": 2
    },
    {
      "epoch": 0.0005054972829521042,
      "grad_norm": 0.5923657417297363,
      "learning_rate": 0.00019978589232386035,
      "loss": 0.4534,
      "step": 3
    },
    {
      "epoch": 0.0006739963772694722,
      "grad_norm": 0.6023609042167664,
      "learning_rate": 0.00019914448613738106,
      "loss": 0.2927,
      "step": 4
    },
    {
      "epoch": 0.0008424954715868402,
      "grad_norm": 0.6663532257080078,
      "learning_rate": 0.00019807852804032305,
      "loss": 0.1444,
      "step": 5
    },
    {
      "epoch": 0.0010109945659042083,
      "grad_norm": 0.4367070496082306,
      "learning_rate": 0.00019659258262890683,
      "loss": 0.0676,
      "step": 6
    },
    {
      "epoch": 0.0011794936602215764,
      "grad_norm": 0.15941502153873444,
      "learning_rate": 0.0001946930129495106,
      "loss": 0.0139,
      "step": 7
    },
    {
      "epoch": 0.0013479927545389445,
      "grad_norm": 0.08275063335895538,
      "learning_rate": 0.0001923879532511287,
      "loss": 0.0069,
      "step": 8
    },
    {
      "epoch": 0.0015164918488563123,
      "grad_norm": 0.6395729780197144,
      "learning_rate": 0.00018968727415326884,
      "loss": 0.0421,
      "step": 9
    },
    {
      "epoch": 0.0016849909431736804,
      "grad_norm": 0.12541823089122772,
      "learning_rate": 0.00018660254037844388,
      "loss": 0.0053,
      "step": 10
    },
    {
      "epoch": 0.0018534900374910484,
      "grad_norm": 0.3203907012939453,
      "learning_rate": 0.00018314696123025454,
      "loss": 0.0044,
      "step": 11
    },
    {
      "epoch": 0.0020219891318084167,
      "grad_norm": 0.28198617696762085,
      "learning_rate": 0.00017933533402912354,
      "loss": 0.0118,
      "step": 12
    },
    {
      "epoch": 0.0021904882261257847,
      "grad_norm": 1.7291009426116943,
      "learning_rate": 0.00017518398074789775,
      "loss": 0.0402,
      "step": 13
    },
    {
      "epoch": 0.002358987320443153,
      "grad_norm": 1.925735592842102,
      "learning_rate": 0.00017071067811865476,
      "loss": 0.0377,
      "step": 14
    },
    {
      "epoch": 0.002527486414760521,
      "grad_norm": 0.37905728816986084,
      "learning_rate": 0.00016593458151000688,
      "loss": 0.0106,
      "step": 15
    },
    {
      "epoch": 0.002695985509077889,
      "grad_norm": 0.2949874699115753,
      "learning_rate": 0.00016087614290087208,
      "loss": 0.0078,
      "step": 16
    },
    {
      "epoch": 0.0028644846033952565,
      "grad_norm": 0.21088016033172607,
      "learning_rate": 0.00015555702330196023,
      "loss": 0.0062,
      "step": 17
    },
    {
      "epoch": 0.0030329836977126246,
      "grad_norm": 0.04623870924115181,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.0025,
      "step": 18
    },
    {
      "epoch": 0.0032014827920299927,
      "grad_norm": 0.0938965305685997,
      "learning_rate": 0.00014422886902190014,
      "loss": 0.0081,
      "step": 19
    },
    {
      "epoch": 0.0033699818863473607,
      "grad_norm": 0.14240026473999023,
      "learning_rate": 0.000138268343236509,
      "loss": 0.0085,
      "step": 20
    },
    {
      "epoch": 0.0035384809806647288,
      "grad_norm": 0.05777126923203468,
      "learning_rate": 0.00013214394653031616,
      "loss": 0.0017,
      "step": 21
    },
    {
      "epoch": 0.003706980074982097,
      "grad_norm": 0.04713433235883713,
      "learning_rate": 0.00012588190451025207,
      "loss": 0.0037,
      "step": 22
    },
    {
      "epoch": 0.003875479169299465,
      "grad_norm": 0.1591893881559372,
      "learning_rate": 0.00011950903220161285,
      "loss": 0.011,
      "step": 23
    },
    {
      "epoch": 0.004043978263616833,
      "grad_norm": 0.05540703609585762,
      "learning_rate": 0.00011305261922200519,
      "loss": 0.0044,
      "step": 24
    },
    {
      "epoch": 0.0042124773579342014,
      "grad_norm": 0.023379579186439514,
      "learning_rate": 0.00010654031292301432,
      "loss": 0.0012,
      "step": 25
    },
    {
      "epoch": 0.0042124773579342014,
      "eval_loss": 0.006916701328009367,
      "eval_runtime": 544.1604,
      "eval_samples_per_second": 18.368,
      "eval_steps_per_second": 2.297,
      "step": 25
    },
    {
      "epoch": 0.0043809764522515695,
      "grad_norm": 0.19603978097438812,
      "learning_rate": 0.0001,
      "loss": 0.0213,
      "step": 26
    },
    {
      "epoch": 0.0045494755465689376,
      "grad_norm": 0.5282886028289795,
      "learning_rate": 9.345968707698569e-05,
      "loss": 0.0656,
      "step": 27
    },
    {
      "epoch": 0.004717974640886306,
      "grad_norm": 0.0671163946390152,
      "learning_rate": 8.694738077799488e-05,
      "loss": 0.0094,
      "step": 28
    },
    {
      "epoch": 0.004886473735203674,
      "grad_norm": 0.05665583908557892,
      "learning_rate": 8.049096779838719e-05,
      "loss": 0.0151,
      "step": 29
    },
    {
      "epoch": 0.005054972829521042,
      "grad_norm": 0.07490652799606323,
      "learning_rate": 7.411809548974792e-05,
      "loss": 0.0009,
      "step": 30
    },
    {
      "epoch": 0.00522347192383841,
      "grad_norm": 0.037155721336603165,
      "learning_rate": 6.785605346968386e-05,
      "loss": 0.0024,
      "step": 31
    },
    {
      "epoch": 0.005391971018155778,
      "grad_norm": 0.05099332705140114,
      "learning_rate": 6.173165676349103e-05,
      "loss": 0.0078,
      "step": 32
    },
    {
      "epoch": 0.005560470112473145,
      "grad_norm": 0.17277608811855316,
      "learning_rate": 5.577113097809989e-05,
      "loss": 0.0109,
      "step": 33
    },
    {
      "epoch": 0.005728969206790513,
      "grad_norm": 0.04528217762708664,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.0052,
      "step": 34
    },
    {
      "epoch": 0.005897468301107881,
      "grad_norm": 0.008220179006457329,
      "learning_rate": 4.444297669803981e-05,
      "loss": 0.0004,
      "step": 35
    },
    {
      "epoch": 0.006065967395425249,
      "grad_norm": 0.012407345697283745,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 0.0006,
      "step": 36
    },
    {
      "epoch": 0.006234466489742617,
      "grad_norm": 0.017425227910280228,
      "learning_rate": 3.406541848999312e-05,
      "loss": 0.0024,
      "step": 37
    },
    {
      "epoch": 0.006402965584059985,
      "grad_norm": 0.22802281379699707,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.0073,
      "step": 38
    },
    {
      "epoch": 0.006571464678377353,
      "grad_norm": 0.0339801162481308,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 0.0057,
      "step": 39
    },
    {
      "epoch": 0.006739963772694721,
      "grad_norm": 0.020060187205672264,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 0.0019,
      "step": 40
    },
    {
      "epoch": 0.0069084628670120895,
      "grad_norm": 0.03689638152718544,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 0.0009,
      "step": 41
    },
    {
      "epoch": 0.0070769619613294575,
      "grad_norm": 0.05137154832482338,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.0044,
      "step": 42
    },
    {
      "epoch": 0.007245461055646826,
      "grad_norm": 0.11253440380096436,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 0.0022,
      "step": 43
    },
    {
      "epoch": 0.007413960149964194,
      "grad_norm": 0.009691456332802773,
      "learning_rate": 7.612046748871327e-06,
      "loss": 0.0005,
      "step": 44
    },
    {
      "epoch": 0.007582459244281562,
      "grad_norm": 0.09334763884544373,
      "learning_rate": 5.306987050489442e-06,
      "loss": 0.0031,
      "step": 45
    },
    {
      "epoch": 0.00775095833859893,
      "grad_norm": 0.025010887533426285,
      "learning_rate": 3.40741737109318e-06,
      "loss": 0.0011,
      "step": 46
    },
    {
      "epoch": 0.007919457432916298,
      "grad_norm": 0.06059917062520981,
      "learning_rate": 1.921471959676957e-06,
      "loss": 0.0068,
      "step": 47
    },
    {
      "epoch": 0.008087956527233667,
      "grad_norm": 0.0202567707747221,
      "learning_rate": 8.555138626189618e-07,
      "loss": 0.0009,
      "step": 48
    },
    {
      "epoch": 0.008256455621551034,
      "grad_norm": 0.020429080352187157,
      "learning_rate": 2.141076761396521e-07,
      "loss": 0.003,
      "step": 49
    },
    {
      "epoch": 0.008424954715868403,
      "grad_norm": 0.06769539415836334,
      "learning_rate": 0.0,
      "loss": 0.0056,
      "step": 50
    },
    {
      "epoch": 0.008424954715868403,
      "eval_loss": 0.0040573906153440475,
      "eval_runtime": 544.1235,
      "eval_samples_per_second": 18.369,
      "eval_steps_per_second": 2.297,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.796154706198528e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|