{
  "best_metric": 0.11645537614822388,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 1.1429587482219061,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02275960170697013,
      "grad_norm": 1.7705003023147583,
      "learning_rate": 0.00015,
      "loss": 3.3287,
      "step": 1
    },
    {
      "epoch": 0.02275960170697013,
      "eval_loss": 3.4932775497436523,
      "eval_runtime": 0.2913,
      "eval_samples_per_second": 171.665,
      "eval_steps_per_second": 44.633,
      "step": 1
    },
    {
      "epoch": 0.04551920341394026,
      "grad_norm": 1.6582751274108887,
      "learning_rate": 0.0003,
      "loss": 3.318,
      "step": 2
    },
    {
      "epoch": 0.06827880512091039,
      "grad_norm": 1.748265266418457,
      "learning_rate": 0.00029990993452998227,
      "loss": 3.3114,
      "step": 3
    },
    {
      "epoch": 0.09103840682788052,
      "grad_norm": 1.5232975482940674,
      "learning_rate": 0.00029963985829457943,
      "loss": 2.9468,
      "step": 4
    },
    {
      "epoch": 0.11379800853485064,
      "grad_norm": 1.5051178932189941,
      "learning_rate": 0.0002991901316573927,
      "loss": 2.7052,
      "step": 5
    },
    {
      "epoch": 0.13655761024182078,
      "grad_norm": 1.422306776046753,
      "learning_rate": 0.00029856135469013987,
      "loss": 2.3768,
      "step": 6
    },
    {
      "epoch": 0.1593172119487909,
      "grad_norm": 1.5374380350112915,
      "learning_rate": 0.0002977543663719779,
      "loss": 2.1616,
      "step": 7
    },
    {
      "epoch": 0.18207681365576103,
      "grad_norm": 1.8372466564178467,
      "learning_rate": 0.00029677024347005013,
      "loss": 2.0086,
      "step": 8
    },
    {
      "epoch": 0.20483641536273114,
      "grad_norm": 2.2954390048980713,
      "learning_rate": 0.0002956102991027524,
      "loss": 1.7581,
      "step": 9
    },
    {
      "epoch": 0.22759601706970128,
      "grad_norm": 2.7270781993865967,
      "learning_rate": 0.0002942760809876348,
      "loss": 1.5204,
      "step": 10
    },
    {
      "epoch": 0.2503556187766714,
      "grad_norm": 2.2713541984558105,
      "learning_rate": 0.00029276936937627725,
      "loss": 1.2835,
      "step": 11
    },
    {
      "epoch": 0.27311522048364156,
      "grad_norm": 2.338096857070923,
      "learning_rate": 0.000291092174678894,
      "loss": 1.1385,
      "step": 12
    },
    {
      "epoch": 0.2958748221906117,
      "grad_norm": 1.553781270980835,
      "learning_rate": 0.00028924673478183645,
      "loss": 0.9631,
      "step": 13
    },
    {
      "epoch": 0.3186344238975818,
      "grad_norm": 1.5467333793640137,
      "learning_rate": 0.0002872355120615748,
      "loss": 0.8275,
      "step": 14
    },
    {
      "epoch": 0.3413940256045519,
      "grad_norm": 1.656901240348816,
      "learning_rate": 0.00028506119009914,
      "loss": 0.7097,
      "step": 15
    },
    {
      "epoch": 0.36415362731152207,
      "grad_norm": 1.358717679977417,
      "learning_rate": 0.000282726670099414,
      "loss": 0.6673,
      "step": 16
    },
    {
      "epoch": 0.3869132290184922,
      "grad_norm": 0.8717578649520874,
      "learning_rate": 0.00028023506702004174,
      "loss": 0.4671,
      "step": 17
    },
    {
      "epoch": 0.4096728307254623,
      "grad_norm": 0.977169930934906,
      "learning_rate": 0.0002775897054151335,
      "loss": 0.4218,
      "step": 18
    },
    {
      "epoch": 0.43243243243243246,
      "grad_norm": 0.9587895274162292,
      "learning_rate": 0.00027479411499930134,
      "loss": 0.4066,
      "step": 19
    },
    {
      "epoch": 0.45519203413940257,
      "grad_norm": 0.6970980167388916,
      "learning_rate": 0.00027185202593794927,
      "loss": 0.3582,
      "step": 20
    },
    {
      "epoch": 0.4779516358463727,
      "grad_norm": 0.7278751730918884,
      "learning_rate": 0.0002687673638701018,
      "loss": 0.3208,
      "step": 21
    },
    {
      "epoch": 0.5007112375533428,
      "grad_norm": 0.7912285327911377,
      "learning_rate": 0.00026554424467041055,
      "loss": 0.4071,
      "step": 22
    },
    {
      "epoch": 0.5234708392603129,
      "grad_norm": 0.8546118140220642,
      "learning_rate": 0.00026218696895732944,
      "loss": 0.3329,
      "step": 23
    },
    {
      "epoch": 0.5462304409672831,
      "grad_norm": 0.4830358326435089,
      "learning_rate": 0.0002587000163547856,
      "loss": 0.3032,
      "step": 24
    },
    {
      "epoch": 0.5689900426742532,
      "grad_norm": 0.7155347466468811,
      "learning_rate": 0.0002550880395150023,
      "loss": 0.2946,
      "step": 25
    },
    {
      "epoch": 0.5689900426742532,
      "eval_loss": 0.2797185778617859,
      "eval_runtime": 0.2962,
      "eval_samples_per_second": 168.833,
      "eval_steps_per_second": 43.897,
      "step": 25
    },
    {
      "epoch": 0.5917496443812233,
      "grad_norm": 0.5971561670303345,
      "learning_rate": 0.0002513558579104503,
      "loss": 0.2866,
      "step": 26
    },
    {
      "epoch": 0.6145092460881935,
      "grad_norm": 0.5399057269096375,
      "learning_rate": 0.00024750845140320964,
      "loss": 0.3,
      "step": 27
    },
    {
      "epoch": 0.6372688477951636,
      "grad_norm": 0.5159599781036377,
      "learning_rate": 0.00024355095360032364,
      "loss": 0.2229,
      "step": 28
    },
    {
      "epoch": 0.6600284495021337,
      "grad_norm": 0.36613932251930237,
      "learning_rate": 0.00023948864500401016,
      "loss": 0.2183,
      "step": 29
    },
    {
      "epoch": 0.6827880512091038,
      "grad_norm": 0.4476757049560547,
      "learning_rate": 0.00023532694596587055,
      "loss": 0.1993,
      "step": 30
    },
    {
      "epoch": 0.705547652916074,
      "grad_norm": 0.47915318608283997,
      "learning_rate": 0.00023107140945449652,
      "loss": 0.177,
      "step": 31
    },
    {
      "epoch": 0.7283072546230441,
      "grad_norm": 0.47024720907211304,
      "learning_rate": 0.0002267277136461262,
      "loss": 0.1281,
      "step": 32
    },
    {
      "epoch": 0.7510668563300142,
      "grad_norm": 0.43254679441452026,
      "learning_rate": 0.00022230165434823502,
      "loss": 0.2339,
      "step": 33
    },
    {
      "epoch": 0.7738264580369844,
      "grad_norm": 0.6018503904342651,
      "learning_rate": 0.00021779913726617102,
      "loss": 0.2309,
      "step": 34
    },
    {
      "epoch": 0.7965860597439545,
      "grad_norm": 0.42272430658340454,
      "learning_rate": 0.00021322617012315288,
      "loss": 0.1736,
      "step": 35
    },
    {
      "epoch": 0.8193456614509246,
      "grad_norm": 0.4929526746273041,
      "learning_rate": 0.00020858885464414522,
      "loss": 0.1818,
      "step": 36
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 0.5257923007011414,
      "learning_rate": 0.00020389337841430707,
      "loss": 0.1759,
      "step": 37
    },
    {
      "epoch": 0.8648648648648649,
      "grad_norm": 0.32733604311943054,
      "learning_rate": 0.00019914600662287684,
      "loss": 0.1765,
      "step": 38
    },
    {
      "epoch": 0.887624466571835,
      "grad_norm": 0.3499005138874054,
      "learning_rate": 0.00019435307370351017,
      "loss": 0.1172,
      "step": 39
    },
    {
      "epoch": 0.9103840682788051,
      "grad_norm": 0.32600706815719604,
      "learning_rate": 0.0001895209748822239,
      "loss": 0.1107,
      "step": 40
    },
    {
      "epoch": 0.9331436699857752,
      "grad_norm": 0.3189937174320221,
      "learning_rate": 0.00018465615764422566,
      "loss": 0.134,
      "step": 41
    },
    {
      "epoch": 0.9559032716927454,
      "grad_norm": 0.26007068157196045,
      "learning_rate": 0.00017976511313101307,
      "loss": 0.1037,
      "step": 42
    },
    {
      "epoch": 0.9786628733997155,
      "grad_norm": 0.3448164463043213,
      "learning_rate": 0.00017485436747922248,
      "loss": 0.0882,
      "step": 43
    },
    {
      "epoch": 1.0064011379800855,
      "grad_norm": 0.28717777132987976,
      "learning_rate": 0.00016993047311278397,
      "loss": 0.2189,
      "step": 44
    },
    {
      "epoch": 1.0291607396870555,
      "grad_norm": 0.2990550994873047,
      "learning_rate": 0.000165,
      "loss": 0.1578,
      "step": 45
    },
    {
      "epoch": 1.0519203413940257,
      "grad_norm": 0.3317011594772339,
      "learning_rate": 0.00016006952688721603,
      "loss": 0.1449,
      "step": 46
    },
    {
      "epoch": 1.0746799431009957,
      "grad_norm": 0.31591519713401794,
      "learning_rate": 0.0001551456325207775,
      "loss": 0.1431,
      "step": 47
    },
    {
      "epoch": 1.097439544807966,
      "grad_norm": 0.26354071497917175,
      "learning_rate": 0.00015023488686898698,
      "loss": 0.1444,
      "step": 48
    },
    {
      "epoch": 1.120199146514936,
      "grad_norm": 0.2392982542514801,
      "learning_rate": 0.00014534384235577436,
      "loss": 0.1312,
      "step": 49
    },
    {
      "epoch": 1.1429587482219061,
      "grad_norm": 0.3284483551979065,
      "learning_rate": 0.0001404790251177761,
      "loss": 0.1079,
      "step": 50
    },
    {
      "epoch": 1.1429587482219061,
      "eval_loss": 0.11645537614822388,
      "eval_runtime": 0.2863,
      "eval_samples_per_second": 174.617,
      "eval_steps_per_second": 45.4,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 88,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.22140137734144e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}