|
{
  "best_metric": 1.3211801052093506,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.0026203734032099572,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 5.240746806419915e-05,
      "grad_norm": 0.1738579273223877,
      "learning_rate": 0.0001,
      "loss": 1.3403,
      "step": 1
    },
    {
      "epoch": 5.240746806419915e-05,
      "eval_loss": 1.598608374595642,
      "eval_runtime": 820.1786,
      "eval_samples_per_second": 9.797,
      "eval_steps_per_second": 4.899,
      "step": 1
    },
    {
      "epoch": 0.0001048149361283983,
      "grad_norm": 0.20513613522052765,
      "learning_rate": 0.0002,
      "loss": 1.1966,
      "step": 2
    },
    {
      "epoch": 0.00015722240419259744,
      "grad_norm": 0.20996616780757904,
      "learning_rate": 0.00019978589232386035,
      "loss": 1.3571,
      "step": 3
    },
    {
      "epoch": 0.0002096298722567966,
      "grad_norm": 0.2220926582813263,
      "learning_rate": 0.00019914448613738106,
      "loss": 1.2344,
      "step": 4
    },
    {
      "epoch": 0.00026203734032099573,
      "grad_norm": 0.24656227231025696,
      "learning_rate": 0.00019807852804032305,
      "loss": 1.4127,
      "step": 5
    },
    {
      "epoch": 0.00031444480838519487,
      "grad_norm": 0.24758820235729218,
      "learning_rate": 0.00019659258262890683,
      "loss": 1.4265,
      "step": 6
    },
    {
      "epoch": 0.00036685227644939406,
      "grad_norm": 0.24943935871124268,
      "learning_rate": 0.0001946930129495106,
      "loss": 1.2982,
      "step": 7
    },
    {
      "epoch": 0.0004192597445135932,
      "grad_norm": 0.24972639977931976,
      "learning_rate": 0.0001923879532511287,
      "loss": 1.2243,
      "step": 8
    },
    {
      "epoch": 0.00047166721257779233,
      "grad_norm": 0.2653735280036926,
      "learning_rate": 0.00018968727415326884,
      "loss": 1.1996,
      "step": 9
    },
    {
      "epoch": 0.0005240746806419915,
      "grad_norm": 0.29955682158470154,
      "learning_rate": 0.00018660254037844388,
      "loss": 1.1895,
      "step": 10
    },
    {
      "epoch": 0.0005764821487061906,
      "grad_norm": 0.2780078947544098,
      "learning_rate": 0.00018314696123025454,
      "loss": 1.1721,
      "step": 11
    },
    {
      "epoch": 0.0006288896167703897,
      "grad_norm": 0.3014898896217346,
      "learning_rate": 0.00017933533402912354,
      "loss": 1.3355,
      "step": 12
    },
    {
      "epoch": 0.0006812970848345889,
      "grad_norm": 0.3645365536212921,
      "learning_rate": 0.00017518398074789775,
      "loss": 1.1965,
      "step": 13
    },
    {
      "epoch": 0.0007337045528987881,
      "grad_norm": 0.34544700384140015,
      "learning_rate": 0.00017071067811865476,
      "loss": 1.0024,
      "step": 14
    },
    {
      "epoch": 0.0007861120209629873,
      "grad_norm": 0.35733968019485474,
      "learning_rate": 0.00016593458151000688,
      "loss": 1.2473,
      "step": 15
    },
    {
      "epoch": 0.0008385194890271864,
      "grad_norm": 0.3587718605995178,
      "learning_rate": 0.00016087614290087208,
      "loss": 1.2417,
      "step": 16
    },
    {
      "epoch": 0.0008909269570913855,
      "grad_norm": 0.3632557988166809,
      "learning_rate": 0.00015555702330196023,
      "loss": 1.063,
      "step": 17
    },
    {
      "epoch": 0.0009433344251555847,
      "grad_norm": 0.29989340901374817,
      "learning_rate": 0.00015000000000000001,
      "loss": 1.0668,
      "step": 18
    },
    {
      "epoch": 0.0009957418932197838,
      "grad_norm": 0.3830617368221283,
      "learning_rate": 0.00014422886902190014,
      "loss": 1.1489,
      "step": 19
    },
    {
      "epoch": 0.001048149361283983,
      "grad_norm": 0.46538257598876953,
      "learning_rate": 0.000138268343236509,
      "loss": 1.2864,
      "step": 20
    },
    {
      "epoch": 0.001100556829348182,
      "grad_norm": 0.3305760622024536,
      "learning_rate": 0.00013214394653031616,
      "loss": 1.1292,
      "step": 21
    },
    {
      "epoch": 0.0011529642974123812,
      "grad_norm": 0.36099550127983093,
      "learning_rate": 0.00012588190451025207,
      "loss": 1.2849,
      "step": 22
    },
    {
      "epoch": 0.0012053717654765803,
      "grad_norm": 0.4047800898551941,
      "learning_rate": 0.00011950903220161285,
      "loss": 1.3647,
      "step": 23
    },
    {
      "epoch": 0.0012577792335407795,
      "grad_norm": 0.3234100937843323,
      "learning_rate": 0.00011305261922200519,
      "loss": 1.2295,
      "step": 24
    },
    {
      "epoch": 0.0013101867016049786,
      "grad_norm": 0.35075563192367554,
      "learning_rate": 0.00010654031292301432,
      "loss": 1.2979,
      "step": 25
    },
    {
      "epoch": 0.0013101867016049786,
      "eval_loss": 1.3386822938919067,
      "eval_runtime": 823.3662,
      "eval_samples_per_second": 9.759,
      "eval_steps_per_second": 4.88,
      "step": 25
    },
    {
      "epoch": 0.0013625941696691778,
      "grad_norm": 0.39403942227363586,
      "learning_rate": 0.0001,
      "loss": 1.2445,
      "step": 26
    },
    {
      "epoch": 0.001415001637733377,
      "grad_norm": 0.43036386370658875,
      "learning_rate": 9.345968707698569e-05,
      "loss": 1.2675,
      "step": 27
    },
    {
      "epoch": 0.0014674091057975762,
      "grad_norm": 0.36898255348205566,
      "learning_rate": 8.694738077799488e-05,
      "loss": 1.2681,
      "step": 28
    },
    {
      "epoch": 0.0015198165738617754,
      "grad_norm": 0.48340263962745667,
      "learning_rate": 8.049096779838719e-05,
      "loss": 1.1686,
      "step": 29
    },
    {
      "epoch": 0.0015722240419259745,
      "grad_norm": 0.413663387298584,
      "learning_rate": 7.411809548974792e-05,
      "loss": 1.2895,
      "step": 30
    },
    {
      "epoch": 0.0016246315099901737,
      "grad_norm": 0.4055498242378235,
      "learning_rate": 6.785605346968386e-05,
      "loss": 1.2696,
      "step": 31
    },
    {
      "epoch": 0.0016770389780543728,
      "grad_norm": 0.4815317988395691,
      "learning_rate": 6.173165676349103e-05,
      "loss": 1.3778,
      "step": 32
    },
    {
      "epoch": 0.001729446446118572,
      "grad_norm": 0.4714352786540985,
      "learning_rate": 5.577113097809989e-05,
      "loss": 1.345,
      "step": 33
    },
    {
      "epoch": 0.001781853914182771,
      "grad_norm": 0.48492833971977234,
      "learning_rate": 5.000000000000002e-05,
      "loss": 1.3,
      "step": 34
    },
    {
      "epoch": 0.0018342613822469702,
      "grad_norm": 0.5202695727348328,
      "learning_rate": 4.444297669803981e-05,
      "loss": 1.3596,
      "step": 35
    },
    {
      "epoch": 0.0018866688503111693,
      "grad_norm": 0.5492162108421326,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 1.3763,
      "step": 36
    },
    {
      "epoch": 0.0019390763183753685,
      "grad_norm": 0.5086742043495178,
      "learning_rate": 3.406541848999312e-05,
      "loss": 1.3309,
      "step": 37
    },
    {
      "epoch": 0.0019914837864395676,
      "grad_norm": 0.6512495279312134,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 1.2766,
      "step": 38
    },
    {
      "epoch": 0.002043891254503767,
      "grad_norm": 0.5928826332092285,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 1.2372,
      "step": 39
    },
    {
      "epoch": 0.002096298722567966,
      "grad_norm": 0.9627623558044434,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 1.3238,
      "step": 40
    },
    {
      "epoch": 0.0021487061906321652,
      "grad_norm": 1.0270004272460938,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 1.4212,
      "step": 41
    },
    {
      "epoch": 0.002201113658696364,
      "grad_norm": 1.0594191551208496,
      "learning_rate": 1.339745962155613e-05,
      "loss": 1.1272,
      "step": 42
    },
    {
      "epoch": 0.0022535211267605635,
      "grad_norm": 1.4876549243927002,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 1.3084,
      "step": 43
    },
    {
      "epoch": 0.0023059285948247624,
      "grad_norm": 2.034849166870117,
      "learning_rate": 7.612046748871327e-06,
      "loss": 1.5273,
      "step": 44
    },
    {
      "epoch": 0.0023583360628889618,
      "grad_norm": 1.6223154067993164,
      "learning_rate": 5.306987050489442e-06,
      "loss": 1.4066,
      "step": 45
    },
    {
      "epoch": 0.0024107435309531607,
      "grad_norm": 1.6114851236343384,
      "learning_rate": 3.40741737109318e-06,
      "loss": 1.2965,
      "step": 46
    },
    {
      "epoch": 0.00246315099901736,
      "grad_norm": 1.6107181310653687,
      "learning_rate": 1.921471959676957e-06,
      "loss": 1.4148,
      "step": 47
    },
    {
      "epoch": 0.002515558467081559,
      "grad_norm": 1.9817970991134644,
      "learning_rate": 8.555138626189618e-07,
      "loss": 1.6692,
      "step": 48
    },
    {
      "epoch": 0.0025679659351457583,
      "grad_norm": 5.455220699310303,
      "learning_rate": 2.141076761396521e-07,
      "loss": 2.7254,
      "step": 49
    },
    {
      "epoch": 0.0026203734032099572,
      "grad_norm": 7.535577297210693,
      "learning_rate": 0.0,
      "loss": 2.5626,
      "step": 50
    },
    {
      "epoch": 0.0026203734032099572,
      "eval_loss": 1.3211801052093506,
      "eval_runtime": 823.1773,
      "eval_samples_per_second": 9.761,
      "eval_steps_per_second": 4.881,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.02936877203456e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|