{
  "best_metric": 1.7017016410827637,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.069856793573175,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0013971358714634998,
      "grad_norm": 0.6773235201835632,
      "learning_rate": 0.0001,
      "loss": 1.8541,
      "step": 1
    },
    {
      "epoch": 0.0013971358714634998,
      "eval_loss": 2.437103509902954,
      "eval_runtime": 94.4959,
      "eval_samples_per_second": 3.196,
      "eval_steps_per_second": 1.598,
      "step": 1
    },
    {
      "epoch": 0.0027942717429269995,
      "grad_norm": 0.8164823651313782,
      "learning_rate": 0.0002,
      "loss": 2.0925,
      "step": 2
    },
    {
      "epoch": 0.0041914076143905,
      "grad_norm": 0.893460214138031,
      "learning_rate": 0.00019978589232386035,
      "loss": 1.9269,
      "step": 3
    },
    {
      "epoch": 0.005588543485853999,
      "grad_norm": 1.0951659679412842,
      "learning_rate": 0.00019914448613738106,
      "loss": 1.9705,
      "step": 4
    },
    {
      "epoch": 0.006985679357317499,
      "grad_norm": 0.690770149230957,
      "learning_rate": 0.00019807852804032305,
      "loss": 2.108,
      "step": 5
    },
    {
      "epoch": 0.008382815228781,
      "grad_norm": 0.7501918077468872,
      "learning_rate": 0.00019659258262890683,
      "loss": 1.6891,
      "step": 6
    },
    {
      "epoch": 0.009779951100244499,
      "grad_norm": 0.7339282035827637,
      "learning_rate": 0.0001946930129495106,
      "loss": 1.7287,
      "step": 7
    },
    {
      "epoch": 0.011177086971707998,
      "grad_norm": 0.7356044054031372,
      "learning_rate": 0.0001923879532511287,
      "loss": 1.704,
      "step": 8
    },
    {
      "epoch": 0.012574222843171499,
      "grad_norm": 0.7521883249282837,
      "learning_rate": 0.00018968727415326884,
      "loss": 1.9468,
      "step": 9
    },
    {
      "epoch": 0.013971358714634998,
      "grad_norm": 0.628012478351593,
      "learning_rate": 0.00018660254037844388,
      "loss": 1.5462,
      "step": 10
    },
    {
      "epoch": 0.015368494586098498,
      "grad_norm": 0.7118824124336243,
      "learning_rate": 0.00018314696123025454,
      "loss": 1.8561,
      "step": 11
    },
    {
      "epoch": 0.016765630457562,
      "grad_norm": 0.6984536647796631,
      "learning_rate": 0.00017933533402912354,
      "loss": 1.8444,
      "step": 12
    },
    {
      "epoch": 0.018162766329025498,
      "grad_norm": 0.7371782660484314,
      "learning_rate": 0.00017518398074789775,
      "loss": 1.8548,
      "step": 13
    },
    {
      "epoch": 0.019559902200488997,
      "grad_norm": 0.737273633480072,
      "learning_rate": 0.00017071067811865476,
      "loss": 1.7329,
      "step": 14
    },
    {
      "epoch": 0.020957038071952497,
      "grad_norm": 0.7812173962593079,
      "learning_rate": 0.00016593458151000688,
      "loss": 1.8888,
      "step": 15
    },
    {
      "epoch": 0.022354173943415996,
      "grad_norm": 1.094999074935913,
      "learning_rate": 0.00016087614290087208,
      "loss": 2.1278,
      "step": 16
    },
    {
      "epoch": 0.023751309814879495,
      "grad_norm": 0.9387333989143372,
      "learning_rate": 0.00015555702330196023,
      "loss": 2.1146,
      "step": 17
    },
    {
      "epoch": 0.025148445686342998,
      "grad_norm": 0.924557626247406,
      "learning_rate": 0.00015000000000000001,
      "loss": 1.9719,
      "step": 18
    },
    {
      "epoch": 0.026545581557806498,
      "grad_norm": 0.8792619705200195,
      "learning_rate": 0.00014422886902190014,
      "loss": 1.9341,
      "step": 19
    },
    {
      "epoch": 0.027942717429269997,
      "grad_norm": 0.7050760388374329,
      "learning_rate": 0.000138268343236509,
      "loss": 1.7321,
      "step": 20
    },
    {
      "epoch": 0.029339853300733496,
      "grad_norm": 0.9437889456748962,
      "learning_rate": 0.00013214394653031616,
      "loss": 1.6508,
      "step": 21
    },
    {
      "epoch": 0.030736989172196996,
      "grad_norm": 0.6894789338111877,
      "learning_rate": 0.00012588190451025207,
      "loss": 1.4325,
      "step": 22
    },
    {
      "epoch": 0.0321341250436605,
      "grad_norm": 0.8473041653633118,
      "learning_rate": 0.00011950903220161285,
      "loss": 1.6303,
      "step": 23
    },
    {
      "epoch": 0.033531260915124,
      "grad_norm": 1.1612331867218018,
      "learning_rate": 0.00011305261922200519,
      "loss": 2.124,
      "step": 24
    },
    {
      "epoch": 0.0349283967865875,
      "grad_norm": 1.0456833839416504,
      "learning_rate": 0.00010654031292301432,
      "loss": 2.0162,
      "step": 25
    },
    {
      "epoch": 0.0349283967865875,
      "eval_loss": 1.7710518836975098,
      "eval_runtime": 96.1396,
      "eval_samples_per_second": 3.141,
      "eval_steps_per_second": 1.571,
      "step": 25
    },
    {
      "epoch": 0.036325532658050996,
      "grad_norm": 1.1468117237091064,
      "learning_rate": 0.0001,
      "loss": 2.0482,
      "step": 26
    },
    {
      "epoch": 0.037722668529514496,
      "grad_norm": 0.8366401791572571,
      "learning_rate": 9.345968707698569e-05,
      "loss": 1.6346,
      "step": 27
    },
    {
      "epoch": 0.039119804400977995,
      "grad_norm": 0.7719747424125671,
      "learning_rate": 8.694738077799488e-05,
      "loss": 1.5849,
      "step": 28
    },
    {
      "epoch": 0.040516940272441494,
      "grad_norm": 0.9361782670021057,
      "learning_rate": 8.049096779838719e-05,
      "loss": 1.8551,
      "step": 29
    },
    {
      "epoch": 0.041914076143904994,
      "grad_norm": 1.128204345703125,
      "learning_rate": 7.411809548974792e-05,
      "loss": 1.9648,
      "step": 30
    },
    {
      "epoch": 0.04331121201536849,
      "grad_norm": 1.0024124383926392,
      "learning_rate": 6.785605346968386e-05,
      "loss": 1.8017,
      "step": 31
    },
    {
      "epoch": 0.04470834788683199,
      "grad_norm": 0.9835119843482971,
      "learning_rate": 6.173165676349103e-05,
      "loss": 1.7692,
      "step": 32
    },
    {
      "epoch": 0.04610548375829549,
      "grad_norm": 1.0458788871765137,
      "learning_rate": 5.577113097809989e-05,
      "loss": 1.456,
      "step": 33
    },
    {
      "epoch": 0.04750261962975899,
      "grad_norm": 1.029359221458435,
      "learning_rate": 5.000000000000002e-05,
      "loss": 2.1277,
      "step": 34
    },
    {
      "epoch": 0.0488997555012225,
      "grad_norm": 0.8961425423622131,
      "learning_rate": 4.444297669803981e-05,
      "loss": 1.6894,
      "step": 35
    },
    {
      "epoch": 0.050296891372685996,
      "grad_norm": 0.9357238411903381,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 1.6413,
      "step": 36
    },
    {
      "epoch": 0.051694027244149496,
      "grad_norm": 0.9889127612113953,
      "learning_rate": 3.406541848999312e-05,
      "loss": 1.7025,
      "step": 37
    },
    {
      "epoch": 0.053091163115612995,
      "grad_norm": 0.9021267890930176,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 1.3933,
      "step": 38
    },
    {
      "epoch": 0.054488298987076494,
      "grad_norm": 1.259774088859558,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 1.6318,
      "step": 39
    },
    {
      "epoch": 0.055885434858539994,
      "grad_norm": 0.9996725916862488,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 1.6355,
      "step": 40
    },
    {
      "epoch": 0.05728257073000349,
      "grad_norm": 1.31227707862854,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 1.8715,
      "step": 41
    },
    {
      "epoch": 0.05867970660146699,
      "grad_norm": 1.266066312789917,
      "learning_rate": 1.339745962155613e-05,
      "loss": 2.0598,
      "step": 42
    },
    {
      "epoch": 0.06007684247293049,
      "grad_norm": 1.1719212532043457,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 2.1243,
      "step": 43
    },
    {
      "epoch": 0.06147397834439399,
      "grad_norm": 1.2561590671539307,
      "learning_rate": 7.612046748871327e-06,
      "loss": 1.7084,
      "step": 44
    },
    {
      "epoch": 0.06287111421585749,
      "grad_norm": 1.4959790706634521,
      "learning_rate": 5.306987050489442e-06,
      "loss": 1.8423,
      "step": 45
    },
    {
      "epoch": 0.064268250087321,
      "grad_norm": 1.5612496137619019,
      "learning_rate": 3.40741737109318e-06,
      "loss": 1.8425,
      "step": 46
    },
    {
      "epoch": 0.06566538595878449,
      "grad_norm": 1.473332166671753,
      "learning_rate": 1.921471959676957e-06,
      "loss": 1.5454,
      "step": 47
    },
    {
      "epoch": 0.067062521830248,
      "grad_norm": 1.9833347797393799,
      "learning_rate": 8.555138626189618e-07,
      "loss": 2.099,
      "step": 48
    },
    {
      "epoch": 0.06845965770171149,
      "grad_norm": 1.5466455221176147,
      "learning_rate": 2.141076761396521e-07,
      "loss": 1.6702,
      "step": 49
    },
    {
      "epoch": 0.069856793573175,
      "grad_norm": 1.9173717498779297,
      "learning_rate": 0.0,
      "loss": 1.8048,
      "step": 50
    },
    {
      "epoch": 0.069856793573175,
      "eval_loss": 1.7017016410827637,
      "eval_runtime": 96.2516,
      "eval_samples_per_second": 3.138,
      "eval_steps_per_second": 1.569,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.99038693326848e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}