{
  "best_metric": 0.13963158428668976,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.1266624445851805,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00253324889170361,
      "grad_norm": 3.6676442623138428,
      "learning_rate": 4e-05,
      "loss": 3.0591,
      "step": 1
    },
    {
      "epoch": 0.00253324889170361,
      "eval_loss": 3.577761650085449,
      "eval_runtime": 44.9583,
      "eval_samples_per_second": 3.715,
      "eval_steps_per_second": 1.868,
      "step": 1
    },
    {
      "epoch": 0.00506649778340722,
      "grad_norm": 3.199979305267334,
      "learning_rate": 8e-05,
      "loss": 2.7215,
      "step": 2
    },
    {
      "epoch": 0.007599746675110829,
      "grad_norm": 3.33293080329895,
      "learning_rate": 0.00012,
      "loss": 2.9505,
      "step": 3
    },
    {
      "epoch": 0.01013299556681444,
      "grad_norm": 4.012212753295898,
      "learning_rate": 0.00016,
      "loss": 2.8184,
      "step": 4
    },
    {
      "epoch": 0.01266624445851805,
      "grad_norm": 4.058014869689941,
      "learning_rate": 0.0002,
      "loss": 1.986,
      "step": 5
    },
    {
      "epoch": 0.015199493350221659,
      "grad_norm": 3.3792548179626465,
      "learning_rate": 0.00019994532573409262,
      "loss": 1.3189,
      "step": 6
    },
    {
      "epoch": 0.01773274224192527,
      "grad_norm": 3.53857159614563,
      "learning_rate": 0.00019978136272187747,
      "loss": 0.7923,
      "step": 7
    },
    {
      "epoch": 0.02026599113362888,
      "grad_norm": 5.587308406829834,
      "learning_rate": 0.00019950829025450114,
      "loss": 0.7796,
      "step": 8
    },
    {
      "epoch": 0.022799240025332488,
      "grad_norm": 3.04203724861145,
      "learning_rate": 0.00019912640693269752,
      "loss": 0.1713,
      "step": 9
    },
    {
      "epoch": 0.0253324889170361,
      "grad_norm": 3.9056479930877686,
      "learning_rate": 0.00019863613034027224,
      "loss": 0.223,
      "step": 10
    },
    {
      "epoch": 0.02786573780873971,
      "grad_norm": 3.32242488861084,
      "learning_rate": 0.00019803799658748094,
      "loss": 0.256,
      "step": 11
    },
    {
      "epoch": 0.030398986700443317,
      "grad_norm": 0.5264054536819458,
      "learning_rate": 0.0001973326597248006,
      "loss": 0.0183,
      "step": 12
    },
    {
      "epoch": 0.032932235592146926,
      "grad_norm": 2.039252281188965,
      "learning_rate": 0.00019652089102773488,
      "loss": 0.2864,
      "step": 13
    },
    {
      "epoch": 0.03546548448385054,
      "grad_norm": 1.891056776046753,
      "learning_rate": 0.00019560357815343577,
      "loss": 0.0422,
      "step": 14
    },
    {
      "epoch": 0.03799873337555415,
      "grad_norm": 2.2989518642425537,
      "learning_rate": 0.00019458172417006347,
      "loss": 0.5805,
      "step": 15
    },
    {
      "epoch": 0.04053198226725776,
      "grad_norm": 2.4155068397521973,
      "learning_rate": 0.0001934564464599461,
      "loss": 0.2322,
      "step": 16
    },
    {
      "epoch": 0.04306523115896137,
      "grad_norm": 2.4074628353118896,
      "learning_rate": 0.00019222897549773848,
      "loss": 0.4752,
      "step": 17
    },
    {
      "epoch": 0.045598480050664976,
      "grad_norm": 0.9219037294387817,
      "learning_rate": 0.00019090065350491626,
      "loss": 0.2356,
      "step": 18
    },
    {
      "epoch": 0.048131728942368585,
      "grad_norm": 1.37091064453125,
      "learning_rate": 0.00018947293298207635,
      "loss": 0.2088,
      "step": 19
    },
    {
      "epoch": 0.0506649778340722,
      "grad_norm": 0.2628805935382843,
      "learning_rate": 0.0001879473751206489,
      "loss": 0.0109,
      "step": 20
    },
    {
      "epoch": 0.05319822672577581,
      "grad_norm": 2.158172845840454,
      "learning_rate": 0.00018632564809575742,
      "loss": 0.2097,
      "step": 21
    },
    {
      "epoch": 0.05573147561747942,
      "grad_norm": 1.4831571578979492,
      "learning_rate": 0.00018460952524209355,
      "loss": 0.1739,
      "step": 22
    },
    {
      "epoch": 0.058264724509183026,
      "grad_norm": 1.6883933544158936,
      "learning_rate": 0.00018280088311480201,
      "loss": 0.2868,
      "step": 23
    },
    {
      "epoch": 0.060797973400886635,
      "grad_norm": 1.1558237075805664,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.1405,
      "step": 24
    },
    {
      "epoch": 0.06333122229259025,
      "grad_norm": 1.421405553817749,
      "learning_rate": 0.00017891405093963938,
      "loss": 0.162,
      "step": 25
    },
    {
      "epoch": 0.06586447118429385,
      "grad_norm": 3.7727131843566895,
      "learning_rate": 0.00017684011108568592,
      "loss": 0.291,
      "step": 26
    },
    {
      "epoch": 0.06839772007599747,
      "grad_norm": 1.3770571947097778,
      "learning_rate": 0.0001746821476984154,
      "loss": 0.12,
      "step": 27
    },
    {
      "epoch": 0.07093096896770108,
      "grad_norm": 6.9208149909973145,
      "learning_rate": 0.00017244252047910892,
      "loss": 0.7353,
      "step": 28
    },
    {
      "epoch": 0.07346421785940468,
      "grad_norm": 2.6051218509674072,
      "learning_rate": 0.00017012367842724887,
      "loss": 0.2355,
      "step": 29
    },
    {
      "epoch": 0.0759974667511083,
      "grad_norm": 3.3124608993530273,
      "learning_rate": 0.00016772815716257412,
      "loss": 0.7072,
      "step": 30
    },
    {
      "epoch": 0.0785307156428119,
      "grad_norm": 2.073368787765503,
      "learning_rate": 0.00016525857615241687,
      "loss": 0.1301,
      "step": 31
    },
    {
      "epoch": 0.08106396453451552,
      "grad_norm": 0.6973274350166321,
      "learning_rate": 0.0001627176358473537,
      "loss": 0.0637,
      "step": 32
    },
    {
      "epoch": 0.08359721342621912,
      "grad_norm": 1.706817865371704,
      "learning_rate": 0.00016010811472830252,
      "loss": 0.125,
      "step": 33
    },
    {
      "epoch": 0.08613046231792273,
      "grad_norm": 1.9682927131652832,
      "learning_rate": 0.00015743286626829437,
      "loss": 0.3533,
      "step": 34
    },
    {
      "epoch": 0.08866371120962635,
      "grad_norm": 2.4648215770721436,
      "learning_rate": 0.00015469481581224272,
      "loss": 0.6709,
      "step": 35
    },
    {
      "epoch": 0.09119696010132995,
      "grad_norm": 2.308652877807617,
      "learning_rate": 0.00015189695737812152,
      "loss": 0.1397,
      "step": 36
    },
    {
      "epoch": 0.09373020899303357,
      "grad_norm": 3.2563140392303467,
      "learning_rate": 0.00014904235038305083,
      "loss": 0.3003,
      "step": 37
    },
    {
      "epoch": 0.09626345788473717,
      "grad_norm": 3.14212965965271,
      "learning_rate": 0.0001461341162978688,
      "loss": 0.2907,
      "step": 38
    },
    {
      "epoch": 0.09879670677644078,
      "grad_norm": 1.726691484451294,
      "learning_rate": 0.00014317543523384928,
      "loss": 0.0962,
      "step": 39
    },
    {
      "epoch": 0.1013299556681444,
      "grad_norm": 4.3779449462890625,
      "learning_rate": 0.00014016954246529696,
      "loss": 0.3433,
      "step": 40
    },
    {
      "epoch": 0.103863204559848,
      "grad_norm": 3.401987314224243,
      "learning_rate": 0.00013711972489182208,
      "loss": 0.2321,
      "step": 41
    },
    {
      "epoch": 0.10639645345155162,
      "grad_norm": 1.7866804599761963,
      "learning_rate": 0.00013402931744416433,
      "loss": 0.0461,
      "step": 42
    },
    {
      "epoch": 0.10892970234325522,
      "grad_norm": 2.7210962772369385,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.2669,
      "step": 43
    },
    {
      "epoch": 0.11146295123495883,
      "grad_norm": 0.31108513474464417,
      "learning_rate": 0.00012774029087618446,
      "loss": 0.0179,
      "step": 44
    },
    {
      "epoch": 0.11399620012666245,
      "grad_norm": 1.0655226707458496,
      "learning_rate": 0.00012454854871407994,
      "loss": 0.1818,
      "step": 45
    },
    {
      "epoch": 0.11652944901836605,
      "grad_norm": 2.1581103801727295,
      "learning_rate": 0.0001213299630743747,
      "loss": 0.4705,
      "step": 46
    },
    {
      "epoch": 0.11906269791006967,
      "grad_norm": 0.13762931525707245,
      "learning_rate": 0.000118088053433211,
      "loss": 0.0066,
      "step": 47
    },
    {
      "epoch": 0.12159594680177327,
      "grad_norm": 0.04102027043700218,
      "learning_rate": 0.0001148263647711842,
      "loss": 0.0021,
      "step": 48
    },
    {
      "epoch": 0.12412919569347688,
      "grad_norm": 0.09145709127187729,
      "learning_rate": 0.00011154846369695863,
      "loss": 0.0054,
      "step": 49
    },
    {
      "epoch": 0.1266624445851805,
      "grad_norm": 0.6207671165466309,
      "learning_rate": 0.00010825793454723325,
      "loss": 0.009,
      "step": 50
    },
    {
      "epoch": 0.1266624445851805,
      "eval_loss": 0.13963158428668976,
      "eval_runtime": 45.5826,
      "eval_samples_per_second": 3.664,
      "eval_steps_per_second": 1.843,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.6109411688448e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}