{
  "best_metric": 2.4438157081604004,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.4550625711035267,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.009101251422070534,
      "grad_norm": 2.1346871852874756,
      "learning_rate": 5e-05,
      "loss": 4.9719,
      "step": 1
    },
    {
      "epoch": 0.009101251422070534,
      "eval_loss": 4.942025184631348,
      "eval_runtime": 4.0731,
      "eval_samples_per_second": 181.927,
      "eval_steps_per_second": 22.833,
      "step": 1
    },
    {
      "epoch": 0.01820250284414107,
      "grad_norm": 1.6914374828338623,
      "learning_rate": 0.0001,
      "loss": 4.9787,
      "step": 2
    },
    {
      "epoch": 0.027303754266211604,
      "grad_norm": 1.4702175855636597,
      "learning_rate": 9.989294616193017e-05,
      "loss": 4.8334,
      "step": 3
    },
    {
      "epoch": 0.03640500568828214,
      "grad_norm": 1.6867676973342896,
      "learning_rate": 9.957224306869053e-05,
      "loss": 5.0761,
      "step": 4
    },
    {
      "epoch": 0.04550625711035267,
      "grad_norm": 1.8130220174789429,
      "learning_rate": 9.903926402016153e-05,
      "loss": 5.0172,
      "step": 5
    },
    {
      "epoch": 0.05460750853242321,
      "grad_norm": 1.3689041137695312,
      "learning_rate": 9.829629131445342e-05,
      "loss": 4.4869,
      "step": 6
    },
    {
      "epoch": 0.06370875995449374,
      "grad_norm": 1.9557944536209106,
      "learning_rate": 9.73465064747553e-05,
      "loss": 4.8043,
      "step": 7
    },
    {
      "epoch": 0.07281001137656427,
      "grad_norm": 1.3595128059387207,
      "learning_rate": 9.619397662556435e-05,
      "loss": 4.8043,
      "step": 8
    },
    {
      "epoch": 0.08191126279863481,
      "grad_norm": 1.7356663942337036,
      "learning_rate": 9.484363707663442e-05,
      "loss": 4.988,
      "step": 9
    },
    {
      "epoch": 0.09101251422070535,
      "grad_norm": 1.2412830591201782,
      "learning_rate": 9.330127018922194e-05,
      "loss": 4.3269,
      "step": 10
    },
    {
      "epoch": 0.10011376564277588,
      "grad_norm": 1.268452763557434,
      "learning_rate": 9.157348061512727e-05,
      "loss": 4.4747,
      "step": 11
    },
    {
      "epoch": 0.10921501706484642,
      "grad_norm": 1.2650614976882935,
      "learning_rate": 8.966766701456177e-05,
      "loss": 4.0926,
      "step": 12
    },
    {
      "epoch": 0.11831626848691695,
      "grad_norm": 1.2461438179016113,
      "learning_rate": 8.759199037394887e-05,
      "loss": 3.8517,
      "step": 13
    },
    {
      "epoch": 0.12741751990898748,
      "grad_norm": 1.6487263441085815,
      "learning_rate": 8.535533905932738e-05,
      "loss": 3.712,
      "step": 14
    },
    {
      "epoch": 0.13651877133105803,
      "grad_norm": 1.447868824005127,
      "learning_rate": 8.296729075500344e-05,
      "loss": 3.5978,
      "step": 15
    },
    {
      "epoch": 0.14562002275312855,
      "grad_norm": 1.4297068119049072,
      "learning_rate": 8.043807145043604e-05,
      "loss": 3.5478,
      "step": 16
    },
    {
      "epoch": 0.1547212741751991,
      "grad_norm": 1.739940881729126,
      "learning_rate": 7.777851165098012e-05,
      "loss": 3.6575,
      "step": 17
    },
    {
      "epoch": 0.16382252559726962,
      "grad_norm": 1.542245864868164,
      "learning_rate": 7.500000000000001e-05,
      "loss": 3.4597,
      "step": 18
    },
    {
      "epoch": 0.17292377701934017,
      "grad_norm": 1.7134660482406616,
      "learning_rate": 7.211443451095007e-05,
      "loss": 3.3924,
      "step": 19
    },
    {
      "epoch": 0.1820250284414107,
      "grad_norm": 1.4687405824661255,
      "learning_rate": 6.91341716182545e-05,
      "loss": 3.5119,
      "step": 20
    },
    {
      "epoch": 0.19112627986348124,
      "grad_norm": 1.7465482950210571,
      "learning_rate": 6.607197326515808e-05,
      "loss": 3.8799,
      "step": 21
    },
    {
      "epoch": 0.20022753128555176,
      "grad_norm": 1.506808876991272,
      "learning_rate": 6.294095225512603e-05,
      "loss": 3.6514,
      "step": 22
    },
    {
      "epoch": 0.20932878270762229,
      "grad_norm": 1.3481961488723755,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 3.7276,
      "step": 23
    },
    {
      "epoch": 0.21843003412969283,
      "grad_norm": 1.4149906635284424,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 3.5602,
      "step": 24
    },
    {
      "epoch": 0.22753128555176336,
      "grad_norm": 1.665215015411377,
      "learning_rate": 5.327015646150716e-05,
      "loss": 3.1668,
      "step": 25
    },
    {
      "epoch": 0.22753128555176336,
      "eval_loss": 3.0762650966644287,
      "eval_runtime": 4.0834,
      "eval_samples_per_second": 181.466,
      "eval_steps_per_second": 22.775,
      "step": 25
    },
    {
      "epoch": 0.2366325369738339,
      "grad_norm": 2.168104410171509,
      "learning_rate": 5e-05,
      "loss": 2.6265,
      "step": 26
    },
    {
      "epoch": 0.24573378839590443,
      "grad_norm": 1.9618370532989502,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 2.7075,
      "step": 27
    },
    {
      "epoch": 0.25483503981797495,
      "grad_norm": 1.8602323532104492,
      "learning_rate": 4.347369038899744e-05,
      "loss": 2.6952,
      "step": 28
    },
    {
      "epoch": 0.26393629124004553,
      "grad_norm": 1.7716730833053589,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 2.6196,
      "step": 29
    },
    {
      "epoch": 0.27303754266211605,
      "grad_norm": 1.9440735578536987,
      "learning_rate": 3.705904774487396e-05,
      "loss": 2.5962,
      "step": 30
    },
    {
      "epoch": 0.2821387940841866,
      "grad_norm": 1.8115489482879639,
      "learning_rate": 3.392802673484193e-05,
      "loss": 2.6244,
      "step": 31
    },
    {
      "epoch": 0.2912400455062571,
      "grad_norm": 1.9076594114303589,
      "learning_rate": 3.086582838174551e-05,
      "loss": 2.4369,
      "step": 32
    },
    {
      "epoch": 0.3003412969283277,
      "grad_norm": 1.523880124092102,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 2.8228,
      "step": 33
    },
    {
      "epoch": 0.3094425483503982,
      "grad_norm": 2.241621971130371,
      "learning_rate": 2.500000000000001e-05,
      "loss": 3.1889,
      "step": 34
    },
    {
      "epoch": 0.3185437997724687,
      "grad_norm": 1.773310661315918,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 2.9108,
      "step": 35
    },
    {
      "epoch": 0.32764505119453924,
      "grad_norm": 1.6375255584716797,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 3.106,
      "step": 36
    },
    {
      "epoch": 0.33674630261660976,
      "grad_norm": 1.7255483865737915,
      "learning_rate": 1.703270924499656e-05,
      "loss": 2.8329,
      "step": 37
    },
    {
      "epoch": 0.34584755403868034,
      "grad_norm": 1.6252745389938354,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 2.3072,
      "step": 38
    },
    {
      "epoch": 0.35494880546075086,
      "grad_norm": 2.1704254150390625,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 2.1483,
      "step": 39
    },
    {
      "epoch": 0.3640500568828214,
      "grad_norm": 1.8003798723220825,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 2.1688,
      "step": 40
    },
    {
      "epoch": 0.3731513083048919,
      "grad_norm": 1.7969120740890503,
      "learning_rate": 8.426519384872733e-06,
      "loss": 2.1399,
      "step": 41
    },
    {
      "epoch": 0.3822525597269625,
      "grad_norm": 1.9582467079162598,
      "learning_rate": 6.698729810778065e-06,
      "loss": 2.1466,
      "step": 42
    },
    {
      "epoch": 0.391353811149033,
      "grad_norm": 1.8976678848266602,
      "learning_rate": 5.156362923365588e-06,
      "loss": 2.1565,
      "step": 43
    },
    {
      "epoch": 0.4004550625711035,
      "grad_norm": 1.9421591758728027,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 2.0217,
      "step": 44
    },
    {
      "epoch": 0.40955631399317405,
      "grad_norm": 1.625119924545288,
      "learning_rate": 2.653493525244721e-06,
      "loss": 2.344,
      "step": 45
    },
    {
      "epoch": 0.41865756541524457,
      "grad_norm": 2.1905601024627686,
      "learning_rate": 1.70370868554659e-06,
      "loss": 2.901,
      "step": 46
    },
    {
      "epoch": 0.42775881683731515,
      "grad_norm": 1.9888536930084229,
      "learning_rate": 9.607359798384785e-07,
      "loss": 2.7267,
      "step": 47
    },
    {
      "epoch": 0.43686006825938567,
      "grad_norm": 1.635216474533081,
      "learning_rate": 4.277569313094809e-07,
      "loss": 2.8926,
      "step": 48
    },
    {
      "epoch": 0.4459613196814562,
      "grad_norm": 1.7304415702819824,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 2.9722,
      "step": 49
    },
    {
      "epoch": 0.4550625711035267,
      "grad_norm": 2.324007749557495,
      "learning_rate": 0.0,
      "loss": 2.6059,
      "step": 50
    },
    {
      "epoch": 0.4550625711035267,
      "eval_loss": 2.4438157081604004,
      "eval_runtime": 4.0594,
      "eval_samples_per_second": 182.539,
      "eval_steps_per_second": 22.91,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9120441905971200.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}