{
  "best_metric": 1.3719125986099243,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.013988948730502902,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0002797789746100581,
      "grad_norm": 0.6457531452178955,
      "learning_rate": 5e-05,
      "loss": 2.118,
      "step": 1
    },
    {
      "epoch": 0.0002797789746100581,
      "eval_loss": 4.790634632110596,
      "eval_runtime": 290.7116,
      "eval_samples_per_second": 82.831,
      "eval_steps_per_second": 10.354,
      "step": 1
    },
    {
      "epoch": 0.0005595579492201162,
      "grad_norm": 2.5176806449890137,
      "learning_rate": 0.0001,
      "loss": 2.6891,
      "step": 2
    },
    {
      "epoch": 0.0008393369238301741,
      "grad_norm": 0.9567082524299622,
      "learning_rate": 9.989294616193017e-05,
      "loss": 2.05,
      "step": 3
    },
    {
      "epoch": 0.0011191158984402323,
      "grad_norm": 2.075179100036621,
      "learning_rate": 9.957224306869053e-05,
      "loss": 2.4286,
      "step": 4
    },
    {
      "epoch": 0.0013988948730502902,
      "grad_norm": 1.5577850341796875,
      "learning_rate": 9.903926402016153e-05,
      "loss": 2.4824,
      "step": 5
    },
    {
      "epoch": 0.0016786738476603483,
      "grad_norm": 1.5086711645126343,
      "learning_rate": 9.829629131445342e-05,
      "loss": 1.7467,
      "step": 6
    },
    {
      "epoch": 0.0019584528222704066,
      "grad_norm": 1.5570716857910156,
      "learning_rate": 9.73465064747553e-05,
      "loss": 1.4736,
      "step": 7
    },
    {
      "epoch": 0.0022382317968804646,
      "grad_norm": 1.768438696861267,
      "learning_rate": 9.619397662556435e-05,
      "loss": 1.3928,
      "step": 8
    },
    {
      "epoch": 0.0025180107714905223,
      "grad_norm": 2.5458669662475586,
      "learning_rate": 9.484363707663442e-05,
      "loss": 1.7248,
      "step": 9
    },
    {
      "epoch": 0.0027977897461005804,
      "grad_norm": 2.0986435413360596,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.7086,
      "step": 10
    },
    {
      "epoch": 0.0030775687207106384,
      "grad_norm": 2.463690996170044,
      "learning_rate": 9.157348061512727e-05,
      "loss": 1.9833,
      "step": 11
    },
    {
      "epoch": 0.0033573476953206965,
      "grad_norm": 2.4661600589752197,
      "learning_rate": 8.966766701456177e-05,
      "loss": 1.7851,
      "step": 12
    },
    {
      "epoch": 0.0036371266699307546,
      "grad_norm": 0.5250707864761353,
      "learning_rate": 8.759199037394887e-05,
      "loss": 2.2392,
      "step": 13
    },
    {
      "epoch": 0.003916905644540813,
      "grad_norm": 1.2579808235168457,
      "learning_rate": 8.535533905932738e-05,
      "loss": 1.9034,
      "step": 14
    },
    {
      "epoch": 0.004196684619150871,
      "grad_norm": 1.0845015048980713,
      "learning_rate": 8.296729075500344e-05,
      "loss": 1.303,
      "step": 15
    },
    {
      "epoch": 0.004476463593760929,
      "grad_norm": 1.1315339803695679,
      "learning_rate": 8.043807145043604e-05,
      "loss": 1.0852,
      "step": 16
    },
    {
      "epoch": 0.0047562425683709865,
      "grad_norm": 0.6806582808494568,
      "learning_rate": 7.777851165098012e-05,
      "loss": 2.3763,
      "step": 17
    },
    {
      "epoch": 0.005036021542981045,
      "grad_norm": 1.0810201168060303,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.4589,
      "step": 18
    },
    {
      "epoch": 0.005315800517591103,
      "grad_norm": 0.8670059442520142,
      "learning_rate": 7.211443451095007e-05,
      "loss": 1.4309,
      "step": 19
    },
    {
      "epoch": 0.005595579492201161,
      "grad_norm": 0.8074933290481567,
      "learning_rate": 6.91341716182545e-05,
      "loss": 1.8018,
      "step": 20
    },
    {
      "epoch": 0.005875358466811219,
      "grad_norm": 1.1735812425613403,
      "learning_rate": 6.607197326515808e-05,
      "loss": 1.3628,
      "step": 21
    },
    {
      "epoch": 0.006155137441421277,
      "grad_norm": 1.394433617591858,
      "learning_rate": 6.294095225512603e-05,
      "loss": 1.4953,
      "step": 22
    },
    {
      "epoch": 0.006434916416031335,
      "grad_norm": 1.1334067583084106,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 1.6903,
      "step": 23
    },
    {
      "epoch": 0.006714695390641393,
      "grad_norm": 1.4898574352264404,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 1.5683,
      "step": 24
    },
    {
      "epoch": 0.006994474365251451,
      "grad_norm": 2.480189323425293,
      "learning_rate": 5.327015646150716e-05,
      "loss": 2.313,
      "step": 25
    },
    {
      "epoch": 0.006994474365251451,
      "eval_loss": 1.4263904094696045,
      "eval_runtime": 289.3918,
      "eval_samples_per_second": 83.209,
      "eval_steps_per_second": 10.401,
      "step": 25
    },
    {
      "epoch": 0.007274253339861509,
      "grad_norm": 0.41041114926338196,
      "learning_rate": 5e-05,
      "loss": 2.0975,
      "step": 26
    },
    {
      "epoch": 0.007554032314471567,
      "grad_norm": 1.5040510892868042,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 0.939,
      "step": 27
    },
    {
      "epoch": 0.007833811289081626,
      "grad_norm": 0.6277198791503906,
      "learning_rate": 4.347369038899744e-05,
      "loss": 1.6496,
      "step": 28
    },
    {
      "epoch": 0.008113590263691683,
      "grad_norm": 0.8916029334068298,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 1.8643,
      "step": 29
    },
    {
      "epoch": 0.008393369238301742,
      "grad_norm": 0.7794185876846313,
      "learning_rate": 3.705904774487396e-05,
      "loss": 2.0262,
      "step": 30
    },
    {
      "epoch": 0.0086731482129118,
      "grad_norm": 0.9928082227706909,
      "learning_rate": 3.392802673484193e-05,
      "loss": 1.1687,
      "step": 31
    },
    {
      "epoch": 0.008952927187521859,
      "grad_norm": 0.9538123607635498,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.2194,
      "step": 32
    },
    {
      "epoch": 0.009232706162131916,
      "grad_norm": 1.2503325939178467,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 1.179,
      "step": 33
    },
    {
      "epoch": 0.009512485136741973,
      "grad_norm": 1.0158066749572754,
      "learning_rate": 2.500000000000001e-05,
      "loss": 1.2768,
      "step": 34
    },
    {
      "epoch": 0.009792264111352032,
      "grad_norm": 1.1659492254257202,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 1.5504,
      "step": 35
    },
    {
      "epoch": 0.01007204308596209,
      "grad_norm": 1.3990727663040161,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 1.7014,
      "step": 36
    },
    {
      "epoch": 0.010351822060572148,
      "grad_norm": 1.8722199201583862,
      "learning_rate": 1.703270924499656e-05,
      "loss": 1.9507,
      "step": 37
    },
    {
      "epoch": 0.010631601035182205,
      "grad_norm": 0.545448362827301,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 2.2116,
      "step": 38
    },
    {
      "epoch": 0.010911380009792264,
      "grad_norm": 0.848518967628479,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 1.7857,
      "step": 39
    },
    {
      "epoch": 0.011191158984402321,
      "grad_norm": 0.7432922124862671,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 1.6847,
      "step": 40
    },
    {
      "epoch": 0.01147093795901238,
      "grad_norm": 0.6820698976516724,
      "learning_rate": 8.426519384872733e-06,
      "loss": 1.2811,
      "step": 41
    },
    {
      "epoch": 0.011750716933622438,
      "grad_norm": 0.8533961772918701,
      "learning_rate": 6.698729810778065e-06,
      "loss": 1.6261,
      "step": 42
    },
    {
      "epoch": 0.012030495908232497,
      "grad_norm": 0.8349013328552246,
      "learning_rate": 5.156362923365588e-06,
      "loss": 1.5641,
      "step": 43
    },
    {
      "epoch": 0.012310274882842554,
      "grad_norm": 0.8028209209442139,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 1.3012,
      "step": 44
    },
    {
      "epoch": 0.012590053857452613,
      "grad_norm": 0.8586874604225159,
      "learning_rate": 2.653493525244721e-06,
      "loss": 1.4158,
      "step": 45
    },
    {
      "epoch": 0.01286983283206267,
      "grad_norm": 1.2300059795379639,
      "learning_rate": 1.70370868554659e-06,
      "loss": 1.2818,
      "step": 46
    },
    {
      "epoch": 0.013149611806672729,
      "grad_norm": 1.4779167175292969,
      "learning_rate": 9.607359798384785e-07,
      "loss": 1.1995,
      "step": 47
    },
    {
      "epoch": 0.013429390781282786,
      "grad_norm": 1.1225005388259888,
      "learning_rate": 4.277569313094809e-07,
      "loss": 1.682,
      "step": 48
    },
    {
      "epoch": 0.013709169755892845,
      "grad_norm": 1.845724105834961,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 1.7847,
      "step": 49
    },
    {
      "epoch": 0.013988948730502902,
      "grad_norm": 2.089684009552002,
      "learning_rate": 0.0,
      "loss": 1.8361,
      "step": 50
    },
    {
      "epoch": 0.013988948730502902,
      "eval_loss": 1.3719125986099243,
      "eval_runtime": 289.049,
      "eval_samples_per_second": 83.308,
      "eval_steps_per_second": 10.413,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0741049715379405e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}