|
{
  "best_metric": 0.07640355080366135,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.15939430165371588,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0031878860330743176,
      "grad_norm": 21.039119720458984,
      "learning_rate": 5e-05,
      "loss": 12.8383,
      "step": 1
    },
    {
      "epoch": 0.0031878860330743176,
      "eval_loss": 13.454628944396973,
      "eval_runtime": 143.3396,
      "eval_samples_per_second": 14.748,
      "eval_steps_per_second": 1.849,
      "step": 1
    },
    {
      "epoch": 0.006375772066148635,
      "grad_norm": 21.684816360473633,
      "learning_rate": 0.0001,
      "loss": 12.8279,
      "step": 2
    },
    {
      "epoch": 0.009563658099222952,
      "grad_norm": 23.908435821533203,
      "learning_rate": 9.989294616193017e-05,
      "loss": 11.4862,
      "step": 3
    },
    {
      "epoch": 0.01275154413229727,
      "grad_norm": 34.252071380615234,
      "learning_rate": 9.957224306869053e-05,
      "loss": 4.7458,
      "step": 4
    },
    {
      "epoch": 0.01593943016537159,
      "grad_norm": 13.637765884399414,
      "learning_rate": 9.903926402016153e-05,
      "loss": 0.7309,
      "step": 5
    },
    {
      "epoch": 0.019127316198445904,
      "grad_norm": 0.7787806987762451,
      "learning_rate": 9.829629131445342e-05,
      "loss": 0.017,
      "step": 6
    },
    {
      "epoch": 0.022315202231520222,
      "grad_norm": 0.2431143969297409,
      "learning_rate": 9.73465064747553e-05,
      "loss": 0.0029,
      "step": 7
    },
    {
      "epoch": 0.02550308826459454,
      "grad_norm": 0.021578334271907806,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.0004,
      "step": 8
    },
    {
      "epoch": 0.02869097429766886,
      "grad_norm": 2.1936194896698,
      "learning_rate": 9.484363707663442e-05,
      "loss": 0.0859,
      "step": 9
    },
    {
      "epoch": 0.03187886033074318,
      "grad_norm": 4.893922328948975,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.3001,
      "step": 10
    },
    {
      "epoch": 0.03506674636381749,
      "grad_norm": 3.905122995376587,
      "learning_rate": 9.157348061512727e-05,
      "loss": 0.317,
      "step": 11
    },
    {
      "epoch": 0.03825463239689181,
      "grad_norm": 3.8358139991760254,
      "learning_rate": 8.966766701456177e-05,
      "loss": 0.6007,
      "step": 12
    },
    {
      "epoch": 0.04144251842996613,
      "grad_norm": 1.3878974914550781,
      "learning_rate": 8.759199037394887e-05,
      "loss": 0.3397,
      "step": 13
    },
    {
      "epoch": 0.044630404463040445,
      "grad_norm": 1.4316143989562988,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.019,
      "step": 14
    },
    {
      "epoch": 0.04781829049611477,
      "grad_norm": 0.023320436477661133,
      "learning_rate": 8.296729075500344e-05,
      "loss": 0.0002,
      "step": 15
    },
    {
      "epoch": 0.05100617652918908,
      "grad_norm": 0.0005990703357383609,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.0,
      "step": 16
    },
    {
      "epoch": 0.0541940625622634,
      "grad_norm": 0.00043424381874501705,
      "learning_rate": 7.777851165098012e-05,
      "loss": 0.0,
      "step": 17
    },
    {
      "epoch": 0.05738194859533772,
      "grad_norm": 0.0003476722922641784,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.0,
      "step": 18
    },
    {
      "epoch": 0.060569834628412034,
      "grad_norm": 0.00039929794729687274,
      "learning_rate": 7.211443451095007e-05,
      "loss": 0.0,
      "step": 19
    },
    {
      "epoch": 0.06375772066148636,
      "grad_norm": 0.08998116850852966,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.0097,
      "step": 20
    },
    {
      "epoch": 0.06694560669456066,
      "grad_norm": 0.09896165132522583,
      "learning_rate": 6.607197326515808e-05,
      "loss": 0.0072,
      "step": 21
    },
    {
      "epoch": 0.07013349272763499,
      "grad_norm": 0.4899124801158905,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.0759,
      "step": 22
    },
    {
      "epoch": 0.07332137876070931,
      "grad_norm": 1.5196644067764282,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 0.2731,
      "step": 23
    },
    {
      "epoch": 0.07650926479378362,
      "grad_norm": 1.9914950132369995,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 0.4005,
      "step": 24
    },
    {
      "epoch": 0.07969715082685794,
      "grad_norm": 1.3223192691802979,
      "learning_rate": 5.327015646150716e-05,
      "loss": 0.5097,
      "step": 25
    },
    {
      "epoch": 0.07969715082685794,
      "eval_loss": 0.07755980640649796,
      "eval_runtime": 143.3592,
      "eval_samples_per_second": 14.746,
      "eval_steps_per_second": 1.849,
      "step": 25
    },
    {
      "epoch": 0.08288503685993226,
      "grad_norm": 0.0372718870639801,
      "learning_rate": 5e-05,
      "loss": 0.0005,
      "step": 26
    },
    {
      "epoch": 0.08607292289300658,
      "grad_norm": 0.0864085778594017,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 0.0087,
      "step": 27
    },
    {
      "epoch": 0.08926080892608089,
      "grad_norm": 0.07040909677743912,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.001,
      "step": 28
    },
    {
      "epoch": 0.09244869495915521,
      "grad_norm": 0.08171924203634262,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 0.0013,
      "step": 29
    },
    {
      "epoch": 0.09563658099222953,
      "grad_norm": 0.04170752316713333,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.0006,
      "step": 30
    },
    {
      "epoch": 0.09882446702530384,
      "grad_norm": 0.04046756774187088,
      "learning_rate": 3.392802673484193e-05,
      "loss": 0.0006,
      "step": 31
    },
    {
      "epoch": 0.10201235305837816,
      "grad_norm": 0.10425488650798798,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.0097,
      "step": 32
    },
    {
      "epoch": 0.10520023909145249,
      "grad_norm": 0.13888247311115265,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 0.0111,
      "step": 33
    },
    {
      "epoch": 0.1083881251245268,
      "grad_norm": 0.42929139733314514,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.0647,
      "step": 34
    },
    {
      "epoch": 0.11157601115760112,
      "grad_norm": 0.5731561779975891,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 0.0943,
      "step": 35
    },
    {
      "epoch": 0.11476389719067544,
      "grad_norm": 0.602446973323822,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.2037,
      "step": 36
    },
    {
      "epoch": 0.11795178322374975,
      "grad_norm": 1.7377841472625732,
      "learning_rate": 1.703270924499656e-05,
      "loss": 0.5449,
      "step": 37
    },
    {
      "epoch": 0.12113966925682407,
      "grad_norm": 1.0830893516540527,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.3696,
      "step": 38
    },
    {
      "epoch": 0.12432755528989839,
      "grad_norm": 0.007708725985139608,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 0.0001,
      "step": 39
    },
    {
      "epoch": 0.1275154413229727,
      "grad_norm": 0.008319525048136711,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.0001,
      "step": 40
    },
    {
      "epoch": 0.13070332735604703,
      "grad_norm": 0.006531168706715107,
      "learning_rate": 8.426519384872733e-06,
      "loss": 0.0001,
      "step": 41
    },
    {
      "epoch": 0.13389121338912133,
      "grad_norm": 0.005489464849233627,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.0001,
      "step": 42
    },
    {
      "epoch": 0.13707909942219565,
      "grad_norm": 0.005156314000487328,
      "learning_rate": 5.156362923365588e-06,
      "loss": 0.0001,
      "step": 43
    },
    {
      "epoch": 0.14026698545526997,
      "grad_norm": 0.005498033948242664,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.0001,
      "step": 44
    },
    {
      "epoch": 0.1434548714883443,
      "grad_norm": 0.005461568012833595,
      "learning_rate": 2.653493525244721e-06,
      "loss": 0.0001,
      "step": 45
    },
    {
      "epoch": 0.14664275752141862,
      "grad_norm": 0.0800841674208641,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.0069,
      "step": 46
    },
    {
      "epoch": 0.14983064355449294,
      "grad_norm": 0.22930651903152466,
      "learning_rate": 9.607359798384785e-07,
      "loss": 0.0571,
      "step": 47
    },
    {
      "epoch": 0.15301852958756723,
      "grad_norm": 0.622684121131897,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.201,
      "step": 48
    },
    {
      "epoch": 0.15620641562064155,
      "grad_norm": 0.3593548834323883,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 0.2477,
      "step": 49
    },
    {
      "epoch": 0.15939430165371588,
      "grad_norm": 0.9446438550949097,
      "learning_rate": 0.0,
      "loss": 0.593,
      "step": 50
    },
    {
      "epoch": 0.15939430165371588,
      "eval_loss": 0.07640355080366135,
      "eval_runtime": 143.3385,
      "eval_samples_per_second": 14.748,
      "eval_steps_per_second": 1.849,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.968083617316864e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|