{
  "best_metric": 1.7148971557617188,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.07802594362625573,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0015605188725251145,
      "grad_norm": 27.093671798706055,
      "learning_rate": 5e-05,
      "loss": 3.6352,
      "step": 1
    },
    {
      "epoch": 0.0015605188725251145,
      "eval_loss": 5.159157752990723,
      "eval_runtime": 284.5779,
      "eval_samples_per_second": 15.17,
      "eval_steps_per_second": 1.898,
      "step": 1
    },
    {
      "epoch": 0.003121037745050229,
      "grad_norm": 35.23081970214844,
      "learning_rate": 0.0001,
      "loss": 3.9431,
      "step": 2
    },
    {
      "epoch": 0.004681556617575344,
      "grad_norm": 47.64265823364258,
      "learning_rate": 9.989294616193017e-05,
      "loss": 4.2257,
      "step": 3
    },
    {
      "epoch": 0.006242075490100458,
      "grad_norm": 49.69529724121094,
      "learning_rate": 9.957224306869053e-05,
      "loss": 4.1541,
      "step": 4
    },
    {
      "epoch": 0.007802594362625573,
      "grad_norm": 62.94880676269531,
      "learning_rate": 9.903926402016153e-05,
      "loss": 3.7494,
      "step": 5
    },
    {
      "epoch": 0.009363113235150688,
      "grad_norm": 58.486602783203125,
      "learning_rate": 9.829629131445342e-05,
      "loss": 3.0434,
      "step": 6
    },
    {
      "epoch": 0.010923632107675801,
      "grad_norm": 47.32489776611328,
      "learning_rate": 9.73465064747553e-05,
      "loss": 2.4626,
      "step": 7
    },
    {
      "epoch": 0.012484150980200916,
      "grad_norm": 28.48284912109375,
      "learning_rate": 9.619397662556435e-05,
      "loss": 2.1683,
      "step": 8
    },
    {
      "epoch": 0.014044669852726031,
      "grad_norm": 36.46367645263672,
      "learning_rate": 9.484363707663442e-05,
      "loss": 2.0081,
      "step": 9
    },
    {
      "epoch": 0.015605188725251146,
      "grad_norm": 31.903751373291016,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.735,
      "step": 10
    },
    {
      "epoch": 0.01716570759777626,
      "grad_norm": 18.21411895751953,
      "learning_rate": 9.157348061512727e-05,
      "loss": 1.5997,
      "step": 11
    },
    {
      "epoch": 0.018726226470301376,
      "grad_norm": 33.31740951538086,
      "learning_rate": 8.966766701456177e-05,
      "loss": 2.0939,
      "step": 12
    },
    {
      "epoch": 0.02028674534282649,
      "grad_norm": 27.541213989257812,
      "learning_rate": 8.759199037394887e-05,
      "loss": 2.3984,
      "step": 13
    },
    {
      "epoch": 0.021847264215351603,
      "grad_norm": 32.7118034362793,
      "learning_rate": 8.535533905932738e-05,
      "loss": 2.366,
      "step": 14
    },
    {
      "epoch": 0.023407783087876718,
      "grad_norm": 28.815990447998047,
      "learning_rate": 8.296729075500344e-05,
      "loss": 2.2951,
      "step": 15
    },
    {
      "epoch": 0.024968301960401833,
      "grad_norm": 22.008098602294922,
      "learning_rate": 8.043807145043604e-05,
      "loss": 2.1483,
      "step": 16
    },
    {
      "epoch": 0.026528820832926948,
      "grad_norm": 19.000747680664062,
      "learning_rate": 7.777851165098012e-05,
      "loss": 1.9089,
      "step": 17
    },
    {
      "epoch": 0.028089339705452063,
      "grad_norm": 15.313111305236816,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.7744,
      "step": 18
    },
    {
      "epoch": 0.029649858577977178,
      "grad_norm": 12.31628131866455,
      "learning_rate": 7.211443451095007e-05,
      "loss": 1.7365,
      "step": 19
    },
    {
      "epoch": 0.031210377450502293,
      "grad_norm": 11.852285385131836,
      "learning_rate": 6.91341716182545e-05,
      "loss": 1.5183,
      "step": 20
    },
    {
      "epoch": 0.032770896323027404,
      "grad_norm": 11.74448299407959,
      "learning_rate": 6.607197326515808e-05,
      "loss": 1.651,
      "step": 21
    },
    {
      "epoch": 0.03433141519555252,
      "grad_norm": 13.442978858947754,
      "learning_rate": 6.294095225512603e-05,
      "loss": 1.5721,
      "step": 22
    },
    {
      "epoch": 0.035891934068077634,
      "grad_norm": 14.07254409790039,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 1.4983,
      "step": 23
    },
    {
      "epoch": 0.03745245294060275,
      "grad_norm": 16.79251480102539,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 1.4203,
      "step": 24
    },
    {
      "epoch": 0.039012971813127864,
      "grad_norm": 23.276710510253906,
      "learning_rate": 5.327015646150716e-05,
      "loss": 2.2293,
      "step": 25
    },
    {
      "epoch": 0.039012971813127864,
      "eval_loss": 1.7845427989959717,
      "eval_runtime": 284.5304,
      "eval_samples_per_second": 15.172,
      "eval_steps_per_second": 1.898,
      "step": 25
    },
    {
      "epoch": 0.04057349068565298,
      "grad_norm": 11.740275382995605,
      "learning_rate": 5e-05,
      "loss": 2.1754,
      "step": 26
    },
    {
      "epoch": 0.042134009558178094,
      "grad_norm": 13.802135467529297,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 2.0593,
      "step": 27
    },
    {
      "epoch": 0.043694528430703206,
      "grad_norm": 11.973353385925293,
      "learning_rate": 4.347369038899744e-05,
      "loss": 1.9128,
      "step": 28
    },
    {
      "epoch": 0.045255047303228324,
      "grad_norm": 10.648812294006348,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 1.8191,
      "step": 29
    },
    {
      "epoch": 0.046815566175753436,
      "grad_norm": 10.436445236206055,
      "learning_rate": 3.705904774487396e-05,
      "loss": 1.8078,
      "step": 30
    },
    {
      "epoch": 0.048376085048278554,
      "grad_norm": 10.531221389770508,
      "learning_rate": 3.392802673484193e-05,
      "loss": 1.7198,
      "step": 31
    },
    {
      "epoch": 0.049936603920803666,
      "grad_norm": 7.770738124847412,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.5549,
      "step": 32
    },
    {
      "epoch": 0.051497122793328784,
      "grad_norm": 8.979857444763184,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 1.5268,
      "step": 33
    },
    {
      "epoch": 0.053057641665853895,
      "grad_norm": 9.087567329406738,
      "learning_rate": 2.500000000000001e-05,
      "loss": 1.6059,
      "step": 34
    },
    {
      "epoch": 0.054618160538379014,
      "grad_norm": 11.504910469055176,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 1.345,
      "step": 35
    },
    {
      "epoch": 0.056178679410904125,
      "grad_norm": 9.279886245727539,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 1.4742,
      "step": 36
    },
    {
      "epoch": 0.05773919828342924,
      "grad_norm": 18.69571304321289,
      "learning_rate": 1.703270924499656e-05,
      "loss": 1.8889,
      "step": 37
    },
    {
      "epoch": 0.059299717155954355,
      "grad_norm": 24.7452392578125,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 2.1979,
      "step": 38
    },
    {
      "epoch": 0.06086023602847947,
      "grad_norm": 7.359485626220703,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 1.9981,
      "step": 39
    },
    {
      "epoch": 0.062420754901004585,
      "grad_norm": 9.63357925415039,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 2.0834,
      "step": 40
    },
    {
      "epoch": 0.0639812737735297,
      "grad_norm": 15.735189437866211,
      "learning_rate": 8.426519384872733e-06,
      "loss": 1.824,
      "step": 41
    },
    {
      "epoch": 0.06554179264605481,
      "grad_norm": 6.923338413238525,
      "learning_rate": 6.698729810778065e-06,
      "loss": 1.8121,
      "step": 42
    },
    {
      "epoch": 0.06710231151857993,
      "grad_norm": 11.885785102844238,
      "learning_rate": 5.156362923365588e-06,
      "loss": 1.8259,
      "step": 43
    },
    {
      "epoch": 0.06866283039110505,
      "grad_norm": 9.307328224182129,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 1.661,
      "step": 44
    },
    {
      "epoch": 0.07022334926363016,
      "grad_norm": 6.920248985290527,
      "learning_rate": 2.653493525244721e-06,
      "loss": 1.6489,
      "step": 45
    },
    {
      "epoch": 0.07178386813615527,
      "grad_norm": 7.753159999847412,
      "learning_rate": 1.70370868554659e-06,
      "loss": 1.5188,
      "step": 46
    },
    {
      "epoch": 0.07334438700868039,
      "grad_norm": 10.333271026611328,
      "learning_rate": 9.607359798384785e-07,
      "loss": 1.4593,
      "step": 47
    },
    {
      "epoch": 0.0749049058812055,
      "grad_norm": 9.383374214172363,
      "learning_rate": 4.277569313094809e-07,
      "loss": 1.4722,
      "step": 48
    },
    {
      "epoch": 0.07646542475373061,
      "grad_norm": 20.21590805053711,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 1.55,
      "step": 49
    },
    {
      "epoch": 0.07802594362625573,
      "grad_norm": 30.963455200195312,
      "learning_rate": 0.0,
      "loss": 2.1153,
      "step": 50
    },
    {
      "epoch": 0.07802594362625573,
      "eval_loss": 1.7148971557617188,
      "eval_runtime": 284.4128,
      "eval_samples_per_second": 15.179,
      "eval_steps_per_second": 1.899,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0937288838086656e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}