{
"best_metric": 0.9218409657478333,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.624512099921936,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01249024199843872,
"grad_norm": 4.690826892852783,
"learning_rate": 5e-05,
"loss": 3.1234,
"step": 1
},
{
"epoch": 0.01249024199843872,
"eval_loss": 5.620558261871338,
"eval_runtime": 3.8428,
"eval_samples_per_second": 13.011,
"eval_steps_per_second": 3.383,
"step": 1
},
{
"epoch": 0.02498048399687744,
"grad_norm": 5.722167491912842,
"learning_rate": 0.0001,
"loss": 3.7107,
"step": 2
},
{
"epoch": 0.03747072599531616,
"grad_norm": 5.577714443206787,
"learning_rate": 9.990365154573717e-05,
"loss": 3.8318,
"step": 3
},
{
"epoch": 0.04996096799375488,
"grad_norm": 5.402304172515869,
"learning_rate": 9.961501876182148e-05,
"loss": 3.4357,
"step": 4
},
{
"epoch": 0.0624512099921936,
"grad_norm": 5.725224494934082,
"learning_rate": 9.913533761814537e-05,
"loss": 2.9605,
"step": 5
},
{
"epoch": 0.07494145199063232,
"grad_norm": 4.146466255187988,
"learning_rate": 9.846666218300807e-05,
"loss": 2.2876,
"step": 6
},
{
"epoch": 0.08743169398907104,
"grad_norm": 3.2101497650146484,
"learning_rate": 9.761185582727977e-05,
"loss": 2.1256,
"step": 7
},
{
"epoch": 0.09992193598750976,
"grad_norm": 3.6687138080596924,
"learning_rate": 9.657457896300791e-05,
"loss": 1.9117,
"step": 8
},
{
"epoch": 0.11241217798594848,
"grad_norm": 3.7865750789642334,
"learning_rate": 9.535927336897098e-05,
"loss": 1.7385,
"step": 9
},
{
"epoch": 0.1249024199843872,
"grad_norm": 3.5925636291503906,
"learning_rate": 9.397114317029975e-05,
"loss": 1.5617,
"step": 10
},
{
"epoch": 0.13739266198282593,
"grad_norm": 3.28287410736084,
"learning_rate": 9.241613255361455e-05,
"loss": 1.5269,
"step": 11
},
{
"epoch": 0.14988290398126464,
"grad_norm": 4.296369552612305,
"learning_rate": 9.070090031310558e-05,
"loss": 1.5197,
"step": 12
},
{
"epoch": 0.16237314597970337,
"grad_norm": 5.540219783782959,
"learning_rate": 8.883279133655399e-05,
"loss": 1.7307,
"step": 13
},
{
"epoch": 0.17486338797814208,
"grad_norm": 5.277021408081055,
"learning_rate": 8.681980515339464e-05,
"loss": 1.5492,
"step": 14
},
{
"epoch": 0.1873536299765808,
"grad_norm": 2.781581163406372,
"learning_rate": 8.467056167950311e-05,
"loss": 1.1706,
"step": 15
},
{
"epoch": 0.19984387197501952,
"grad_norm": 2.5811822414398193,
"learning_rate": 8.239426430539243e-05,
"loss": 1.2165,
"step": 16
},
{
"epoch": 0.21233411397345825,
"grad_norm": 1.8358145952224731,
"learning_rate": 8.000066048588211e-05,
"loss": 1.0696,
"step": 17
},
{
"epoch": 0.22482435597189696,
"grad_norm": 1.5274910926818848,
"learning_rate": 7.75e-05,
"loss": 0.9806,
"step": 18
},
{
"epoch": 0.2373145979703357,
"grad_norm": 1.465651035308838,
"learning_rate": 7.490299105985507e-05,
"loss": 1.134,
"step": 19
},
{
"epoch": 0.2498048399687744,
"grad_norm": 1.4855949878692627,
"learning_rate": 7.222075445642904e-05,
"loss": 0.9989,
"step": 20
},
{
"epoch": 0.26229508196721313,
"grad_norm": 1.3240143060684204,
"learning_rate": 6.946477593864228e-05,
"loss": 0.9781,
"step": 21
},
{
"epoch": 0.27478532396565186,
"grad_norm": 1.5368101596832275,
"learning_rate": 6.664685702961344e-05,
"loss": 0.9662,
"step": 22
},
{
"epoch": 0.28727556596409054,
"grad_norm": 1.6269676685333252,
"learning_rate": 6.377906449072578e-05,
"loss": 0.9193,
"step": 23
},
{
"epoch": 0.2997658079625293,
"grad_norm": 2.395573616027832,
"learning_rate": 6.087367864990233e-05,
"loss": 1.0187,
"step": 24
},
{
"epoch": 0.312256049960968,
"grad_norm": 3.4702703952789307,
"learning_rate": 5.794314081535644e-05,
"loss": 1.0253,
"step": 25
},
{
"epoch": 0.312256049960968,
"eval_loss": 1.0936610698699951,
"eval_runtime": 3.4692,
"eval_samples_per_second": 14.412,
"eval_steps_per_second": 3.747,
"step": 25
},
{
"epoch": 0.32474629195940674,
"grad_norm": 1.5570573806762695,
"learning_rate": 5.500000000000001e-05,
"loss": 1.1832,
"step": 26
},
{
"epoch": 0.3372365339578454,
"grad_norm": 1.3983771800994873,
"learning_rate": 5.205685918464356e-05,
"loss": 1.044,
"step": 27
},
{
"epoch": 0.34972677595628415,
"grad_norm": 1.282069444656372,
"learning_rate": 4.912632135009769e-05,
"loss": 1.0703,
"step": 28
},
{
"epoch": 0.3622170179547229,
"grad_norm": 1.3308384418487549,
"learning_rate": 4.6220935509274235e-05,
"loss": 1.0034,
"step": 29
},
{
"epoch": 0.3747072599531616,
"grad_norm": 1.311693787574768,
"learning_rate": 4.3353142970386564e-05,
"loss": 0.9713,
"step": 30
},
{
"epoch": 0.3871975019516003,
"grad_norm": 1.3370124101638794,
"learning_rate": 4.053522406135775e-05,
"loss": 0.9209,
"step": 31
},
{
"epoch": 0.39968774395003903,
"grad_norm": 1.313528299331665,
"learning_rate": 3.777924554357096e-05,
"loss": 0.8721,
"step": 32
},
{
"epoch": 0.41217798594847777,
"grad_norm": 1.3581171035766602,
"learning_rate": 3.509700894014496e-05,
"loss": 0.8682,
"step": 33
},
{
"epoch": 0.4246682279469165,
"grad_norm": 1.265177845954895,
"learning_rate": 3.250000000000001e-05,
"loss": 0.8279,
"step": 34
},
{
"epoch": 0.4371584699453552,
"grad_norm": 1.4047905206680298,
"learning_rate": 2.9999339514117912e-05,
"loss": 0.9059,
"step": 35
},
{
"epoch": 0.4496487119437939,
"grad_norm": 1.6240432262420654,
"learning_rate": 2.760573569460757e-05,
"loss": 0.9126,
"step": 36
},
{
"epoch": 0.46213895394223264,
"grad_norm": 1.7853037118911743,
"learning_rate": 2.53294383204969e-05,
"loss": 0.8382,
"step": 37
},
{
"epoch": 0.4746291959406714,
"grad_norm": 1.212027907371521,
"learning_rate": 2.3180194846605367e-05,
"loss": 1.047,
"step": 38
},
{
"epoch": 0.48711943793911006,
"grad_norm": 1.175856113433838,
"learning_rate": 2.1167208663446025e-05,
"loss": 0.9885,
"step": 39
},
{
"epoch": 0.4996096799375488,
"grad_norm": 1.1697721481323242,
"learning_rate": 1.9299099686894423e-05,
"loss": 0.9376,
"step": 40
},
{
"epoch": 0.5120999219359875,
"grad_norm": 1.0764572620391846,
"learning_rate": 1.758386744638546e-05,
"loss": 0.921,
"step": 41
},
{
"epoch": 0.5245901639344263,
"grad_norm": 1.1662276983261108,
"learning_rate": 1.602885682970026e-05,
"loss": 0.8766,
"step": 42
},
{
"epoch": 0.537080405932865,
"grad_norm": 1.0305904150009155,
"learning_rate": 1.464072663102903e-05,
"loss": 0.823,
"step": 43
},
{
"epoch": 0.5495706479313037,
"grad_norm": 1.1411957740783691,
"learning_rate": 1.3425421036992098e-05,
"loss": 0.8284,
"step": 44
},
{
"epoch": 0.5620608899297423,
"grad_norm": 1.1754179000854492,
"learning_rate": 1.2388144172720251e-05,
"loss": 0.8136,
"step": 45
},
{
"epoch": 0.5745511319281811,
"grad_norm": 1.0738892555236816,
"learning_rate": 1.1533337816991932e-05,
"loss": 0.7073,
"step": 46
},
{
"epoch": 0.5870413739266198,
"grad_norm": 1.1244994401931763,
"learning_rate": 1.0864662381854632e-05,
"loss": 0.7384,
"step": 47
},
{
"epoch": 0.5995316159250585,
"grad_norm": 1.3134185075759888,
"learning_rate": 1.0384981238178534e-05,
"loss": 0.8129,
"step": 48
},
{
"epoch": 0.6120218579234973,
"grad_norm": 1.5741225481033325,
"learning_rate": 1.0096348454262845e-05,
"loss": 0.7561,
"step": 49
},
{
"epoch": 0.624512099921936,
"grad_norm": 1.8192217350006104,
"learning_rate": 1e-05,
"loss": 0.8404,
"step": 50
},
{
"epoch": 0.624512099921936,
"eval_loss": 0.9218409657478333,
"eval_runtime": 3.4665,
"eval_samples_per_second": 14.424,
"eval_steps_per_second": 3.75,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.968083617316864e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}