{
"best_metric": 1.1298441886901855,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.16891891891891891,
"eval_steps": 50,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0033783783783783786,
"grad_norm": 3.826683282852173,
"learning_rate": 5e-06,
"loss": 1.4948,
"step": 1
},
{
"epoch": 0.0033783783783783786,
"eval_loss": 2.366645336151123,
"eval_runtime": 45.2663,
"eval_samples_per_second": 11.024,
"eval_steps_per_second": 5.523,
"step": 1
},
{
"epoch": 0.006756756756756757,
"grad_norm": 4.3141398429870605,
"learning_rate": 1e-05,
"loss": 1.6839,
"step": 2
},
{
"epoch": 0.010135135135135136,
"grad_norm": 4.097876071929932,
"learning_rate": 1.5e-05,
"loss": 1.5752,
"step": 3
},
{
"epoch": 0.013513513513513514,
"grad_norm": 4.010673522949219,
"learning_rate": 2e-05,
"loss": 1.5405,
"step": 4
},
{
"epoch": 0.016891891891891893,
"grad_norm": 3.4538493156433105,
"learning_rate": 2.5e-05,
"loss": 1.5342,
"step": 5
},
{
"epoch": 0.02027027027027027,
"grad_norm": 2.8183417320251465,
"learning_rate": 3e-05,
"loss": 1.3748,
"step": 6
},
{
"epoch": 0.02364864864864865,
"grad_norm": 2.11971378326416,
"learning_rate": 3.5e-05,
"loss": 1.2566,
"step": 7
},
{
"epoch": 0.02702702702702703,
"grad_norm": 1.5489553213119507,
"learning_rate": 4e-05,
"loss": 1.2766,
"step": 8
},
{
"epoch": 0.030405405405405407,
"grad_norm": 1.3374286890029907,
"learning_rate": 4.5e-05,
"loss": 1.1642,
"step": 9
},
{
"epoch": 0.033783783783783786,
"grad_norm": 1.4814637899398804,
"learning_rate": 5e-05,
"loss": 1.1382,
"step": 10
},
{
"epoch": 0.037162162162162164,
"grad_norm": 1.3145912885665894,
"learning_rate": 5.500000000000001e-05,
"loss": 1.0861,
"step": 11
},
{
"epoch": 0.04054054054054054,
"grad_norm": 1.5343668460845947,
"learning_rate": 6e-05,
"loss": 1.1041,
"step": 12
},
{
"epoch": 0.04391891891891892,
"grad_norm": 1.3800190687179565,
"learning_rate": 6.500000000000001e-05,
"loss": 1.0442,
"step": 13
},
{
"epoch": 0.0472972972972973,
"grad_norm": 1.1731736660003662,
"learning_rate": 7e-05,
"loss": 1.025,
"step": 14
},
{
"epoch": 0.05067567567567568,
"grad_norm": 1.0756690502166748,
"learning_rate": 7.500000000000001e-05,
"loss": 1.077,
"step": 15
},
{
"epoch": 0.05405405405405406,
"grad_norm": 1.126670479774475,
"learning_rate": 8e-05,
"loss": 1.0699,
"step": 16
},
{
"epoch": 0.057432432432432436,
"grad_norm": 1.2052531242370605,
"learning_rate": 8.5e-05,
"loss": 1.0481,
"step": 17
},
{
"epoch": 0.060810810810810814,
"grad_norm": 1.0971605777740479,
"learning_rate": 9e-05,
"loss": 1.1747,
"step": 18
},
{
"epoch": 0.06418918918918919,
"grad_norm": 1.2357207536697388,
"learning_rate": 9.5e-05,
"loss": 1.2298,
"step": 19
},
{
"epoch": 0.06756756756756757,
"grad_norm": 1.0170912742614746,
"learning_rate": 0.0001,
"loss": 0.963,
"step": 20
},
{
"epoch": 0.07094594594594594,
"grad_norm": 1.040585994720459,
"learning_rate": 9.999238475781957e-05,
"loss": 1.0414,
"step": 21
},
{
"epoch": 0.07432432432432433,
"grad_norm": 1.0849173069000244,
"learning_rate": 9.99695413509548e-05,
"loss": 0.9838,
"step": 22
},
{
"epoch": 0.0777027027027027,
"grad_norm": 1.2504794597625732,
"learning_rate": 9.99314767377287e-05,
"loss": 1.132,
"step": 23
},
{
"epoch": 0.08108108108108109,
"grad_norm": 1.1654671430587769,
"learning_rate": 9.987820251299122e-05,
"loss": 1.1147,
"step": 24
},
{
"epoch": 0.08445945945945946,
"grad_norm": 1.2212613821029663,
"learning_rate": 9.980973490458728e-05,
"loss": 1.0757,
"step": 25
},
{
"epoch": 0.08783783783783784,
"grad_norm": 1.6377768516540527,
"learning_rate": 9.972609476841367e-05,
"loss": 1.1149,
"step": 26
},
{
"epoch": 0.09121621621621621,
"grad_norm": 1.4654461145401,
"learning_rate": 9.962730758206611e-05,
"loss": 1.0161,
"step": 27
},
{
"epoch": 0.0945945945945946,
"grad_norm": 1.5125370025634766,
"learning_rate": 9.951340343707852e-05,
"loss": 1.1866,
"step": 28
},
{
"epoch": 0.09797297297297297,
"grad_norm": 1.3907262086868286,
"learning_rate": 9.938441702975689e-05,
"loss": 1.0566,
"step": 29
},
{
"epoch": 0.10135135135135136,
"grad_norm": 1.5802035331726074,
"learning_rate": 9.924038765061042e-05,
"loss": 1.2932,
"step": 30
},
{
"epoch": 0.10472972972972973,
"grad_norm": 1.4079865217208862,
"learning_rate": 9.908135917238321e-05,
"loss": 1.1993,
"step": 31
},
{
"epoch": 0.10810810810810811,
"grad_norm": 1.471291184425354,
"learning_rate": 9.890738003669029e-05,
"loss": 1.2195,
"step": 32
},
{
"epoch": 0.11148648648648649,
"grad_norm": 1.8197413682937622,
"learning_rate": 9.871850323926177e-05,
"loss": 1.1169,
"step": 33
},
{
"epoch": 0.11486486486486487,
"grad_norm": 1.589645504951477,
"learning_rate": 9.851478631379982e-05,
"loss": 1.2959,
"step": 34
},
{
"epoch": 0.11824324324324324,
"grad_norm": 1.9382315874099731,
"learning_rate": 9.829629131445342e-05,
"loss": 1.1117,
"step": 35
},
{
"epoch": 0.12162162162162163,
"grad_norm": 1.9313946962356567,
"learning_rate": 9.806308479691595e-05,
"loss": 1.1987,
"step": 36
},
{
"epoch": 0.125,
"grad_norm": 1.8206257820129395,
"learning_rate": 9.781523779815179e-05,
"loss": 1.1417,
"step": 37
},
{
"epoch": 0.12837837837837837,
"grad_norm": 1.890135645866394,
"learning_rate": 9.755282581475769e-05,
"loss": 1.094,
"step": 38
},
{
"epoch": 0.13175675675675674,
"grad_norm": 2.088564872741699,
"learning_rate": 9.727592877996585e-05,
"loss": 1.2143,
"step": 39
},
{
"epoch": 0.13513513513513514,
"grad_norm": 1.9415384531021118,
"learning_rate": 9.698463103929542e-05,
"loss": 1.197,
"step": 40
},
{
"epoch": 0.13851351351351351,
"grad_norm": 1.7540833950042725,
"learning_rate": 9.667902132486009e-05,
"loss": 1.0637,
"step": 41
},
{
"epoch": 0.14189189189189189,
"grad_norm": 1.8551878929138184,
"learning_rate": 9.635919272833938e-05,
"loss": 1.0685,
"step": 42
},
{
"epoch": 0.14527027027027026,
"grad_norm": 1.9881539344787598,
"learning_rate": 9.602524267262203e-05,
"loss": 1.081,
"step": 43
},
{
"epoch": 0.14864864864864866,
"grad_norm": 2.670828104019165,
"learning_rate": 9.567727288213005e-05,
"loss": 1.2052,
"step": 44
},
{
"epoch": 0.15202702702702703,
"grad_norm": 2.176063060760498,
"learning_rate": 9.53153893518325e-05,
"loss": 0.856,
"step": 45
},
{
"epoch": 0.1554054054054054,
"grad_norm": 2.3070170879364014,
"learning_rate": 9.493970231495835e-05,
"loss": 0.9129,
"step": 46
},
{
"epoch": 0.15878378378378377,
"grad_norm": 2.7001495361328125,
"learning_rate": 9.45503262094184e-05,
"loss": 0.9219,
"step": 47
},
{
"epoch": 0.16216216216216217,
"grad_norm": 2.964418888092041,
"learning_rate": 9.414737964294636e-05,
"loss": 0.9442,
"step": 48
},
{
"epoch": 0.16554054054054054,
"grad_norm": 3.5293803215026855,
"learning_rate": 9.373098535696979e-05,
"loss": 1.1814,
"step": 49
},
{
"epoch": 0.16891891891891891,
"grad_norm": 3.7675538063049316,
"learning_rate": 9.330127018922194e-05,
"loss": 0.9654,
"step": 50
},
{
"epoch": 0.16891891891891891,
"eval_loss": 1.1298441886901855,
"eval_runtime": 46.0078,
"eval_samples_per_second": 10.846,
"eval_steps_per_second": 5.434,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7.855493374122394e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}