Training in progress, step 50, checkpoint
{
"best_metric": 3.357673168182373,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 1.9026128266033253,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03800475059382423,
"grad_norm": 0.33629336953163147,
"learning_rate": 5e-05,
"loss": 3.669,
"step": 1
},
{
"epoch": 0.03800475059382423,
"eval_loss": 3.8881068229675293,
"eval_runtime": 2.4606,
"eval_samples_per_second": 72.339,
"eval_steps_per_second": 9.347,
"step": 1
},
{
"epoch": 0.07600950118764846,
"grad_norm": 0.42253753542900085,
"learning_rate": 0.0001,
"loss": 3.6932,
"step": 2
},
{
"epoch": 0.11401425178147269,
"grad_norm": 0.4703884422779083,
"learning_rate": 9.989294616193017e-05,
"loss": 3.6224,
"step": 3
},
{
"epoch": 0.15201900237529692,
"grad_norm": 0.5285553932189941,
"learning_rate": 9.957224306869053e-05,
"loss": 3.72,
"step": 4
},
{
"epoch": 0.19002375296912113,
"grad_norm": 0.5211747884750366,
"learning_rate": 9.903926402016153e-05,
"loss": 3.7434,
"step": 5
},
{
"epoch": 0.22802850356294538,
"grad_norm": 0.5990889072418213,
"learning_rate": 9.829629131445342e-05,
"loss": 3.9806,
"step": 6
},
{
"epoch": 0.2660332541567696,
"grad_norm": 0.28917452692985535,
"learning_rate": 9.73465064747553e-05,
"loss": 3.6418,
"step": 7
},
{
"epoch": 0.30403800475059384,
"grad_norm": 0.3966493308544159,
"learning_rate": 9.619397662556435e-05,
"loss": 3.571,
"step": 8
},
{
"epoch": 0.342042755344418,
"grad_norm": 0.4186142385005951,
"learning_rate": 9.484363707663442e-05,
"loss": 3.5154,
"step": 9
},
{
"epoch": 0.38004750593824227,
"grad_norm": 0.4471973776817322,
"learning_rate": 9.330127018922194e-05,
"loss": 3.4972,
"step": 10
},
{
"epoch": 0.4180522565320665,
"grad_norm": 0.5375720262527466,
"learning_rate": 9.157348061512727e-05,
"loss": 3.5775,
"step": 11
},
{
"epoch": 0.45605700712589076,
"grad_norm": 0.5469061136245728,
"learning_rate": 8.966766701456177e-05,
"loss": 3.6396,
"step": 12
},
{
"epoch": 0.49406175771971494,
"grad_norm": 0.7819376587867737,
"learning_rate": 8.759199037394887e-05,
"loss": 3.9765,
"step": 13
},
{
"epoch": 0.5320665083135392,
"grad_norm": 0.3394373655319214,
"learning_rate": 8.535533905932738e-05,
"loss": 3.5144,
"step": 14
},
{
"epoch": 0.5700712589073634,
"grad_norm": 0.3765534460544586,
"learning_rate": 8.296729075500344e-05,
"loss": 3.4602,
"step": 15
},
{
"epoch": 0.6080760095011877,
"grad_norm": 0.36004090309143066,
"learning_rate": 8.043807145043604e-05,
"loss": 3.3679,
"step": 16
},
{
"epoch": 0.6460807600950119,
"grad_norm": 0.40536168217658997,
"learning_rate": 7.777851165098012e-05,
"loss": 3.402,
"step": 17
},
{
"epoch": 0.684085510688836,
"grad_norm": 0.43434950709342957,
"learning_rate": 7.500000000000001e-05,
"loss": 3.2927,
"step": 18
},
{
"epoch": 0.7220902612826603,
"grad_norm": 0.4626035988330841,
"learning_rate": 7.211443451095007e-05,
"loss": 3.6605,
"step": 19
},
{
"epoch": 0.7600950118764845,
"grad_norm": 0.38905563950538635,
"learning_rate": 6.91341716182545e-05,
"loss": 3.4843,
"step": 20
},
{
"epoch": 0.7980997624703088,
"grad_norm": 0.4623677432537079,
"learning_rate": 6.607197326515808e-05,
"loss": 3.4253,
"step": 21
},
{
"epoch": 0.836104513064133,
"grad_norm": 0.5135887861251831,
"learning_rate": 6.294095225512603e-05,
"loss": 3.3537,
"step": 22
},
{
"epoch": 0.8741092636579573,
"grad_norm": 0.48013439774513245,
"learning_rate": 5.9754516100806423e-05,
"loss": 3.4339,
"step": 23
},
{
"epoch": 0.9121140142517815,
"grad_norm": 0.40351372957229614,
"learning_rate": 5.6526309611002594e-05,
"loss": 3.1647,
"step": 24
},
{
"epoch": 0.9501187648456056,
"grad_norm": 0.44421958923339844,
"learning_rate": 5.327015646150716e-05,
"loss": 3.4674,
"step": 25
},
{
"epoch": 0.9501187648456056,
"eval_loss": 3.4557735919952393,
"eval_runtime": 2.4794,
"eval_samples_per_second": 71.79,
"eval_steps_per_second": 9.276,
"step": 25
},
{
"epoch": 0.9881235154394299,
"grad_norm": 0.6033377051353455,
"learning_rate": 5e-05,
"loss": 3.8401,
"step": 26
},
{
"epoch": 1.0285035629453683,
"grad_norm": 0.6220775246620178,
"learning_rate": 4.6729843538492847e-05,
"loss": 6.2136,
"step": 27
},
{
"epoch": 1.0665083135391924,
"grad_norm": 0.3605939447879791,
"learning_rate": 4.347369038899744e-05,
"loss": 3.0169,
"step": 28
},
{
"epoch": 1.1045130641330165,
"grad_norm": 0.4560985863208771,
"learning_rate": 4.0245483899193595e-05,
"loss": 3.3652,
"step": 29
},
{
"epoch": 1.1425178147268409,
"grad_norm": 0.5514870285987854,
"learning_rate": 3.705904774487396e-05,
"loss": 3.1878,
"step": 30
},
{
"epoch": 1.180522565320665,
"grad_norm": 0.5774045586585999,
"learning_rate": 3.392802673484193e-05,
"loss": 3.0743,
"step": 31
},
{
"epoch": 1.2185273159144894,
"grad_norm": 0.7691540122032166,
"learning_rate": 3.086582838174551e-05,
"loss": 3.6026,
"step": 32
},
{
"epoch": 1.2565320665083135,
"grad_norm": 0.38054758310317993,
"learning_rate": 2.7885565489049946e-05,
"loss": 2.9451,
"step": 33
},
{
"epoch": 1.2945368171021379,
"grad_norm": 0.44550177454948425,
"learning_rate": 2.500000000000001e-05,
"loss": 3.8813,
"step": 34
},
{
"epoch": 1.332541567695962,
"grad_norm": 0.5434937477111816,
"learning_rate": 2.2221488349019903e-05,
"loss": 3.1944,
"step": 35
},
{
"epoch": 1.3705463182897861,
"grad_norm": 0.47107967734336853,
"learning_rate": 1.9561928549563968e-05,
"loss": 3.3142,
"step": 36
},
{
"epoch": 1.4085510688836105,
"grad_norm": 0.55793297290802,
"learning_rate": 1.703270924499656e-05,
"loss": 3.24,
"step": 37
},
{
"epoch": 1.4465558194774346,
"grad_norm": 0.7050827741622925,
"learning_rate": 1.4644660940672627e-05,
"loss": 3.4012,
"step": 38
},
{
"epoch": 1.484560570071259,
"grad_norm": 0.976097583770752,
"learning_rate": 1.2408009626051137e-05,
"loss": 3.7907,
"step": 39
},
{
"epoch": 1.522565320665083,
"grad_norm": 0.37063631415367126,
"learning_rate": 1.0332332985438248e-05,
"loss": 3.5599,
"step": 40
},
{
"epoch": 1.5605700712589075,
"grad_norm": 0.39631596207618713,
"learning_rate": 8.426519384872733e-06,
"loss": 3.1048,
"step": 41
},
{
"epoch": 1.5985748218527316,
"grad_norm": 0.4788654148578644,
"learning_rate": 6.698729810778065e-06,
"loss": 3.2364,
"step": 42
},
{
"epoch": 1.6365795724465557,
"grad_norm": 0.5167055130004883,
"learning_rate": 5.156362923365588e-06,
"loss": 3.1073,
"step": 43
},
{
"epoch": 1.67458432304038,
"grad_norm": 0.6170525550842285,
"learning_rate": 3.8060233744356633e-06,
"loss": 3.2586,
"step": 44
},
{
"epoch": 1.7125890736342044,
"grad_norm": 0.700023889541626,
"learning_rate": 2.653493525244721e-06,
"loss": 3.5919,
"step": 45
},
{
"epoch": 1.7505938242280283,
"grad_norm": 0.3638046085834503,
"learning_rate": 1.70370868554659e-06,
"loss": 2.8131,
"step": 46
},
{
"epoch": 1.7885985748218527,
"grad_norm": 0.4485277235507965,
"learning_rate": 9.607359798384785e-07,
"loss": 3.6761,
"step": 47
},
{
"epoch": 1.826603325415677,
"grad_norm": 0.4701801538467407,
"learning_rate": 4.277569313094809e-07,
"loss": 3.2881,
"step": 48
},
{
"epoch": 1.8646080760095012,
"grad_norm": 0.46767908334732056,
"learning_rate": 1.0705383806982606e-07,
"loss": 3.3644,
"step": 49
},
{
"epoch": 1.9026128266033253,
"grad_norm": 0.5206577181816101,
"learning_rate": 0.0,
"loss": 3.0493,
"step": 50
},
{
"epoch": 1.9026128266033253,
"eval_loss": 3.357673168182373,
"eval_runtime": 2.3659,
"eval_samples_per_second": 75.237,
"eval_steps_per_second": 9.722,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0671517724783411e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
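
The file above is a trainer_state.json as written by the Hugging Face transformers Trainer when it saves a checkpoint. Below is a minimal sketch of how one might load it and pull out the loss history and best-checkpoint metadata; the file path is assumed from the "best_model_checkpoint" field and is illustrative only.

import json

# Path assumed from the "best_model_checkpoint" field above; adjust to your local checkout.
with open("miner_id_24/checkpoint-50/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training records (with "loss") and evaluation records (with "eval_loss").
train_loss = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_loss = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print("best eval loss:", state["best_metric"])              # 3.357673168182373
print("best checkpoint:", state["best_model_checkpoint"])   # miner_id_24/checkpoint-50
print("final train loss:", train_loss[-1])                  # (50, 3.0493)
print("eval points:", eval_loss)                            # steps 1, 25 and 50

The logged learning_rate values are consistent with a short linear warmup to 1e-4 over the first two steps followed by cosine decay to zero at step 50. A hedged reconstruction of that schedule (assumed from the logged values, not taken from the actual training script) is:

import math

def lr_at(step, max_lr=1e-4, warmup_steps=2, total_steps=50):
    # Linear warmup for the first optimizer steps, then cosine decay to zero.
    if step < warmup_steps:
        return max_lr * step / warmup_steps
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return max_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

# lr_at(1) -> 5e-05, lr_at(3) -> ~9.9893e-05, lr_at(50) -> 0.0, matching the log above.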