sales-prediction1 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 940,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.10638297872340426,
"grad_norm": 12.057719230651855,
"learning_rate": 9.893617021276596e-05,
"loss": 0.2856,
"step": 10
},
{
"epoch": 0.2127659574468085,
"grad_norm": 9.51878547668457,
"learning_rate": 9.787234042553192e-05,
"loss": 0.1486,
"step": 20
},
{
"epoch": 0.3191489361702128,
"grad_norm": 4.131153106689453,
"learning_rate": 9.680851063829788e-05,
"loss": 0.0795,
"step": 30
},
{
"epoch": 0.425531914893617,
"grad_norm": 4.15078592300415,
"learning_rate": 9.574468085106384e-05,
"loss": 0.0634,
"step": 40
},
{
"epoch": 0.5319148936170213,
"grad_norm": 6.101027011871338,
"learning_rate": 9.468085106382978e-05,
"loss": 0.0293,
"step": 50
},
{
"epoch": 0.6382978723404256,
"grad_norm": 1.4290554523468018,
"learning_rate": 9.361702127659576e-05,
"loss": 0.02,
"step": 60
},
{
"epoch": 0.7446808510638298,
"grad_norm": 0.2031591683626175,
"learning_rate": 9.25531914893617e-05,
"loss": 0.0084,
"step": 70
},
{
"epoch": 0.851063829787234,
"grad_norm": 1.6767696142196655,
"learning_rate": 9.148936170212766e-05,
"loss": 0.0263,
"step": 80
},
{
"epoch": 0.9574468085106383,
"grad_norm": 2.562359094619751,
"learning_rate": 9.042553191489363e-05,
"loss": 0.0095,
"step": 90
},
{
"epoch": 1.0,
"eval_loss": 0.012819608673453331,
"eval_mse": 0.012819608673453331,
"eval_runtime": 13.8617,
"eval_samples_per_second": 13.49,
"eval_steps_per_second": 1.731,
"step": 94
},
{
"epoch": 1.0638297872340425,
"grad_norm": 4.054518222808838,
"learning_rate": 8.936170212765958e-05,
"loss": 0.0378,
"step": 100
},
{
"epoch": 1.1702127659574468,
"grad_norm": 1.0702558755874634,
"learning_rate": 8.829787234042553e-05,
"loss": 0.021,
"step": 110
},
{
"epoch": 1.2765957446808511,
"grad_norm": 3.5774471759796143,
"learning_rate": 8.723404255319149e-05,
"loss": 0.0087,
"step": 120
},
{
"epoch": 1.3829787234042552,
"grad_norm": 1.658952236175537,
"learning_rate": 8.617021276595745e-05,
"loss": 0.0188,
"step": 130
},
{
"epoch": 1.4893617021276595,
"grad_norm": 5.384657859802246,
"learning_rate": 8.510638297872341e-05,
"loss": 0.021,
"step": 140
},
{
"epoch": 1.5957446808510638,
"grad_norm": 6.726040840148926,
"learning_rate": 8.404255319148937e-05,
"loss": 0.0126,
"step": 150
},
{
"epoch": 1.702127659574468,
"grad_norm": 3.472024440765381,
"learning_rate": 8.297872340425533e-05,
"loss": 0.0069,
"step": 160
},
{
"epoch": 1.8085106382978724,
"grad_norm": 1.0799269676208496,
"learning_rate": 8.191489361702128e-05,
"loss": 0.0247,
"step": 170
},
{
"epoch": 1.9148936170212765,
"grad_norm": 2.496415138244629,
"learning_rate": 8.085106382978723e-05,
"loss": 0.0102,
"step": 180
},
{
"epoch": 2.0,
"eval_loss": 0.014377221465110779,
"eval_mse": 0.014377223327755928,
"eval_runtime": 13.5336,
"eval_samples_per_second": 13.817,
"eval_steps_per_second": 1.773,
"step": 188
},
{
"epoch": 2.021276595744681,
"grad_norm": 7.031186580657959,
"learning_rate": 7.978723404255319e-05,
"loss": 0.0146,
"step": 190
},
{
"epoch": 2.127659574468085,
"grad_norm": 1.6951093673706055,
"learning_rate": 7.872340425531916e-05,
"loss": 0.0102,
"step": 200
},
{
"epoch": 2.2340425531914896,
"grad_norm": 2.5294058322906494,
"learning_rate": 7.76595744680851e-05,
"loss": 0.0088,
"step": 210
},
{
"epoch": 2.3404255319148937,
"grad_norm": 0.2626056969165802,
"learning_rate": 7.659574468085106e-05,
"loss": 0.0089,
"step": 220
},
{
"epoch": 2.4468085106382977,
"grad_norm": 4.416263580322266,
"learning_rate": 7.553191489361703e-05,
"loss": 0.0211,
"step": 230
},
{
"epoch": 2.5531914893617023,
"grad_norm": 6.607398986816406,
"learning_rate": 7.446808510638298e-05,
"loss": 0.0114,
"step": 240
},
{
"epoch": 2.6595744680851063,
"grad_norm": 3.823686361312866,
"learning_rate": 7.340425531914894e-05,
"loss": 0.0113,
"step": 250
},
{
"epoch": 2.7659574468085104,
"grad_norm": 1.5682330131530762,
"learning_rate": 7.23404255319149e-05,
"loss": 0.0426,
"step": 260
},
{
"epoch": 2.872340425531915,
"grad_norm": 1.9752477407455444,
"learning_rate": 7.127659574468085e-05,
"loss": 0.0125,
"step": 270
},
{
"epoch": 2.978723404255319,
"grad_norm": 9.359685897827148,
"learning_rate": 7.021276595744681e-05,
"loss": 0.0293,
"step": 280
},
{
"epoch": 3.0,
"eval_loss": 0.008672923780977726,
"eval_mse": 0.008672923780977726,
"eval_runtime": 13.6388,
"eval_samples_per_second": 13.711,
"eval_steps_per_second": 1.76,
"step": 282
},
{
"epoch": 3.0851063829787235,
"grad_norm": 1.2124550342559814,
"learning_rate": 6.914893617021277e-05,
"loss": 0.0153,
"step": 290
},
{
"epoch": 3.1914893617021276,
"grad_norm": 1.8096394538879395,
"learning_rate": 6.808510638297873e-05,
"loss": 0.0041,
"step": 300
},
{
"epoch": 3.297872340425532,
"grad_norm": 0.448832631111145,
"learning_rate": 6.702127659574469e-05,
"loss": 0.0028,
"step": 310
},
{
"epoch": 3.404255319148936,
"grad_norm": 3.3972675800323486,
"learning_rate": 6.595744680851063e-05,
"loss": 0.0139,
"step": 320
},
{
"epoch": 3.5106382978723403,
"grad_norm": 0.708503246307373,
"learning_rate": 6.489361702127659e-05,
"loss": 0.0042,
"step": 330
},
{
"epoch": 3.617021276595745,
"grad_norm": 3.509138345718384,
"learning_rate": 6.382978723404256e-05,
"loss": 0.0101,
"step": 340
},
{
"epoch": 3.723404255319149,
"grad_norm": 0.675829291343689,
"learning_rate": 6.276595744680851e-05,
"loss": 0.0072,
"step": 350
},
{
"epoch": 3.829787234042553,
"grad_norm": 0.2924118936061859,
"learning_rate": 6.170212765957447e-05,
"loss": 0.0119,
"step": 360
},
{
"epoch": 3.9361702127659575,
"grad_norm": 0.0973350778222084,
"learning_rate": 6.063829787234043e-05,
"loss": 0.0083,
"step": 370
},
{
"epoch": 4.0,
"eval_loss": 0.00898202694952488,
"eval_mse": 0.00898202694952488,
"eval_runtime": 13.7858,
"eval_samples_per_second": 13.565,
"eval_steps_per_second": 1.741,
"step": 376
},
{
"epoch": 4.042553191489362,
"grad_norm": 1.605879783630371,
"learning_rate": 5.9574468085106384e-05,
"loss": 0.0138,
"step": 380
},
{
"epoch": 4.148936170212766,
"grad_norm": 3.2707700729370117,
"learning_rate": 5.851063829787234e-05,
"loss": 0.0051,
"step": 390
},
{
"epoch": 4.25531914893617,
"grad_norm": 0.4065483510494232,
"learning_rate": 5.744680851063831e-05,
"loss": 0.0068,
"step": 400
},
{
"epoch": 4.361702127659575,
"grad_norm": 3.0976128578186035,
"learning_rate": 5.638297872340426e-05,
"loss": 0.0081,
"step": 410
},
{
"epoch": 4.468085106382979,
"grad_norm": 2.073216438293457,
"learning_rate": 5.531914893617022e-05,
"loss": 0.0036,
"step": 420
},
{
"epoch": 4.574468085106383,
"grad_norm": 2.3855879306793213,
"learning_rate": 5.425531914893617e-05,
"loss": 0.0184,
"step": 430
},
{
"epoch": 4.680851063829787,
"grad_norm": 0.987083911895752,
"learning_rate": 5.319148936170213e-05,
"loss": 0.0138,
"step": 440
},
{
"epoch": 4.787234042553192,
"grad_norm": 0.3904077410697937,
"learning_rate": 5.212765957446809e-05,
"loss": 0.0076,
"step": 450
},
{
"epoch": 4.8936170212765955,
"grad_norm": 0.7371108531951904,
"learning_rate": 5.1063829787234044e-05,
"loss": 0.0166,
"step": 460
},
{
"epoch": 5.0,
"grad_norm": 1.442456603050232,
"learning_rate": 5e-05,
"loss": 0.0048,
"step": 470
},
{
"epoch": 5.0,
"eval_loss": 0.009333298541605473,
"eval_mse": 0.009333296678960323,
"eval_runtime": 13.8783,
"eval_samples_per_second": 13.474,
"eval_steps_per_second": 1.729,
"step": 470
},
{
"epoch": 5.1063829787234045,
"grad_norm": 0.8383665084838867,
"learning_rate": 4.893617021276596e-05,
"loss": 0.0075,
"step": 480
},
{
"epoch": 5.212765957446808,
"grad_norm": 0.11954376846551895,
"learning_rate": 4.787234042553192e-05,
"loss": 0.002,
"step": 490
},
{
"epoch": 5.319148936170213,
"grad_norm": 0.9294746518135071,
"learning_rate": 4.680851063829788e-05,
"loss": 0.0019,
"step": 500
},
{
"epoch": 5.425531914893617,
"grad_norm": 0.790674090385437,
"learning_rate": 4.574468085106383e-05,
"loss": 0.0117,
"step": 510
},
{
"epoch": 5.531914893617021,
"grad_norm": 1.9565446376800537,
"learning_rate": 4.468085106382979e-05,
"loss": 0.0049,
"step": 520
},
{
"epoch": 5.638297872340425,
"grad_norm": 2.9001126289367676,
"learning_rate": 4.3617021276595746e-05,
"loss": 0.004,
"step": 530
},
{
"epoch": 5.74468085106383,
"grad_norm": 1.8480255603790283,
"learning_rate": 4.2553191489361704e-05,
"loss": 0.0041,
"step": 540
},
{
"epoch": 5.851063829787234,
"grad_norm": 3.5987026691436768,
"learning_rate": 4.148936170212766e-05,
"loss": 0.0052,
"step": 550
},
{
"epoch": 5.957446808510638,
"grad_norm": 0.5824787616729736,
"learning_rate": 4.0425531914893614e-05,
"loss": 0.0028,
"step": 560
},
{
"epoch": 6.0,
"eval_loss": 0.006746174301952124,
"eval_mse": 0.006746174301952124,
"eval_runtime": 13.3949,
"eval_samples_per_second": 13.961,
"eval_steps_per_second": 1.792,
"step": 564
},
{
"epoch": 6.0638297872340425,
"grad_norm": 0.4667282700538635,
"learning_rate": 3.936170212765958e-05,
"loss": 0.0021,
"step": 570
},
{
"epoch": 6.170212765957447,
"grad_norm": 1.5866947174072266,
"learning_rate": 3.829787234042553e-05,
"loss": 0.0047,
"step": 580
},
{
"epoch": 6.276595744680851,
"grad_norm": 0.9419589042663574,
"learning_rate": 3.723404255319149e-05,
"loss": 0.0018,
"step": 590
},
{
"epoch": 6.382978723404255,
"grad_norm": 0.300542950630188,
"learning_rate": 3.617021276595745e-05,
"loss": 0.0012,
"step": 600
},
{
"epoch": 6.48936170212766,
"grad_norm": 0.741326093673706,
"learning_rate": 3.5106382978723407e-05,
"loss": 0.0035,
"step": 610
},
{
"epoch": 6.595744680851064,
"grad_norm": 1.8038232326507568,
"learning_rate": 3.4042553191489365e-05,
"loss": 0.0033,
"step": 620
},
{
"epoch": 6.702127659574468,
"grad_norm": 0.41880735754966736,
"learning_rate": 3.2978723404255317e-05,
"loss": 0.0048,
"step": 630
},
{
"epoch": 6.808510638297872,
"grad_norm": 0.6254183650016785,
"learning_rate": 3.191489361702128e-05,
"loss": 0.0018,
"step": 640
},
{
"epoch": 6.914893617021277,
"grad_norm": 0.13702178001403809,
"learning_rate": 3.085106382978723e-05,
"loss": 0.004,
"step": 650
},
{
"epoch": 7.0,
"eval_loss": 0.006701626814901829,
"eval_mse": 0.006701626814901829,
"eval_runtime": 13.4218,
"eval_samples_per_second": 13.933,
"eval_steps_per_second": 1.788,
"step": 658
},
{
"epoch": 7.0212765957446805,
"grad_norm": 0.8846572637557983,
"learning_rate": 2.9787234042553192e-05,
"loss": 0.0018,
"step": 660
},
{
"epoch": 7.127659574468085,
"grad_norm": 1.0834753513336182,
"learning_rate": 2.8723404255319154e-05,
"loss": 0.0007,
"step": 670
},
{
"epoch": 7.23404255319149,
"grad_norm": 0.28342002630233765,
"learning_rate": 2.765957446808511e-05,
"loss": 0.0026,
"step": 680
},
{
"epoch": 7.340425531914893,
"grad_norm": 0.33790862560272217,
"learning_rate": 2.6595744680851064e-05,
"loss": 0.0017,
"step": 690
},
{
"epoch": 7.446808510638298,
"grad_norm": 1.4947048425674438,
"learning_rate": 2.5531914893617022e-05,
"loss": 0.0023,
"step": 700
},
{
"epoch": 7.553191489361702,
"grad_norm": 1.0011037588119507,
"learning_rate": 2.446808510638298e-05,
"loss": 0.0014,
"step": 710
},
{
"epoch": 7.659574468085106,
"grad_norm": 0.686026930809021,
"learning_rate": 2.340425531914894e-05,
"loss": 0.0008,
"step": 720
},
{
"epoch": 7.76595744680851,
"grad_norm": 0.06300849467515945,
"learning_rate": 2.2340425531914894e-05,
"loss": 0.0016,
"step": 730
},
{
"epoch": 7.872340425531915,
"grad_norm": 0.5408380627632141,
"learning_rate": 2.1276595744680852e-05,
"loss": 0.0008,
"step": 740
},
{
"epoch": 7.9787234042553195,
"grad_norm": 0.07452932745218277,
"learning_rate": 2.0212765957446807e-05,
"loss": 0.0007,
"step": 750
},
{
"epoch": 8.0,
"eval_loss": 0.006444782949984074,
"eval_mse": 0.006444782949984074,
"eval_runtime": 13.594,
"eval_samples_per_second": 13.756,
"eval_steps_per_second": 1.765,
"step": 752
},
{
"epoch": 8.085106382978724,
"grad_norm": 0.5107563138008118,
"learning_rate": 1.9148936170212766e-05,
"loss": 0.0003,
"step": 760
},
{
"epoch": 8.191489361702128,
"grad_norm": 0.2516929507255554,
"learning_rate": 1.8085106382978724e-05,
"loss": 0.0004,
"step": 770
},
{
"epoch": 8.297872340425531,
"grad_norm": 0.24071469902992249,
"learning_rate": 1.7021276595744682e-05,
"loss": 0.001,
"step": 780
},
{
"epoch": 8.404255319148936,
"grad_norm": 0.13089929521083832,
"learning_rate": 1.595744680851064e-05,
"loss": 0.0004,
"step": 790
},
{
"epoch": 8.51063829787234,
"grad_norm": 1.771623969078064,
"learning_rate": 1.4893617021276596e-05,
"loss": 0.0011,
"step": 800
},
{
"epoch": 8.617021276595745,
"grad_norm": 0.585198163986206,
"learning_rate": 1.3829787234042554e-05,
"loss": 0.0006,
"step": 810
},
{
"epoch": 8.72340425531915,
"grad_norm": 1.7595680952072144,
"learning_rate": 1.2765957446808511e-05,
"loss": 0.0011,
"step": 820
},
{
"epoch": 8.829787234042554,
"grad_norm": 0.2941080331802368,
"learning_rate": 1.170212765957447e-05,
"loss": 0.0003,
"step": 830
},
{
"epoch": 8.936170212765958,
"grad_norm": 1.3685917854309082,
"learning_rate": 1.0638297872340426e-05,
"loss": 0.0008,
"step": 840
},
{
"epoch": 9.0,
"eval_loss": 0.006674789357930422,
"eval_mse": 0.006674789357930422,
"eval_runtime": 13.6694,
"eval_samples_per_second": 13.68,
"eval_steps_per_second": 1.756,
"step": 846
},
{
"epoch": 9.042553191489361,
"grad_norm": 0.4333817660808563,
"learning_rate": 9.574468085106383e-06,
"loss": 0.0004,
"step": 850
},
{
"epoch": 9.148936170212766,
"grad_norm": 0.21346421539783478,
"learning_rate": 8.510638297872341e-06,
"loss": 0.0002,
"step": 860
},
{
"epoch": 9.25531914893617,
"grad_norm": 0.13360550999641418,
"learning_rate": 7.446808510638298e-06,
"loss": 0.0002,
"step": 870
},
{
"epoch": 9.361702127659575,
"grad_norm": 0.20817670226097107,
"learning_rate": 6.3829787234042555e-06,
"loss": 0.0003,
"step": 880
},
{
"epoch": 9.46808510638298,
"grad_norm": 0.19471751153469086,
"learning_rate": 5.319148936170213e-06,
"loss": 0.0004,
"step": 890
},
{
"epoch": 9.574468085106384,
"grad_norm": 0.11478572338819504,
"learning_rate": 4.255319148936171e-06,
"loss": 0.0001,
"step": 900
},
{
"epoch": 9.680851063829786,
"grad_norm": 0.46695956587791443,
"learning_rate": 3.1914893617021277e-06,
"loss": 0.0003,
"step": 910
},
{
"epoch": 9.787234042553191,
"grad_norm": 0.15428566932678223,
"learning_rate": 2.1276595744680853e-06,
"loss": 0.0016,
"step": 920
},
{
"epoch": 9.893617021276595,
"grad_norm": 0.1379678100347519,
"learning_rate": 1.0638297872340427e-06,
"loss": 0.0001,
"step": 930
},
{
"epoch": 10.0,
"grad_norm": 0.21880577504634857,
"learning_rate": 0.0,
"loss": 0.0002,
"step": 940
}
],
"logging_steps": 10,
"max_steps": 940,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
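A minimal sketch (not part of the original file) of how one might read this trainer_state.json and summarize its log_history: training entries carry "loss" per logged step, while the per-epoch evaluation entries carry "eval_loss"/"eval_mse". The file path and the printed format below are assumptions for illustration.

import json

# Assumed location: the file shown above saved alongside this script.
with open("trainer_state.json") as f:
    state = json.load(f)

# Split the log into training-step entries and per-epoch evaluation entries.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"{len(train_logs)} training log entries, {len(eval_logs)} eval entries")

# Print the evaluation MSE recorded at the end of each epoch.
for e in eval_logs:
    print(f"epoch {e['epoch']:>5}: eval_mse={e['eval_mse']:.6f} (step {e['step']})")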