{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9952941176470587,
"eval_steps": 500,
"global_step": 212,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.047058823529411764,
"grad_norm": 1.2597168684005737,
"learning_rate": 4.9931407070965254e-05,
"loss": 0.7844,
"num_input_tokens_seen": 113104,
"step": 5
},
{
"epoch": 0.09411764705882353,
"grad_norm": 0.8212803602218628,
"learning_rate": 4.97260046830541e-05,
"loss": 0.4838,
"num_input_tokens_seen": 222672,
"step": 10
},
{
"epoch": 0.1411764705882353,
"grad_norm": 0.713465690612793,
"learning_rate": 4.9384919968379945e-05,
"loss": 0.425,
"num_input_tokens_seen": 334464,
"step": 15
},
{
"epoch": 0.18823529411764706,
"grad_norm": 0.6203038692474365,
"learning_rate": 4.891002460691306e-05,
"loss": 0.4034,
"num_input_tokens_seen": 450112,
"step": 20
},
{
"epoch": 0.23529411764705882,
"grad_norm": 0.6142417788505554,
"learning_rate": 4.83039245557597e-05,
"loss": 0.3746,
"num_input_tokens_seen": 561888,
"step": 25
},
{
"epoch": 0.2823529411764706,
"grad_norm": 0.5748819708824158,
"learning_rate": 4.756994574914359e-05,
"loss": 0.3394,
"num_input_tokens_seen": 674576,
"step": 30
},
{
"epoch": 0.32941176470588235,
"grad_norm": 0.5614773631095886,
"learning_rate": 4.6712115847560355e-05,
"loss": 0.3312,
"num_input_tokens_seen": 791472,
"step": 35
},
{
"epoch": 0.3764705882352941,
"grad_norm": 0.5544697642326355,
"learning_rate": 4.573514213625505e-05,
"loss": 0.3277,
"num_input_tokens_seen": 905712,
"step": 40
},
{
"epoch": 0.4235294117647059,
"grad_norm": 0.5900682806968689,
"learning_rate": 4.464438569430354e-05,
"loss": 0.3274,
"num_input_tokens_seen": 1024304,
"step": 45
},
{
"epoch": 0.47058823529411764,
"grad_norm": 0.5739061236381531,
"learning_rate": 4.344583197604318e-05,
"loss": 0.3074,
"num_input_tokens_seen": 1139904,
"step": 50
},
{
"epoch": 0.5176470588235295,
"grad_norm": 0.5653313994407654,
"learning_rate": 4.214605796628527e-05,
"loss": 0.3147,
"num_input_tokens_seen": 1257504,
"step": 55
},
{
"epoch": 0.5647058823529412,
"grad_norm": 0.6587158441543579,
"learning_rate": 4.075219608954278e-05,
"loss": 0.3017,
"num_input_tokens_seen": 1374272,
"step": 60
},
{
"epoch": 0.611764705882353,
"grad_norm": 0.550284743309021,
"learning_rate": 3.927189507131938e-05,
"loss": 0.2992,
"num_input_tokens_seen": 1487248,
"step": 65
},
{
"epoch": 0.6588235294117647,
"grad_norm": 0.5366208553314209,
"learning_rate": 3.7713277966230514e-05,
"loss": 0.3,
"num_input_tokens_seen": 1598400,
"step": 70
},
{
"epoch": 0.7058823529411765,
"grad_norm": 0.5543321967124939,
"learning_rate": 3.608489758327472e-05,
"loss": 0.3018,
"num_input_tokens_seen": 1717296,
"step": 75
},
{
"epoch": 0.7529411764705882,
"grad_norm": 0.5324147343635559,
"learning_rate": 3.4395689552855955e-05,
"loss": 0.2904,
"num_input_tokens_seen": 1831296,
"step": 80
},
{
"epoch": 0.8,
"grad_norm": 0.592769980430603,
"learning_rate": 3.265492329309867e-05,
"loss": 0.2861,
"num_input_tokens_seen": 1947360,
"step": 85
},
{
"epoch": 0.8470588235294118,
"grad_norm": 0.583766758441925,
"learning_rate": 3.0872151144524595e-05,
"loss": 0.2923,
"num_input_tokens_seen": 2054512,
"step": 90
},
{
"epoch": 0.8941176470588236,
"grad_norm": 0.5162491202354431,
"learning_rate": 2.9057155952211502e-05,
"loss": 0.2831,
"num_input_tokens_seen": 2164752,
"step": 95
},
{
"epoch": 0.9411764705882353,
"grad_norm": 0.5491448044776917,
"learning_rate": 2.7219897383073373e-05,
"loss": 0.2779,
"num_input_tokens_seen": 2277856,
"step": 100
},
{
"epoch": 0.9882352941176471,
"grad_norm": 0.5450201034545898,
"learning_rate": 2.537045727284232e-05,
"loss": 0.2633,
"num_input_tokens_seen": 2388064,
"step": 105
},
{
"epoch": 1.035294117647059,
"grad_norm": 0.5881150364875793,
"learning_rate": 2.3518984302657146e-05,
"loss": 0.2631,
"num_input_tokens_seen": 2506096,
"step": 110
},
{
"epoch": 1.0823529411764705,
"grad_norm": 0.5562729835510254,
"learning_rate": 2.1675638308842145e-05,
"loss": 0.264,
"num_input_tokens_seen": 2623072,
"step": 115
},
{
"epoch": 1.1294117647058823,
"grad_norm": 0.6210693120956421,
"learning_rate": 1.9850534531472546e-05,
"loss": 0.2579,
"num_input_tokens_seen": 2738576,
"step": 120
},
{
"epoch": 1.1764705882352942,
"grad_norm": 0.5652827024459839,
"learning_rate": 1.8053688107658908e-05,
"loss": 0.2674,
"num_input_tokens_seen": 2852704,
"step": 125
},
{
"epoch": 1.223529411764706,
"grad_norm": 0.5649927854537964,
"learning_rate": 1.6294959114140034e-05,
"loss": 0.2542,
"num_input_tokens_seen": 2967184,
"step": 130
},
{
"epoch": 1.2705882352941176,
"grad_norm": 0.5547770261764526,
"learning_rate": 1.4583998460759424e-05,
"loss": 0.2499,
"num_input_tokens_seen": 3078944,
"step": 135
},
{
"epoch": 1.3176470588235294,
"grad_norm": 0.6156982779502869,
"learning_rate": 1.2930194931731382e-05,
"loss": 0.2594,
"num_input_tokens_seen": 3191008,
"step": 140
},
{
"epoch": 1.3647058823529412,
"grad_norm": 0.5913279056549072,
"learning_rate": 1.1342623665304209e-05,
"loss": 0.2696,
"num_input_tokens_seen": 3308176,
"step": 145
},
{
"epoch": 1.4117647058823528,
"grad_norm": 0.5994729995727539,
"learning_rate": 9.829996354535172e-06,
"loss": 0.2582,
"num_input_tokens_seen": 3420480,
"step": 150
},
{
"epoch": 1.4588235294117646,
"grad_norm": 0.7692103385925293,
"learning_rate": 8.400613442446948e-06,
"loss": 0.2639,
"num_input_tokens_seen": 3535280,
"step": 155
},
{
"epoch": 1.5058823529411764,
"grad_norm": 0.5787561535835266,
"learning_rate": 7.062318573891716e-06,
"loss": 0.2628,
"num_input_tokens_seen": 3648256,
"step": 160
},
{
"epoch": 1.5529411764705883,
"grad_norm": 0.5587510466575623,
"learning_rate": 5.822455554065217e-06,
"loss": 0.2596,
"num_input_tokens_seen": 3764400,
"step": 165
},
{
"epoch": 1.6,
"grad_norm": 0.5860652327537537,
"learning_rate": 4.687828049857967e-06,
"loss": 0.2438,
"num_input_tokens_seen": 3873456,
"step": 170
},
{
"epoch": 1.6470588235294117,
"grad_norm": 0.5650219321250916,
"learning_rate": 3.6646622551801345e-06,
"loss": 0.2388,
"num_input_tokens_seen": 3987808,
"step": 175
},
{
"epoch": 1.6941176470588235,
"grad_norm": 0.6057121753692627,
"learning_rate": 2.75857272513132e-06,
"loss": 0.2587,
"num_input_tokens_seen": 4102208,
"step": 180
},
{
"epoch": 1.7411764705882353,
"grad_norm": 0.6009911894798279,
"learning_rate": 1.9745315664982276e-06,
"loss": 0.2458,
"num_input_tokens_seen": 4214528,
"step": 185
},
{
"epoch": 1.788235294117647,
"grad_norm": 0.5280789732933044,
"learning_rate": 1.3168411536452152e-06,
"loss": 0.2395,
"num_input_tokens_seen": 4328288,
"step": 190
},
{
"epoch": 1.835294117647059,
"grad_norm": 0.57253098487854,
"learning_rate": 7.891105195175358e-07,
"loss": 0.2522,
"num_input_tokens_seen": 4447136,
"step": 195
},
{
"epoch": 1.8823529411764706,
"grad_norm": 0.5598665475845337,
"learning_rate": 3.9423555131007925e-07,
"loss": 0.2553,
"num_input_tokens_seen": 4561008,
"step": 200
},
{
"epoch": 1.9294117647058824,
"grad_norm": 0.5753045678138733,
"learning_rate": 1.343830994765982e-07,
"loss": 0.2445,
"num_input_tokens_seen": 4673504,
"step": 205
},
{
"epoch": 1.9764705882352942,
"grad_norm": 0.5889866352081299,
"learning_rate": 1.0979087280141298e-08,
"loss": 0.2542,
"num_input_tokens_seen": 4784784,
"step": 210
},
{
"epoch": 1.9952941176470587,
"num_input_tokens_seen": 4830400,
"step": 212,
"total_flos": 3.081382534064374e+17,
"train_loss": 0.30173668602727494,
"train_runtime": 3949.4939,
"train_samples_per_second": 1.722,
"train_steps_per_second": 0.054
}
],
"logging_steps": 5,
"max_steps": 212,
"num_input_tokens_seen": 4830400,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.081382534064374e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}