Add HuggingFaceH4/mistral-7b-ift-v12.2 checkpoint (265ad17)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.5435801312089972,
"eval_steps": 500,
"global_step": 290,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 3.7037037037037036e-07,
"loss": 1.7019,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 1.8518518518518519e-06,
"loss": 1.5901,
"step": 5
},
{
"epoch": 0.02,
"learning_rate": 3.7037037037037037e-06,
"loss": 1.3226,
"step": 10
},
{
"epoch": 0.03,
"learning_rate": 5.555555555555557e-06,
"loss": 1.1733,
"step": 15
},
{
"epoch": 0.04,
"learning_rate": 7.4074074074074075e-06,
"loss": 1.0978,
"step": 20
},
{
"epoch": 0.05,
"learning_rate": 9.25925925925926e-06,
"loss": 1.0591,
"step": 25
},
{
"epoch": 0.06,
"learning_rate": 1.1111111111111113e-05,
"loss": 1.0304,
"step": 30
},
{
"epoch": 0.07,
"learning_rate": 1.2962962962962964e-05,
"loss": 1.0116,
"step": 35
},
{
"epoch": 0.07,
"learning_rate": 1.4814814814814815e-05,
"loss": 0.9904,
"step": 40
},
{
"epoch": 0.08,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.9896,
"step": 45
},
{
"epoch": 0.09,
"learning_rate": 1.851851851851852e-05,
"loss": 0.9806,
"step": 50
},
{
"epoch": 0.1,
"learning_rate": 1.999978492141723e-05,
"loss": 0.9673,
"step": 55
},
{
"epoch": 0.11,
"learning_rate": 1.9992258142410335e-05,
"loss": 0.9732,
"step": 60
},
{
"epoch": 0.12,
"learning_rate": 1.997398668423542e-05,
"loss": 0.9526,
"step": 65
},
{
"epoch": 0.13,
"learning_rate": 1.9944990194198758e-05,
"loss": 0.9571,
"step": 70
},
{
"epoch": 0.14,
"learning_rate": 1.9905299852237654e-05,
"loss": 0.9498,
"step": 75
},
{
"epoch": 0.15,
"learning_rate": 1.9854958337392654e-05,
"loss": 0.959,
"step": 80
},
{
"epoch": 0.16,
"learning_rate": 1.9794019781914764e-05,
"loss": 0.9538,
"step": 85
},
{
"epoch": 0.17,
"learning_rate": 1.9722549713057008e-05,
"loss": 0.9449,
"step": 90
},
{
"epoch": 0.18,
"learning_rate": 1.9640624982612943e-05,
"loss": 0.9497,
"step": 95
},
{
"epoch": 0.19,
"learning_rate": 1.954833368427792e-05,
"loss": 0.9595,
"step": 100
},
{
"epoch": 0.2,
"learning_rate": 1.9445775058921856e-05,
"loss": 0.9563,
"step": 105
},
{
"epoch": 0.21,
"learning_rate": 1.9333059387875527e-05,
"loss": 0.9415,
"step": 110
},
{
"epoch": 0.22,
"learning_rate": 1.921030787434499e-05,
"loss": 0.9432,
"step": 115
},
{
"epoch": 0.22,
"learning_rate": 1.907765251308173e-05,
"loss": 0.9484,
"step": 120
},
{
"epoch": 0.23,
"learning_rate": 1.8935235948448653e-05,
"loss": 0.939,
"step": 125
},
{
"epoch": 0.24,
"learning_rate": 1.8783211321034537e-05,
"loss": 0.9326,
"step": 130
},
{
"epoch": 0.25,
"learning_rate": 1.8621742102981906e-05,
"loss": 0.9485,
"step": 135
},
{
"epoch": 0.26,
"learning_rate": 1.845100192220537e-05,
"loss": 0.9401,
"step": 140
},
{
"epoch": 0.27,
"learning_rate": 1.8271174375689454e-05,
"loss": 0.9415,
"step": 145
},
{
"epoch": 0.28,
"learning_rate": 1.808245283206669e-05,
"loss": 0.9439,
"step": 150
},
{
"epoch": 0.29,
"learning_rate": 1.7885040223688234e-05,
"loss": 0.9423,
"step": 155
},
{
"epoch": 0.3,
"learning_rate": 1.767914882841067e-05,
"loss": 0.9422,
"step": 160
},
{
"epoch": 0.31,
"learning_rate": 1.7465000041333496e-05,
"loss": 0.933,
"step": 165
},
{
"epoch": 0.32,
"learning_rate": 1.724282413673291e-05,
"loss": 0.9288,
"step": 170
},
{
"epoch": 0.33,
"learning_rate": 1.7012860020447797e-05,
"loss": 0.937,
"step": 175
},
{
"epoch": 0.34,
"learning_rate": 1.677535497298416e-05,
"loss": 0.9411,
"step": 180
},
{
"epoch": 0.35,
"learning_rate": 1.653056438361432e-05,
"loss": 0.9327,
"step": 185
},
{
"epoch": 0.36,
"learning_rate": 1.6278751475756712e-05,
"loss": 0.934,
"step": 190
},
{
"epoch": 0.37,
"learning_rate": 1.602018702393164e-05,
"loss": 0.9242,
"step": 195
},
{
"epoch": 0.37,
"learning_rate": 1.5755149062597332e-05,
"loss": 0.9264,
"step": 200
},
{
"epoch": 0.38,
"learning_rate": 1.5483922587179386e-05,
"loss": 0.9343,
"step": 205
},
{
"epoch": 0.39,
"learning_rate": 1.5206799247615038e-05,
"loss": 0.9194,
"step": 210
},
{
"epoch": 0.4,
"learning_rate": 1.4924077034741924e-05,
"loss": 0.9306,
"step": 215
},
{
"epoch": 0.41,
"learning_rate": 1.4636059959868363e-05,
"loss": 0.918,
"step": 220
},
{
"epoch": 0.42,
"learning_rate": 1.43430577278699e-05,
"loss": 0.9193,
"step": 225
},
{
"epoch": 0.43,
"learning_rate": 1.4045385404163533e-05,
"loss": 0.9259,
"step": 230
},
{
"epoch": 0.44,
"learning_rate": 1.3743363075917723e-05,
"loss": 0.9247,
"step": 235
},
{
"epoch": 0.45,
"learning_rate": 1.3437315507862568e-05,
"loss": 0.9267,
"step": 240
},
{
"epoch": 0.46,
"learning_rate": 1.312757179307012e-05,
"loss": 0.9193,
"step": 245
},
{
"epoch": 0.47,
"learning_rate": 1.28144649990805e-05,
"loss": 0.9263,
"step": 250
},
{
"epoch": 0.48,
"learning_rate": 1.2498331809754243e-05,
"loss": 0.9172,
"step": 255
},
{
"epoch": 0.49,
"learning_rate": 1.2179512163235973e-05,
"loss": 0.9223,
"step": 260
},
{
"epoch": 0.5,
"learning_rate": 1.1858348886418832e-05,
"loss": 0.9196,
"step": 265
},
{
"epoch": 0.51,
"learning_rate": 1.153518732630253e-05,
"loss": 0.9218,
"step": 270
},
{
"epoch": 0.52,
"learning_rate": 1.121037497864163e-05,
"loss": 0.9213,
"step": 275
},
{
"epoch": 0.52,
"learning_rate": 1.088426111428319e-05,
"loss": 0.9078,
"step": 280
},
{
"epoch": 0.53,
"learning_rate": 1.0557196403595689e-05,
"loss": 0.9135,
"step": 285
},
{
"epoch": 0.54,
"learning_rate": 1.0229532539393051e-05,
"loss": 0.9051,
"step": 290
},
{
"epoch": 0.54,
"eval_loss": 0.8942756056785583,
"eval_runtime": 214.7071,
"eval_samples_per_second": 171.531,
"eval_steps_per_second": 0.671,
"step": 290
},
{
"epoch": 0.54,
"step": 290,
"total_flos": 485917835919360.0,
"train_loss": 0.9702501186009111,
"train_runtime": 6614.4701,
"train_samples_per_second": 41.278,
"train_steps_per_second": 0.081
}
],
"logging_steps": 5,
"max_steps": 533,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 485917835919360.0,
"trial_name": null,
"trial_params": null
}
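The JSON above is the Trainer state written alongside the checkpoint at step 290: each "loss" entry in "log_history" is a training log, the "eval_loss" entry is the evaluation at step 290, and the final entry summarizes the run. A minimal sketch of how one might parse this log and plot the loss curve follows; the file path and the use of matplotlib are assumptions for illustration, not part of the checkpoint.

    import json

    import matplotlib.pyplot as plt

    # Assumed location of the file shown above (hypothetical path).
    with open("checkpoint-290/trainer_state.json") as f:
        state = json.load(f)

    # Training logs carry "loss"; evaluation logs carry "eval_loss" instead.
    train_logs = [e for e in state["log_history"] if "loss" in e]
    steps = [e["step"] for e in train_logs]
    losses = [e["loss"] for e in train_logs]

    plt.plot(steps, losses, label="train loss")
    plt.xlabel("step")
    plt.ylabel("loss")
    plt.legend()
    plt.savefig("loss_curve.png")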