{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.07543611504007544,
"eval_steps": 5,
"global_step": 40,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001885902876001886,
"eval_loss": 1.5560035705566406,
"eval_runtime": 68.851,
"eval_samples_per_second": 12.97,
"eval_steps_per_second": 1.627,
"step": 1
},
{
"epoch": 0.005657708628005658,
"grad_norm": 0.48519009351730347,
"learning_rate": 3e-05,
"loss": 1.5268,
"step": 3
},
{
"epoch": 0.00942951438000943,
"eval_loss": 1.493416428565979,
"eval_runtime": 68.726,
"eval_samples_per_second": 12.994,
"eval_steps_per_second": 1.63,
"step": 5
},
{
"epoch": 0.011315417256011316,
"grad_norm": 0.44942334294319153,
"learning_rate": 6e-05,
"loss": 1.6204,
"step": 6
},
{
"epoch": 0.016973125884016973,
"grad_norm": 0.3278067708015442,
"learning_rate": 9e-05,
"loss": 1.53,
"step": 9
},
{
"epoch": 0.01885902876001886,
"eval_loss": 1.3736556768417358,
"eval_runtime": 68.8401,
"eval_samples_per_second": 12.972,
"eval_steps_per_second": 1.627,
"step": 10
},
{
"epoch": 0.02263083451202263,
"grad_norm": 0.3598170578479767,
"learning_rate": 9.938441702975689e-05,
"loss": 1.4709,
"step": 12
},
{
"epoch": 0.028288543140028287,
"grad_norm": 0.2836269438266754,
"learning_rate": 9.619397662556435e-05,
"loss": 1.2911,
"step": 15
},
{
"epoch": 0.028288543140028287,
"eval_loss": 1.324967384338379,
"eval_runtime": 68.8059,
"eval_samples_per_second": 12.979,
"eval_steps_per_second": 1.628,
"step": 15
},
{
"epoch": 0.033946251768033946,
"grad_norm": 0.29951444268226624,
"learning_rate": 9.045084971874738e-05,
"loss": 1.2599,
"step": 18
},
{
"epoch": 0.03771805752003772,
"eval_loss": 1.3078007698059082,
"eval_runtime": 68.8442,
"eval_samples_per_second": 12.971,
"eval_steps_per_second": 1.627,
"step": 20
},
{
"epoch": 0.039603960396039604,
"grad_norm": 0.2421295940876007,
"learning_rate": 8.247240241650918e-05,
"loss": 1.2654,
"step": 21
},
{
"epoch": 0.04526166902404526,
"grad_norm": 0.25890016555786133,
"learning_rate": 7.269952498697734e-05,
"loss": 1.2665,
"step": 24
},
{
"epoch": 0.04714757190004715,
"eval_loss": 1.2976175546646118,
"eval_runtime": 68.8074,
"eval_samples_per_second": 12.978,
"eval_steps_per_second": 1.628,
"step": 25
},
{
"epoch": 0.05091937765205092,
"grad_norm": 0.24166753888130188,
"learning_rate": 6.167226819279528e-05,
"loss": 1.248,
"step": 27
},
{
"epoch": 0.056577086280056574,
"grad_norm": 0.2408292293548584,
"learning_rate": 5e-05,
"loss": 1.3139,
"step": 30
},
{
"epoch": 0.056577086280056574,
"eval_loss": 1.2924392223358154,
"eval_runtime": 68.7889,
"eval_samples_per_second": 12.982,
"eval_steps_per_second": 1.628,
"step": 30
},
{
"epoch": 0.06223479490806223,
"grad_norm": 0.24388636648654938,
"learning_rate": 3.832773180720475e-05,
"loss": 1.1885,
"step": 33
},
{
"epoch": 0.066006600660066,
"eval_loss": 1.288648009300232,
"eval_runtime": 68.7447,
"eval_samples_per_second": 12.99,
"eval_steps_per_second": 1.629,
"step": 35
},
{
"epoch": 0.06789250353606789,
"grad_norm": 0.2198489010334015,
"learning_rate": 2.7300475013022663e-05,
"loss": 1.3563,
"step": 36
},
{
"epoch": 0.07355021216407355,
"grad_norm": 0.23230212926864624,
"learning_rate": 1.7527597583490822e-05,
"loss": 1.2303,
"step": 39
},
{
"epoch": 0.07543611504007544,
"eval_loss": 1.2861415147781372,
"eval_runtime": 68.8353,
"eval_samples_per_second": 12.973,
"eval_steps_per_second": 1.627,
"step": 40
}
],
"logging_steps": 3,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 5,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.86529082703872e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
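
The sketch below is not part of the checkpoint itself; it is a minimal, hypothetical example of how one might inspect this trainer_state.json offline with Python's standard json module, printing the eval_loss recorded at each evaluation point (logged every eval_steps = 5 steps above). The file path is an assumption; point it at the actual checkpoint directory.

import json

# Assumed location of this file; adjust to wherever the checkpoint-40 directory lives.
with open("checkpoint-40/trainer_state.json") as f:
    state = json.load(f)

# Entries in log_history that carry an "eval_loss" key are the evaluation points;
# the other entries hold the training loss, grad_norm, and learning_rate logs.
eval_points = [
    (entry["step"], entry["eval_loss"])
    for entry in state["log_history"]
    if "eval_loss" in entry
]

for step, loss in eval_points:
    print(f"step {step:>3}: eval_loss {loss:.4f}")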