{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.00177210703526493,
"eval_steps": 3,
"global_step": 25,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 7.08842814105972e-05,
"grad_norm": 0.07005972415208817,
"learning_rate": 2e-05,
"loss": 10.3888,
"step": 1
},
{
"epoch": 7.08842814105972e-05,
"eval_loss": 10.386326789855957,
"eval_runtime": 21.6585,
"eval_samples_per_second": 137.129,
"eval_steps_per_second": 137.129,
"step": 1
},
{
"epoch": 0.0001417685628211944,
"grad_norm": 0.07696182280778885,
"learning_rate": 4e-05,
"loss": 10.3974,
"step": 2
},
{
"epoch": 0.0002126528442317916,
"grad_norm": 0.08747204393148422,
"learning_rate": 6e-05,
"loss": 10.3903,
"step": 3
},
{
"epoch": 0.0002126528442317916,
"eval_loss": 10.386242866516113,
"eval_runtime": 21.8717,
"eval_samples_per_second": 135.792,
"eval_steps_per_second": 135.792,
"step": 3
},
{
"epoch": 0.0002835371256423888,
"grad_norm": 0.07742352038621902,
"learning_rate": 8e-05,
"loss": 10.383,
"step": 4
},
{
"epoch": 0.000354421407052986,
"grad_norm": 0.08146929740905762,
"learning_rate": 0.0001,
"loss": 10.3837,
"step": 5
},
{
"epoch": 0.0004253056884635832,
"grad_norm": 0.10105450451374054,
"learning_rate": 0.00012,
"loss": 10.3935,
"step": 6
},
{
"epoch": 0.0004253056884635832,
"eval_loss": 10.385862350463867,
"eval_runtime": 21.8973,
"eval_samples_per_second": 135.633,
"eval_steps_per_second": 135.633,
"step": 6
},
{
"epoch": 0.0004961899698741804,
"grad_norm": 0.07793600857257843,
"learning_rate": 0.00014,
"loss": 10.392,
"step": 7
},
{
"epoch": 0.0005670742512847776,
"grad_norm": 0.1045127809047699,
"learning_rate": 0.00016,
"loss": 10.3973,
"step": 8
},
{
"epoch": 0.0006379585326953748,
"grad_norm": 0.08697844296693802,
"learning_rate": 0.00018,
"loss": 10.3811,
"step": 9
},
{
"epoch": 0.0006379585326953748,
"eval_loss": 10.385165214538574,
"eval_runtime": 21.8138,
"eval_samples_per_second": 136.152,
"eval_steps_per_second": 136.152,
"step": 9
},
{
"epoch": 0.000708842814105972,
"grad_norm": 0.08561215549707413,
"learning_rate": 0.0002,
"loss": 10.382,
"step": 10
},
{
"epoch": 0.0007797270955165692,
"grad_norm": 0.09970103204250336,
"learning_rate": 0.00019781476007338058,
"loss": 10.389,
"step": 11
},
{
"epoch": 0.0008506113769271664,
"grad_norm": 0.09796269983053207,
"learning_rate": 0.0001913545457642601,
"loss": 10.385,
"step": 12
},
{
"epoch": 0.0008506113769271664,
"eval_loss": 10.384222984313965,
"eval_runtime": 21.8059,
"eval_samples_per_second": 136.202,
"eval_steps_per_second": 136.202,
"step": 12
},
{
"epoch": 0.0009214956583377636,
"grad_norm": 0.08635301142930984,
"learning_rate": 0.00018090169943749476,
"loss": 10.3813,
"step": 13
},
{
"epoch": 0.0009923799397483608,
"grad_norm": 0.07206328958272934,
"learning_rate": 0.00016691306063588583,
"loss": 10.3874,
"step": 14
},
{
"epoch": 0.001063264221158958,
"grad_norm": 0.09540396928787231,
"learning_rate": 0.00015000000000000001,
"loss": 10.3825,
"step": 15
},
{
"epoch": 0.001063264221158958,
"eval_loss": 10.38329792022705,
"eval_runtime": 23.4703,
"eval_samples_per_second": 126.543,
"eval_steps_per_second": 126.543,
"step": 15
},
{
"epoch": 0.0011341485025695552,
"grad_norm": 0.09925121068954468,
"learning_rate": 0.00013090169943749476,
"loss": 10.3858,
"step": 16
},
{
"epoch": 0.0012050327839801525,
"grad_norm": 0.12503716349601746,
"learning_rate": 0.00011045284632676536,
"loss": 10.3875,
"step": 17
},
{
"epoch": 0.0012759170653907496,
"grad_norm": 0.08620873838663101,
"learning_rate": 8.954715367323468e-05,
"loss": 10.385,
"step": 18
},
{
"epoch": 0.0012759170653907496,
"eval_loss": 10.382616996765137,
"eval_runtime": 21.933,
"eval_samples_per_second": 135.412,
"eval_steps_per_second": 135.412,
"step": 18
},
{
"epoch": 0.0013468013468013469,
"grad_norm": 0.0887003093957901,
"learning_rate": 6.909830056250527e-05,
"loss": 10.3812,
"step": 19
},
{
"epoch": 0.001417685628211944,
"grad_norm": 0.10247205197811127,
"learning_rate": 5.000000000000002e-05,
"loss": 10.3803,
"step": 20
},
{
"epoch": 0.0014885699096225413,
"grad_norm": 0.10386703908443451,
"learning_rate": 3.308693936411421e-05,
"loss": 10.3872,
"step": 21
},
{
"epoch": 0.0014885699096225413,
"eval_loss": 10.382221221923828,
"eval_runtime": 21.9648,
"eval_samples_per_second": 135.217,
"eval_steps_per_second": 135.217,
"step": 21
},
{
"epoch": 0.0015594541910331384,
"grad_norm": 0.11468103528022766,
"learning_rate": 1.9098300562505266e-05,
"loss": 10.3818,
"step": 22
},
{
"epoch": 0.0016303384724437357,
"grad_norm": 0.10616473853588104,
"learning_rate": 8.645454235739903e-06,
"loss": 10.3837,
"step": 23
},
{
"epoch": 0.0017012227538543328,
"grad_norm": 0.10667341947555542,
"learning_rate": 2.1852399266194314e-06,
"loss": 10.3874,
"step": 24
},
{
"epoch": 0.0017012227538543328,
"eval_loss": 10.382101058959961,
"eval_runtime": 23.3378,
"eval_samples_per_second": 127.262,
"eval_steps_per_second": 127.262,
"step": 24
},
{
"epoch": 0.00177210703526493,
"grad_norm": 0.07770507037639618,
"learning_rate": 0.0,
"loss": 10.3784,
"step": 25
}
],
"logging_steps": 1,
"max_steps": 25,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 3,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 646545408000.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}