Training in progress, step 25, checkpoint (commit 40b582a, verified)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.028216704288939052,
"eval_steps": 25,
"global_step": 25,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001128668171557562,
"grad_norm": 46.31266784667969,
"learning_rate": 6.666666666666667e-05,
"loss": 11.9289,
"step": 1
},
{
"epoch": 0.001128668171557562,
"eval_loss": 11.48071575164795,
"eval_runtime": 218.574,
"eval_samples_per_second": 3.418,
"eval_steps_per_second": 1.711,
"step": 1
},
{
"epoch": 0.002257336343115124,
"grad_norm": 32.02701187133789,
"learning_rate": 0.00013333333333333334,
"loss": 9.1416,
"step": 2
},
{
"epoch": 0.003386004514672686,
"grad_norm": 22.43347930908203,
"learning_rate": 0.0002,
"loss": 7.6477,
"step": 3
},
{
"epoch": 0.004514672686230248,
"grad_norm": 24.434974670410156,
"learning_rate": 0.0001999048221581858,
"loss": 7.0388,
"step": 4
},
{
"epoch": 0.0056433408577878106,
"grad_norm": 21.187652587890625,
"learning_rate": 0.00019961946980917456,
"loss": 3.8237,
"step": 5
},
{
"epoch": 0.006772009029345372,
"grad_norm": 18.851232528686523,
"learning_rate": 0.00019914448613738106,
"loss": 2.9445,
"step": 6
},
{
"epoch": 0.007900677200902935,
"grad_norm": 21.697521209716797,
"learning_rate": 0.00019848077530122083,
"loss": 3.5231,
"step": 7
},
{
"epoch": 0.009029345372460496,
"grad_norm": 14.041197776794434,
"learning_rate": 0.00019762960071199333,
"loss": 2.2053,
"step": 8
},
{
"epoch": 0.010158013544018058,
"grad_norm": 15.442231178283691,
"learning_rate": 0.00019659258262890683,
"loss": 2.3755,
"step": 9
},
{
"epoch": 0.011286681715575621,
"grad_norm": 14.979207038879395,
"learning_rate": 0.0001953716950748227,
"loss": 2.5331,
"step": 10
},
{
"epoch": 0.012415349887133182,
"grad_norm": 11.316594123840332,
"learning_rate": 0.00019396926207859084,
"loss": 2.2067,
"step": 11
},
{
"epoch": 0.013544018058690745,
"grad_norm": 10.44298267364502,
"learning_rate": 0.0001923879532511287,
"loss": 1.641,
"step": 12
},
{
"epoch": 0.014672686230248307,
"grad_norm": 11.997466087341309,
"learning_rate": 0.000190630778703665,
"loss": 1.7946,
"step": 13
},
{
"epoch": 0.01580135440180587,
"grad_norm": 12.115503311157227,
"learning_rate": 0.00018870108331782217,
"loss": 2.4233,
"step": 14
},
{
"epoch": 0.016930022573363433,
"grad_norm": 11.261882781982422,
"learning_rate": 0.00018660254037844388,
"loss": 1.8223,
"step": 15
},
{
"epoch": 0.01805869074492099,
"grad_norm": 10.030159950256348,
"learning_rate": 0.0001843391445812886,
"loss": 1.8842,
"step": 16
},
{
"epoch": 0.019187358916478554,
"grad_norm": 12.912628173828125,
"learning_rate": 0.0001819152044288992,
"loss": 2.4811,
"step": 17
},
{
"epoch": 0.020316027088036117,
"grad_norm": 14.034784317016602,
"learning_rate": 0.00017933533402912354,
"loss": 2.0478,
"step": 18
},
{
"epoch": 0.02144469525959368,
"grad_norm": 11.340625762939453,
"learning_rate": 0.0001766044443118978,
"loss": 1.4553,
"step": 19
},
{
"epoch": 0.022573363431151242,
"grad_norm": 13.417366981506348,
"learning_rate": 0.0001737277336810124,
"loss": 2.2667,
"step": 20
},
{
"epoch": 0.023702031602708805,
"grad_norm": 10.488327026367188,
"learning_rate": 0.00017071067811865476,
"loss": 1.8455,
"step": 21
},
{
"epoch": 0.024830699774266364,
"grad_norm": 8.669241905212402,
"learning_rate": 0.00016755902076156604,
"loss": 1.7471,
"step": 22
},
{
"epoch": 0.025959367945823927,
"grad_norm": 8.316575050354004,
"learning_rate": 0.00016427876096865394,
"loss": 1.4701,
"step": 23
},
{
"epoch": 0.02708803611738149,
"grad_norm": 9.43625259399414,
"learning_rate": 0.00016087614290087208,
"loss": 1.6207,
"step": 24
},
{
"epoch": 0.028216704288939052,
"grad_norm": 8.611120223999023,
"learning_rate": 0.0001573576436351046,
"loss": 1.642,
"step": 25
},
{
"epoch": 0.028216704288939052,
"eval_loss": 1.9240721464157104,
"eval_runtime": 220.5343,
"eval_samples_per_second": 3.387,
"eval_steps_per_second": 1.696,
"step": 25
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.83447180771328e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}