Training in progress, step 31, checkpoint
{
"best_metric": 2.7029125690460205,
"best_model_checkpoint": "miner_id_24/checkpoint-25",
"epoch": 3.024390243902439,
"eval_steps": 25,
"global_step": 31,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0975609756097561,
"grad_norm": 7.10798978805542,
"learning_rate": 5e-05,
"loss": 5.2901,
"step": 1
},
{
"epoch": 0.0975609756097561,
"eval_loss": 6.794641494750977,
"eval_runtime": 0.6416,
"eval_samples_per_second": 77.936,
"eval_steps_per_second": 20.263,
"step": 1
},
{
"epoch": 0.1951219512195122,
"grad_norm": 13.896225929260254,
"learning_rate": 0.0001,
"loss": 6.7989,
"step": 2
},
{
"epoch": 0.2926829268292683,
"grad_norm": 6.749074459075928,
"learning_rate": 9.97362080719462e-05,
"loss": 5.3098,
"step": 3
},
{
"epoch": 0.3902439024390244,
"grad_norm": 11.291173934936523,
"learning_rate": 9.89479250069539e-05,
"loss": 4.9457,
"step": 4
},
{
"epoch": 0.4878048780487805,
"grad_norm": 51.770416259765625,
"learning_rate": 9.764439270322611e-05,
"loss": 6.001,
"step": 5
},
{
"epoch": 0.5853658536585366,
"grad_norm": 7.940297603607178,
"learning_rate": 9.584089388519307e-05,
"loss": 3.9051,
"step": 6
},
{
"epoch": 0.6829268292682927,
"grad_norm": 23.023454666137695,
"learning_rate": 9.355857292754152e-05,
"loss": 3.9001,
"step": 7
},
{
"epoch": 0.7804878048780488,
"grad_norm": 5.207144737243652,
"learning_rate": 9.082418795675397e-05,
"loss": 3.8266,
"step": 8
},
{
"epoch": 0.8780487804878049,
"grad_norm": 8.671276092529297,
"learning_rate": 8.766979713654089e-05,
"loss": 3.5264,
"step": 9
},
{
"epoch": 0.975609756097561,
"grad_norm": 17.078868865966797,
"learning_rate": 8.413238281518225e-05,
"loss": 3.9503,
"step": 10
},
{
"epoch": 1.0731707317073171,
"grad_norm": 6.692962646484375,
"learning_rate": 8.025341794130722e-05,
"loss": 5.3809,
"step": 11
},
{
"epoch": 1.170731707317073,
"grad_norm": 5.900484085083008,
"learning_rate": 7.607837983149056e-05,
"loss": 3.0676,
"step": 12
},
{
"epoch": 1.2682926829268293,
"grad_norm": 3.715177297592163,
"learning_rate": 7.165621699029615e-05,
"loss": 2.3503,
"step": 13
},
{
"epoch": 1.3658536585365852,
"grad_norm": 8.826342582702637,
"learning_rate": 6.703877523381496e-05,
"loss": 3.4234,
"step": 14
},
{
"epoch": 1.4634146341463414,
"grad_norm": 7.122591972351074,
"learning_rate": 6.228018984487442e-05,
"loss": 2.9165,
"step": 15
},
{
"epoch": 1.5609756097560976,
"grad_norm": 4.160889148712158,
"learning_rate": 5.74362508863438e-05,
"loss": 2.9145,
"step": 16
},
{
"epoch": 1.6585365853658538,
"grad_norm": 5.585999011993408,
"learning_rate": 5.2563749113656216e-05,
"loss": 2.6976,
"step": 17
},
{
"epoch": 1.7560975609756098,
"grad_norm": 2.9115383625030518,
"learning_rate": 4.771981015512559e-05,
"loss": 2.1684,
"step": 18
},
{
"epoch": 1.8536585365853657,
"grad_norm": 5.3558268547058105,
"learning_rate": 4.296122476618507e-05,
"loss": 2.8113,
"step": 19
},
{
"epoch": 1.951219512195122,
"grad_norm": 11.97620964050293,
"learning_rate": 3.834378300970385e-05,
"loss": 2.7463,
"step": 20
},
{
"epoch": 2.048780487804878,
"grad_norm": 6.126107692718506,
"learning_rate": 3.392162016850945e-05,
"loss": 4.1316,
"step": 21
},
{
"epoch": 2.1463414634146343,
"grad_norm": 3.347085475921631,
"learning_rate": 2.97465820586928e-05,
"loss": 2.2521,
"step": 22
},
{
"epoch": 2.2439024390243905,
"grad_norm": 3.246694326400757,
"learning_rate": 2.586761718481776e-05,
"loss": 1.2408,
"step": 23
},
{
"epoch": 2.341463414634146,
"grad_norm": 6.535632133483887,
"learning_rate": 2.2330202863459122e-05,
"loss": 3.2995,
"step": 24
},
{
"epoch": 2.4390243902439024,
"grad_norm": 3.856961250305176,
"learning_rate": 1.9175812043246033e-05,
"loss": 2.2837,
"step": 25
},
{
"epoch": 2.4390243902439024,
"eval_loss": 2.7029125690460205,
"eval_runtime": 0.6175,
"eval_samples_per_second": 80.966,
"eval_steps_per_second": 21.051,
"step": 25
},
{
"epoch": 2.5365853658536586,
"grad_norm": 4.493419170379639,
"learning_rate": 1.6441427072458494e-05,
"loss": 2.7897,
"step": 26
},
{
"epoch": 2.6341463414634148,
"grad_norm": 3.798736810684204,
"learning_rate": 1.4159106114806942e-05,
"loss": 2.2273,
"step": 27
},
{
"epoch": 2.7317073170731705,
"grad_norm": 3.0154953002929688,
"learning_rate": 1.2355607296773895e-05,
"loss": 1.2718,
"step": 28
},
{
"epoch": 2.8292682926829267,
"grad_norm": 6.761699199676514,
"learning_rate": 1.1052074993046102e-05,
"loss": 3.4291,
"step": 29
},
{
"epoch": 2.926829268292683,
"grad_norm": 3.54974365234375,
"learning_rate": 1.0263791928053819e-05,
"loss": 2.36,
"step": 30
},
{
"epoch": 3.024390243902439,
"grad_norm": 5.698737144470215,
"learning_rate": 1e-05,
"loss": 4.0453,
"step": 31
}
],
"logging_steps": 1,
"max_steps": 31,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.830865658393395e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
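
The state above follows the layout the Hugging Face transformers Trainer writes for each saved checkpoint: per-step training records and periodic evaluation records interleaved in log_history, plus the best metric and checkpoint seen so far. A minimal sketch of how one might read it back and separate the two loss curves; the file name trainer_state.json is the Trainer's standard name, while the checkpoint-31 path is a hypothetical example, not confirmed by this snapshot:

import json

# Minimal sketch: load the Trainer state (assumed saved as trainer_state.json
# inside the checkpoint directory; "checkpoint-31" is a hypothetical path).
with open("checkpoint-31/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training records (with "loss") and evaluation records
# (with "eval_loss"); split them into two curves keyed by global step.
train_curve = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_curve = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print("best eval loss:", state["best_metric"], "from", state["best_model_checkpoint"])
print("final training loss:", train_curve[-1])
print("evaluation points:", eval_curve)

For this particular state the script would report a best eval loss of about 2.70 at checkpoint-25 and list the two evaluation points recorded at steps 1 and 25.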