{
"best_metric": 0.6671230792999268,
"best_model_checkpoint": "miner_id_24/checkpoint-25",
"epoch": 3.076923076923077,
"eval_steps": 25,
"global_step": 30,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.10256410256410256,
"grad_norm": 33.737701416015625,
"learning_rate": 5e-05,
"loss": 10.0947,
"step": 1
},
{
"epoch": 0.10256410256410256,
"eval_loss": 10.191614151000977,
"eval_runtime": 1.4382,
"eval_samples_per_second": 34.766,
"eval_steps_per_second": 9.039,
"step": 1
},
{
"epoch": 0.20512820512820512,
"grad_norm": 30.049089431762695,
"learning_rate": 0.0001,
"loss": 10.1498,
"step": 2
},
{
"epoch": 0.3076923076923077,
"grad_norm": 29.79092025756836,
"learning_rate": 9.971704944519594e-05,
"loss": 8.9179,
"step": 3
},
{
"epoch": 0.41025641025641024,
"grad_norm": 24.568462371826172,
"learning_rate": 9.887175604818206e-05,
"loss": 5.7438,
"step": 4
},
{
"epoch": 0.5128205128205128,
"grad_norm": 20.547700881958008,
"learning_rate": 9.747474986387654e-05,
"loss": 3.427,
"step": 5
},
{
"epoch": 0.6153846153846154,
"grad_norm": 12.677267074584961,
"learning_rate": 9.554359905560886e-05,
"loss": 2.256,
"step": 6
},
{
"epoch": 0.717948717948718,
"grad_norm": 9.974773406982422,
"learning_rate": 9.310258896527278e-05,
"loss": 1.4638,
"step": 7
},
{
"epoch": 0.8205128205128205,
"grad_norm": 4.876607894897461,
"learning_rate": 9.018241671106134e-05,
"loss": 1.0442,
"step": 8
},
{
"epoch": 0.9230769230769231,
"grad_norm": 2.5556421279907227,
"learning_rate": 8.681980515339464e-05,
"loss": 0.8391,
"step": 9
},
{
"epoch": 1.0256410256410255,
"grad_norm": 5.1846771240234375,
"learning_rate": 8.305704108364301e-05,
"loss": 1.0291,
"step": 10
},
{
"epoch": 1.1282051282051282,
"grad_norm": 0.995373547077179,
"learning_rate": 7.894144344319014e-05,
"loss": 0.7795,
"step": 11
},
{
"epoch": 1.2307692307692308,
"grad_norm": 1.5625572204589844,
"learning_rate": 7.452476826029011e-05,
"loss": 0.7668,
"step": 12
},
{
"epoch": 1.3333333333333333,
"grad_norm": 2.6887452602386475,
"learning_rate": 6.986255778798253e-05,
"loss": 0.8087,
"step": 13
},
{
"epoch": 1.435897435897436,
"grad_norm": 0.9819567203521729,
"learning_rate": 6.501344202803414e-05,
"loss": 0.7795,
"step": 14
},
{
"epoch": 1.5384615384615383,
"grad_norm": 1.8303066492080688,
"learning_rate": 6.003840142464886e-05,
"loss": 0.7573,
"step": 15
},
{
"epoch": 1.641025641025641,
"grad_norm": 1.412522315979004,
"learning_rate": 5.500000000000001e-05,
"loss": 0.8,
"step": 16
},
{
"epoch": 1.7435897435897436,
"grad_norm": 2.325110673904419,
"learning_rate": 4.9961598575351155e-05,
"loss": 0.7713,
"step": 17
},
{
"epoch": 1.8461538461538463,
"grad_norm": 1.1739482879638672,
"learning_rate": 4.498655797196586e-05,
"loss": 0.7873,
"step": 18
},
{
"epoch": 1.9487179487179487,
"grad_norm": 1.4842931032180786,
"learning_rate": 4.01374422120175e-05,
"loss": 0.803,
"step": 19
},
{
"epoch": 2.051282051282051,
"grad_norm": 1.1867262125015259,
"learning_rate": 3.547523173970989e-05,
"loss": 0.8698,
"step": 20
},
{
"epoch": 2.1538461538461537,
"grad_norm": 2.1852214336395264,
"learning_rate": 3.105855655680986e-05,
"loss": 0.8206,
"step": 21
},
{
"epoch": 2.2564102564102564,
"grad_norm": 2.442964792251587,
"learning_rate": 2.6942958916356998e-05,
"loss": 0.7546,
"step": 22
},
{
"epoch": 2.358974358974359,
"grad_norm": 2.661452293395996,
"learning_rate": 2.3180194846605367e-05,
"loss": 0.8421,
"step": 23
},
{
"epoch": 2.4615384615384617,
"grad_norm": 1.0270347595214844,
"learning_rate": 1.981758328893866e-05,
"loss": 0.7241,
"step": 24
},
{
"epoch": 2.564102564102564,
"grad_norm": 0.8220930099487305,
"learning_rate": 1.6897411034727218e-05,
"loss": 0.7101,
"step": 25
},
{
"epoch": 2.564102564102564,
"eval_loss": 0.6671230792999268,
"eval_runtime": 1.4963,
"eval_samples_per_second": 33.416,
"eval_steps_per_second": 8.688,
"step": 25
},
{
"epoch": 2.6666666666666665,
"grad_norm": 1.176918387413025,
"learning_rate": 1.4456400944391146e-05,
"loss": 0.7793,
"step": 26
},
{
"epoch": 2.769230769230769,
"grad_norm": 0.5590916872024536,
"learning_rate": 1.252525013612346e-05,
"loss": 0.7132,
"step": 27
},
{
"epoch": 2.871794871794872,
"grad_norm": 0.7971133589744568,
"learning_rate": 1.1128243951817937e-05,
"loss": 0.736,
"step": 28
},
{
"epoch": 2.9743589743589745,
"grad_norm": 0.9218295216560364,
"learning_rate": 1.0282950554804085e-05,
"loss": 0.8827,
"step": 29
},
{
"epoch": 3.076923076923077,
"grad_norm": 2.245664119720459,
"learning_rate": 1e-05,
"loss": 0.8213,
"step": 30
}
],
"logging_steps": 1,
"max_steps": 30,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.698257208901632e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}