Training in progress, step 37, checkpoint
{
  "best_metric": NaN,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 3.051546391752577,
  "eval_steps": 25,
  "global_step": 37,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08247422680412371,
      "grad_norm": NaN,
      "learning_rate": 5e-05,
      "loss": 0.0,
      "step": 1
    },
    {
      "epoch": 0.08247422680412371,
      "eval_loss": NaN,
      "eval_runtime": 7.1462,
      "eval_samples_per_second": 6.997,
      "eval_steps_per_second": 1.819,
      "step": 1
    },
    {
      "epoch": 0.16494845360824742,
      "grad_norm": NaN,
      "learning_rate": 0.0001,
      "loss": 0.0,
      "step": 2
    },
    {
      "epoch": 0.24742268041237114,
      "grad_norm": NaN,
      "learning_rate": 9.981884322978576e-05,
      "loss": 0.0,
      "step": 3
    },
    {
      "epoch": 0.32989690721649484,
      "grad_norm": NaN,
      "learning_rate": 9.927683148693833e-05,
      "loss": 0.0,
      "step": 4
    },
    {
      "epoch": 0.41237113402061853,
      "grad_norm": NaN,
      "learning_rate": 9.83783287313134e-05,
      "loss": 0.0,
      "step": 5
    },
    {
      "epoch": 0.4948453608247423,
      "grad_norm": NaN,
      "learning_rate": 9.713056917878818e-05,
      "loss": 0.0,
      "step": 6
    },
    {
      "epoch": 0.5773195876288659,
      "grad_norm": NaN,
      "learning_rate": 9.554359905560886e-05,
      "loss": 0.0,
      "step": 7
    },
    {
      "epoch": 0.6597938144329897,
      "grad_norm": NaN,
      "learning_rate": 9.363019571208397e-05,
      "loss": 0.0,
      "step": 8
    },
    {
      "epoch": 0.7422680412371134,
      "grad_norm": NaN,
      "learning_rate": 9.140576474687264e-05,
      "loss": 0.0,
      "step": 9
    },
    {
      "epoch": 0.8247422680412371,
      "grad_norm": NaN,
      "learning_rate": 8.88882159701625e-05,
      "loss": 0.0,
      "step": 10
    },
    {
      "epoch": 0.9072164948453608,
      "grad_norm": NaN,
      "learning_rate": 8.609781920440891e-05,
      "loss": 0.0,
      "step": 11
    },
    {
      "epoch": 0.9896907216494846,
      "grad_norm": NaN,
      "learning_rate": 8.305704108364301e-05,
      "loss": 0.0,
      "step": 12
    },
    {
      "epoch": 1.0721649484536082,
      "grad_norm": NaN,
      "learning_rate": 7.979036416534463e-05,
      "loss": 0.0,
      "step": 13
    },
    {
      "epoch": 1.1546391752577319,
      "grad_norm": NaN,
      "learning_rate": 7.632408981128494e-05,
      "loss": 0.0,
      "step": 14
    },
    {
      "epoch": 1.2371134020618557,
      "grad_norm": NaN,
      "learning_rate": 7.268612642442657e-05,
      "loss": 0.0,
      "step": 15
    },
    {
      "epoch": 1.3195876288659794,
      "grad_norm": NaN,
      "learning_rate": 6.890576474687263e-05,
      "loss": 0.0,
      "step": 16
    },
    {
      "epoch": 1.402061855670103,
      "grad_norm": NaN,
      "learning_rate": 6.501344202803414e-05,
      "loss": 0.0,
      "step": 17
    },
    {
      "epoch": 1.4845360824742269,
      "grad_norm": NaN,
      "learning_rate": 6.10404969617945e-05,
      "loss": 0.0,
      "step": 18
    },
    {
      "epoch": 1.5670103092783505,
      "grad_norm": NaN,
      "learning_rate": 5.7018917365773184e-05,
      "loss": 0.0,
      "step": 19
    },
    {
      "epoch": 1.6494845360824741,
      "grad_norm": NaN,
      "learning_rate": 5.2981082634226854e-05,
      "loss": 0.0,
      "step": 20
    },
    {
      "epoch": 1.731958762886598,
      "grad_norm": NaN,
      "learning_rate": 4.895950303820552e-05,
      "loss": 0.0,
      "step": 21
    },
    {
      "epoch": 1.8144329896907216,
      "grad_norm": NaN,
      "learning_rate": 4.498655797196586e-05,
      "loss": 0.0,
      "step": 22
    },
    {
      "epoch": 1.8969072164948453,
      "grad_norm": NaN,
      "learning_rate": 4.109423525312738e-05,
      "loss": 0.0,
      "step": 23
    },
    {
      "epoch": 1.9793814432989691,
      "grad_norm": NaN,
      "learning_rate": 3.7313873575573445e-05,
      "loss": 0.0,
      "step": 24
    },
    {
      "epoch": 2.0618556701030926,
      "grad_norm": NaN,
      "learning_rate": 3.3675910188715066e-05,
      "loss": 0.0,
      "step": 25
    },
    {
      "epoch": 2.0618556701030926,
      "eval_loss": NaN,
      "eval_runtime": 6.2561,
      "eval_samples_per_second": 7.992,
      "eval_steps_per_second": 2.078,
      "step": 25
    },
    {
      "epoch": 2.1443298969072164,
      "grad_norm": NaN,
      "learning_rate": 3.0209635834655392e-05,
      "loss": 0.0,
      "step": 26
    },
    {
      "epoch": 2.2268041237113403,
      "grad_norm": NaN,
      "learning_rate": 2.6942958916356998e-05,
      "loss": 0.0,
      "step": 27
    },
    {
      "epoch": 2.3092783505154637,
      "grad_norm": NaN,
      "learning_rate": 2.3902180795591093e-05,
      "loss": 0.0,
      "step": 28
    },
    {
      "epoch": 2.3917525773195876,
      "grad_norm": NaN,
      "learning_rate": 2.111178402983751e-05,
      "loss": 0.0,
      "step": 29
    },
    {
      "epoch": 2.4742268041237114,
      "grad_norm": NaN,
      "learning_rate": 1.8594235253127375e-05,
      "loss": 0.0,
      "step": 30
    },
    {
      "epoch": 2.556701030927835,
      "grad_norm": NaN,
      "learning_rate": 1.6369804287916028e-05,
      "loss": 0.0,
      "step": 31
    },
    {
      "epoch": 2.6391752577319587,
      "grad_norm": NaN,
      "learning_rate": 1.4456400944391146e-05,
      "loss": 0.0,
      "step": 32
    },
    {
      "epoch": 2.7216494845360826,
      "grad_norm": NaN,
      "learning_rate": 1.2869430821211828e-05,
      "loss": 0.0,
      "step": 33
    },
    {
      "epoch": 2.804123711340206,
      "grad_norm": NaN,
      "learning_rate": 1.1621671268686606e-05,
      "loss": 0.0,
      "step": 34
    },
    {
      "epoch": 2.88659793814433,
      "grad_norm": NaN,
      "learning_rate": 1.0723168513061666e-05,
      "loss": 0.0,
      "step": 35
    },
    {
      "epoch": 2.9690721649484537,
      "grad_norm": NaN,
      "learning_rate": 1.0181156770214243e-05,
      "loss": 0.0,
      "step": 36
    },
    {
      "epoch": 3.051546391752577,
      "grad_norm": NaN,
      "learning_rate": 1e-05,
      "loss": 0.0,
      "step": 37
    }
  ],
  "logging_steps": 1,
  "max_steps": 37,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.784250223102525e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}