{
  "best_metric": 0.26085758209228516,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 2.150537634408602,
  "eval_steps": 25,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08602150537634409,
      "grad_norm": 1.8886111974716187,
      "learning_rate": 5e-05,
      "loss": 0.685,
      "step": 1
    },
    {
      "epoch": 0.08602150537634409,
      "eval_loss": 0.7152150869369507,
      "eval_runtime": 3.5362,
      "eval_samples_per_second": 22.34,
      "eval_steps_per_second": 2.828,
      "step": 1
    },
    {
      "epoch": 0.17204301075268819,
      "grad_norm": 2.1663036346435547,
      "learning_rate": 0.0001,
      "loss": 0.6948,
      "step": 2
    },
    {
      "epoch": 0.25806451612903225,
      "grad_norm": 1.239184021949768,
      "learning_rate": 9.977359612865423e-05,
      "loss": 0.694,
      "step": 3
    },
    {
      "epoch": 0.34408602150537637,
      "grad_norm": 0.6597561836242676,
      "learning_rate": 9.909643486313533e-05,
      "loss": 0.4295,
      "step": 4
    },
    {
      "epoch": 0.43010752688172044,
      "grad_norm": 0.7343956828117371,
      "learning_rate": 9.797464868072488e-05,
      "loss": 0.4282,
      "step": 5
    },
    {
      "epoch": 0.5161290322580645,
      "grad_norm": 0.6272724866867065,
      "learning_rate": 9.641839665080363e-05,
      "loss": 0.409,
      "step": 6
    },
    {
      "epoch": 0.6021505376344086,
      "grad_norm": 0.4894818961620331,
      "learning_rate": 9.444177243274618e-05,
      "loss": 0.3302,
      "step": 7
    },
    {
      "epoch": 0.6881720430107527,
      "grad_norm": 0.49684205651283264,
      "learning_rate": 9.206267664155907e-05,
      "loss": 0.3077,
      "step": 8
    },
    {
      "epoch": 0.7741935483870968,
      "grad_norm": 0.36764946579933167,
      "learning_rate": 8.930265473713938e-05,
      "loss": 0.3224,
      "step": 9
    },
    {
      "epoch": 0.8602150537634409,
      "grad_norm": 0.31620141863822937,
      "learning_rate": 8.618670190525352e-05,
      "loss": 0.3121,
      "step": 10
    },
    {
      "epoch": 0.946236559139785,
      "grad_norm": 0.37370702624320984,
      "learning_rate": 8.274303669726426e-05,
      "loss": 0.3347,
      "step": 11
    },
    {
      "epoch": 1.032258064516129,
      "grad_norm": 0.5700744390487671,
      "learning_rate": 7.900284547855991e-05,
      "loss": 0.4621,
      "step": 12
    },
    {
      "epoch": 1.118279569892473,
      "grad_norm": 0.43438950181007385,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.2736,
      "step": 13
    },
    {
      "epoch": 1.2043010752688172,
      "grad_norm": 0.39586031436920166,
      "learning_rate": 7.077075065009433e-05,
      "loss": 0.2847,
      "step": 14
    },
    {
      "epoch": 1.2903225806451613,
      "grad_norm": 0.4413219094276428,
      "learning_rate": 6.635339816587109e-05,
      "loss": 0.3298,
      "step": 15
    },
    {
      "epoch": 1.3763440860215055,
      "grad_norm": 0.3812790811061859,
      "learning_rate": 6.178794677547137e-05,
      "loss": 0.2532,
      "step": 16
    },
    {
      "epoch": 1.4623655913978495,
      "grad_norm": 0.3373176157474518,
      "learning_rate": 5.7115741913664264e-05,
      "loss": 0.2443,
      "step": 17
    },
    {
      "epoch": 1.5483870967741935,
      "grad_norm": 0.31114816665649414,
      "learning_rate": 5.2379095791187124e-05,
      "loss": 0.3003,
      "step": 18
    },
    {
      "epoch": 1.6344086021505375,
      "grad_norm": 0.32430943846702576,
      "learning_rate": 4.762090420881289e-05,
      "loss": 0.2514,
      "step": 19
    },
    {
      "epoch": 1.7204301075268817,
      "grad_norm": 0.3630256950855255,
      "learning_rate": 4.288425808633575e-05,
      "loss": 0.2745,
      "step": 20
    },
    {
      "epoch": 1.8064516129032258,
      "grad_norm": 0.27914172410964966,
      "learning_rate": 3.821205322452863e-05,
      "loss": 0.242,
      "step": 21
    },
    {
      "epoch": 1.89247311827957,
      "grad_norm": 0.37263160943984985,
      "learning_rate": 3.364660183412892e-05,
      "loss": 0.2772,
      "step": 22
    },
    {
      "epoch": 1.978494623655914,
      "grad_norm": 0.46930578351020813,
      "learning_rate": 2.9229249349905684e-05,
      "loss": 0.3409,
      "step": 23
    },
    {
      "epoch": 2.064516129032258,
      "grad_norm": 0.40534141659736633,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.3342,
      "step": 24
    },
    {
      "epoch": 2.150537634408602,
      "grad_norm": 0.34685373306274414,
      "learning_rate": 2.09971545214401e-05,
      "loss": 0.2348,
      "step": 25
    },
    {
      "epoch": 2.150537634408602,
      "eval_loss": 0.26085758209228516,
      "eval_runtime": 3.5429,
      "eval_samples_per_second": 22.298,
      "eval_steps_per_second": 2.823,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 35,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.28244602173653e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}