{
"best_metric": 11.929618835449219,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.11217049915872125,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002243409983174425,
"grad_norm": 0.020893922075629234,
"learning_rate": 5e-06,
"loss": 11.9328,
"step": 1
},
{
"epoch": 0.002243409983174425,
"eval_loss": 11.932412147521973,
"eval_runtime": 4.0488,
"eval_samples_per_second": 185.485,
"eval_steps_per_second": 23.217,
"step": 1
},
{
"epoch": 0.00448681996634885,
"grad_norm": 0.02059100568294525,
"learning_rate": 1e-05,
"loss": 11.9339,
"step": 2
},
{
"epoch": 0.006730229949523275,
"grad_norm": 0.019679663702845573,
"learning_rate": 1.5e-05,
"loss": 11.9333,
"step": 3
},
{
"epoch": 0.0089736399326977,
"grad_norm": 0.01955411024391651,
"learning_rate": 2e-05,
"loss": 11.9326,
"step": 4
},
{
"epoch": 0.011217049915872126,
"grad_norm": 0.017372841015458107,
"learning_rate": 2.5e-05,
"loss": 11.9329,
"step": 5
},
{
"epoch": 0.01346045989904655,
"grad_norm": 0.018285011872649193,
"learning_rate": 3e-05,
"loss": 11.9328,
"step": 6
},
{
"epoch": 0.015703869882220976,
"grad_norm": 0.019042452797293663,
"learning_rate": 3.5e-05,
"loss": 11.9325,
"step": 7
},
{
"epoch": 0.0179472798653954,
"grad_norm": 0.01803368702530861,
"learning_rate": 4e-05,
"loss": 11.933,
"step": 8
},
{
"epoch": 0.020190689848569827,
"grad_norm": 0.016315225511789322,
"learning_rate": 4.5e-05,
"loss": 11.9328,
"step": 9
},
{
"epoch": 0.022434099831744252,
"grad_norm": 0.01673940010368824,
"learning_rate": 5e-05,
"loss": 11.9328,
"step": 10
},
{
"epoch": 0.024677509814918678,
"grad_norm": 0.013101082295179367,
"learning_rate": 5.500000000000001e-05,
"loss": 11.9339,
"step": 11
},
{
"epoch": 0.0269209197980931,
"grad_norm": 0.012066581286489964,
"learning_rate": 6e-05,
"loss": 11.9315,
"step": 12
},
{
"epoch": 0.029164329781267526,
"grad_norm": 0.013472984544932842,
"learning_rate": 6.500000000000001e-05,
"loss": 11.9321,
"step": 13
},
{
"epoch": 0.03140773976444195,
"grad_norm": 0.01844089850783348,
"learning_rate": 7e-05,
"loss": 11.9313,
"step": 14
},
{
"epoch": 0.03365114974761638,
"grad_norm": 0.016358327120542526,
"learning_rate": 7.500000000000001e-05,
"loss": 11.9319,
"step": 15
},
{
"epoch": 0.0358945597307908,
"grad_norm": 0.01452179066836834,
"learning_rate": 8e-05,
"loss": 11.9312,
"step": 16
},
{
"epoch": 0.03813796971396523,
"grad_norm": 0.014436909928917885,
"learning_rate": 8.5e-05,
"loss": 11.9319,
"step": 17
},
{
"epoch": 0.040381379697139654,
"grad_norm": 0.014677215367555618,
"learning_rate": 9e-05,
"loss": 11.9322,
"step": 18
},
{
"epoch": 0.04262478968031408,
"grad_norm": 0.020382890477776527,
"learning_rate": 9.5e-05,
"loss": 11.9319,
"step": 19
},
{
"epoch": 0.044868199663488505,
"grad_norm": 0.020231682807207108,
"learning_rate": 0.0001,
"loss": 11.9305,
"step": 20
},
{
"epoch": 0.04711160964666293,
"grad_norm": 0.018099000677466393,
"learning_rate": 9.972609476841367e-05,
"loss": 11.9309,
"step": 21
},
{
"epoch": 0.049355019629837356,
"grad_norm": 0.015480943955481052,
"learning_rate": 9.890738003669029e-05,
"loss": 11.9323,
"step": 22
},
{
"epoch": 0.051598429613011774,
"grad_norm": 0.016374170780181885,
"learning_rate": 9.755282581475769e-05,
"loss": 11.9319,
"step": 23
},
{
"epoch": 0.0538418395961862,
"grad_norm": 0.024180244654417038,
"learning_rate": 9.567727288213005e-05,
"loss": 11.9327,
"step": 24
},
{
"epoch": 0.056085249579360626,
"grad_norm": 0.025373732671141624,
"learning_rate": 9.330127018922194e-05,
"loss": 11.9319,
"step": 25
},
{
"epoch": 0.056085249579360626,
"eval_loss": 11.931512832641602,
"eval_runtime": 4.0447,
"eval_samples_per_second": 185.677,
"eval_steps_per_second": 23.241,
"step": 25
},
{
"epoch": 0.05832865956253505,
"grad_norm": 0.021957021206617355,
"learning_rate": 9.045084971874738e-05,
"loss": 11.9322,
"step": 26
},
{
"epoch": 0.06057206954570948,
"grad_norm": 0.025794783607125282,
"learning_rate": 8.715724127386972e-05,
"loss": 11.9327,
"step": 27
},
{
"epoch": 0.0628154795288839,
"grad_norm": 0.034966181963682175,
"learning_rate": 8.345653031794292e-05,
"loss": 11.9313,
"step": 28
},
{
"epoch": 0.06505888951205833,
"grad_norm": 0.032141417264938354,
"learning_rate": 7.938926261462366e-05,
"loss": 11.9309,
"step": 29
},
{
"epoch": 0.06730229949523275,
"grad_norm": 0.036759525537490845,
"learning_rate": 7.500000000000001e-05,
"loss": 11.9322,
"step": 30
},
{
"epoch": 0.06954570947840717,
"grad_norm": 0.028740139678120613,
"learning_rate": 7.033683215379002e-05,
"loss": 11.9313,
"step": 31
},
{
"epoch": 0.0717891194615816,
"grad_norm": 0.03210673853754997,
"learning_rate": 6.545084971874738e-05,
"loss": 11.931,
"step": 32
},
{
"epoch": 0.07403252944475602,
"grad_norm": 0.05024946480989456,
"learning_rate": 6.0395584540887963e-05,
"loss": 11.9295,
"step": 33
},
{
"epoch": 0.07627593942793046,
"grad_norm": 0.041959248483181,
"learning_rate": 5.522642316338268e-05,
"loss": 11.932,
"step": 34
},
{
"epoch": 0.07851934941110487,
"grad_norm": 0.050003744661808014,
"learning_rate": 5e-05,
"loss": 11.9288,
"step": 35
},
{
"epoch": 0.08076275939427931,
"grad_norm": 0.04736671969294548,
"learning_rate": 4.477357683661734e-05,
"loss": 11.9286,
"step": 36
},
{
"epoch": 0.08300616937745373,
"grad_norm": 0.0493813119828701,
"learning_rate": 3.960441545911204e-05,
"loss": 11.9302,
"step": 37
},
{
"epoch": 0.08524957936062816,
"grad_norm": 0.04406358674168587,
"learning_rate": 3.4549150281252636e-05,
"loss": 11.9318,
"step": 38
},
{
"epoch": 0.08749298934380258,
"grad_norm": 0.05293726176023483,
"learning_rate": 2.9663167846209998e-05,
"loss": 11.9301,
"step": 39
},
{
"epoch": 0.08973639932697701,
"grad_norm": 0.04568186402320862,
"learning_rate": 2.500000000000001e-05,
"loss": 11.9317,
"step": 40
},
{
"epoch": 0.09197980931015143,
"grad_norm": 0.06105807051062584,
"learning_rate": 2.061073738537635e-05,
"loss": 11.9306,
"step": 41
},
{
"epoch": 0.09422321929332586,
"grad_norm": 0.05900374799966812,
"learning_rate": 1.6543469682057106e-05,
"loss": 11.9279,
"step": 42
},
{
"epoch": 0.09646662927650028,
"grad_norm": 0.05491477623581886,
"learning_rate": 1.2842758726130283e-05,
"loss": 11.9291,
"step": 43
},
{
"epoch": 0.09871003925967471,
"grad_norm": 0.039628300815820694,
"learning_rate": 9.549150281252633e-06,
"loss": 11.9298,
"step": 44
},
{
"epoch": 0.10095344924284913,
"grad_norm": 0.05878068134188652,
"learning_rate": 6.698729810778065e-06,
"loss": 11.9303,
"step": 45
},
{
"epoch": 0.10319685922602355,
"grad_norm": 0.06594429910182953,
"learning_rate": 4.322727117869951e-06,
"loss": 11.9312,
"step": 46
},
{
"epoch": 0.10544026920919798,
"grad_norm": 0.05928008258342743,
"learning_rate": 2.4471741852423237e-06,
"loss": 11.9317,
"step": 47
},
{
"epoch": 0.1076836791923724,
"grad_norm": 0.07683888077735901,
"learning_rate": 1.0926199633097157e-06,
"loss": 11.9279,
"step": 48
},
{
"epoch": 0.10992708917554683,
"grad_norm": 0.06153682991862297,
"learning_rate": 2.7390523158633554e-07,
"loss": 11.9302,
"step": 49
},
{
"epoch": 0.11217049915872125,
"grad_norm": 0.06444248557090759,
"learning_rate": 0.0,
"loss": 11.9299,
"step": 50
},
{
"epoch": 0.11217049915872125,
"eval_loss": 11.929618835449219,
"eval_runtime": 4.0408,
"eval_samples_per_second": 185.856,
"eval_steps_per_second": 23.263,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 241041408000.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}