{
  "best_metric": 0.8249684572219849,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 2.2988505747126435,
  "eval_steps": 25,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09195402298850575,
      "grad_norm": 36.40338134765625,
      "learning_rate": 5e-05,
      "loss": 7.3875,
      "step": 1
    },
    {
      "epoch": 0.09195402298850575,
      "eval_loss": 7.518415927886963,
      "eval_runtime": 2.3602,
      "eval_samples_per_second": 31.353,
      "eval_steps_per_second": 4.237,
      "step": 1
    },
    {
      "epoch": 0.1839080459770115,
      "grad_norm": 36.9449462890625,
      "learning_rate": 0.0001,
      "loss": 7.4548,
      "step": 2
    },
    {
      "epoch": 0.27586206896551724,
      "grad_norm": 50.2653694152832,
      "learning_rate": 9.974346616959476e-05,
      "loss": 4.7082,
      "step": 3
    },
    {
      "epoch": 0.367816091954023,
      "grad_norm": 18.120019912719727,
      "learning_rate": 9.897649706262473e-05,
      "loss": 1.6205,
      "step": 4
    },
    {
      "epoch": 0.45977011494252873,
      "grad_norm": 11.145186424255371,
      "learning_rate": 9.770696282000244e-05,
      "loss": 1.1133,
      "step": 5
    },
    {
      "epoch": 0.5517241379310345,
      "grad_norm": 11.67027473449707,
      "learning_rate": 9.594789058101153e-05,
      "loss": 0.9955,
      "step": 6
    },
    {
      "epoch": 0.6436781609195402,
      "grad_norm": 2.167078971862793,
      "learning_rate": 9.371733080722911e-05,
      "loss": 0.8398,
      "step": 7
    },
    {
      "epoch": 0.735632183908046,
      "grad_norm": 7.674814224243164,
      "learning_rate": 9.103817206036382e-05,
      "loss": 1.0311,
      "step": 8
    },
    {
      "epoch": 0.8275862068965517,
      "grad_norm": 1.134879469871521,
      "learning_rate": 8.793790613463955e-05,
      "loss": 0.8035,
      "step": 9
    },
    {
      "epoch": 0.9195402298850575,
      "grad_norm": 4.637085437774658,
      "learning_rate": 8.444834595378434e-05,
      "loss": 0.8538,
      "step": 10
    },
    {
      "epoch": 1.0114942528735633,
      "grad_norm": 22.309356689453125,
      "learning_rate": 8.060529912738315e-05,
      "loss": 1.2659,
      "step": 11
    },
    {
      "epoch": 1.103448275862069,
      "grad_norm": 17.529563903808594,
      "learning_rate": 7.644820051634812e-05,
      "loss": 1.0434,
      "step": 12
    },
    {
      "epoch": 1.1954022988505748,
      "grad_norm": 2.4869232177734375,
      "learning_rate": 7.201970757788172e-05,
      "loss": 0.7968,
      "step": 13
    },
    {
      "epoch": 1.2873563218390804,
      "grad_norm": 6.888029098510742,
      "learning_rate": 6.736526264224101e-05,
      "loss": 0.9996,
      "step": 14
    },
    {
      "epoch": 1.3793103448275863,
      "grad_norm": 1.6961114406585693,
      "learning_rate": 6.253262661293604e-05,
      "loss": 0.8331,
      "step": 15
    },
    {
      "epoch": 1.471264367816092,
      "grad_norm": 5.048608779907227,
      "learning_rate": 5.757138887522884e-05,
      "loss": 1.0152,
      "step": 16
    },
    {
      "epoch": 1.5632183908045976,
      "grad_norm": 0.6340063810348511,
      "learning_rate": 5.2532458441935636e-05,
      "loss": 0.7896,
      "step": 17
    },
    {
      "epoch": 1.6551724137931034,
      "grad_norm": 0.6864052414894104,
      "learning_rate": 4.746754155806437e-05,
      "loss": 0.7839,
      "step": 18
    },
    {
      "epoch": 1.7471264367816093,
      "grad_norm": 5.3248724937438965,
      "learning_rate": 4.2428611124771184e-05,
      "loss": 0.8882,
      "step": 19
    },
    {
      "epoch": 1.839080459770115,
      "grad_norm": 1.964074969291687,
      "learning_rate": 3.746737338706397e-05,
      "loss": 0.8144,
      "step": 20
    },
    {
      "epoch": 1.9310344827586206,
      "grad_norm": 2.0052649974823,
      "learning_rate": 3.263473735775899e-05,
      "loss": 0.8666,
      "step": 21
    },
    {
      "epoch": 2.0229885057471266,
      "grad_norm": 2.9962081909179688,
      "learning_rate": 2.798029242211828e-05,
      "loss": 0.9786,
      "step": 22
    },
    {
      "epoch": 2.1149425287356323,
      "grad_norm": 3.63395094871521,
      "learning_rate": 2.3551799483651894e-05,
      "loss": 0.8251,
      "step": 23
    },
    {
      "epoch": 2.206896551724138,
      "grad_norm": 2.464134931564331,
      "learning_rate": 1.9394700872616855e-05,
      "loss": 0.8161,
      "step": 24
    },
    {
      "epoch": 2.2988505747126435,
      "grad_norm": 3.2755239009857178,
      "learning_rate": 1.555165404621567e-05,
      "loss": 0.8579,
      "step": 25
    },
    {
      "epoch": 2.2988505747126435,
      "eval_loss": 0.8249684572219849,
      "eval_runtime": 2.3682,
      "eval_samples_per_second": 31.248,
      "eval_steps_per_second": 4.223,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 33,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.087239672233984e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}