{
  "best_metric": 0.410976380109787,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 0.004951261024292124,
  "eval_steps": 25,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00019805044097168497,
      "grad_norm": 0.3693576455116272,
      "learning_rate": 5e-05,
      "loss": 0.4965,
      "step": 1
    },
    {
      "epoch": 0.00019805044097168497,
      "eval_loss": 1.5097695589065552,
      "eval_runtime": 3.0017,
      "eval_samples_per_second": 16.657,
      "eval_steps_per_second": 4.331,
      "step": 1
    },
    {
      "epoch": 0.00039610088194336993,
      "grad_norm": 0.5695458650588989,
      "learning_rate": 0.0001,
      "loss": 0.7833,
      "step": 2
    },
    {
      "epoch": 0.000594151322915055,
      "grad_norm": 0.538998544216156,
      "learning_rate": 9.958086757163489e-05,
      "loss": 0.8395,
      "step": 3
    },
    {
      "epoch": 0.0007922017638867399,
      "grad_norm": 0.48934289813041687,
      "learning_rate": 9.833127793065098e-05,
      "loss": 0.8156,
      "step": 4
    },
    {
      "epoch": 0.000990252204858425,
      "grad_norm": 0.5884993672370911,
      "learning_rate": 9.627450856774539e-05,
      "loss": 0.7746,
      "step": 5
    },
    {
      "epoch": 0.00118830264583011,
      "grad_norm": 0.942425012588501,
      "learning_rate": 9.3448873204592e-05,
      "loss": 0.7724,
      "step": 6
    },
    {
      "epoch": 0.0013863530868017948,
      "grad_norm": 1.2021466493606567,
      "learning_rate": 8.990700808169889e-05,
      "loss": 0.7622,
      "step": 7
    },
    {
      "epoch": 0.0015844035277734797,
      "grad_norm": 1.1035913228988647,
      "learning_rate": 8.571489144483944e-05,
      "loss": 0.8172,
      "step": 8
    },
    {
      "epoch": 0.0017824539687451648,
      "grad_norm": 1.4270416498184204,
      "learning_rate": 8.095061449516903e-05,
      "loss": 0.7218,
      "step": 9
    },
    {
      "epoch": 0.00198050440971685,
      "grad_norm": 1.638703465461731,
      "learning_rate": 7.570292669790186e-05,
      "loss": 0.7281,
      "step": 10
    },
    {
      "epoch": 0.002178554850688535,
      "grad_norm": 1.686064600944519,
      "learning_rate": 7.006958254769438e-05,
      "loss": 0.8658,
      "step": 11
    },
    {
      "epoch": 0.00237660529166022,
      "grad_norm": 2.808910369873047,
      "learning_rate": 6.415552058736854e-05,
      "loss": 1.0043,
      "step": 12
    },
    {
      "epoch": 0.0025746557326319047,
      "grad_norm": 4.342678070068359,
      "learning_rate": 5.80709086014102e-05,
      "loss": 0.4586,
      "step": 13
    },
    {
      "epoch": 0.0027727061736035896,
      "grad_norm": 3.246701955795288,
      "learning_rate": 5.192909139858981e-05,
      "loss": 0.5144,
      "step": 14
    },
    {
      "epoch": 0.0029707566145752745,
      "grad_norm": 1.3120652437210083,
      "learning_rate": 4.584447941263149e-05,
      "loss": 0.5208,
      "step": 15
    },
    {
      "epoch": 0.0031688070555469594,
      "grad_norm": 1.1554656028747559,
      "learning_rate": 3.9930417452305626e-05,
      "loss": 0.5637,
      "step": 16
    },
    {
      "epoch": 0.003366857496518645,
      "grad_norm": 0.8644917607307434,
      "learning_rate": 3.4297073302098156e-05,
      "loss": 0.4661,
      "step": 17
    },
    {
      "epoch": 0.0035649079374903297,
      "grad_norm": 1.2488857507705688,
      "learning_rate": 2.9049385504830985e-05,
      "loss": 0.4439,
      "step": 18
    },
    {
      "epoch": 0.0037629583784620146,
      "grad_norm": 2.353053331375122,
      "learning_rate": 2.4285108555160577e-05,
      "loss": 0.4638,
      "step": 19
    },
    {
      "epoch": 0.0039610088194337,
      "grad_norm": 2.4651124477386475,
      "learning_rate": 2.0092991918301108e-05,
      "loss": 0.4453,
      "step": 20
    },
    {
      "epoch": 0.004159059260405384,
      "grad_norm": 2.5981829166412354,
      "learning_rate": 1.6551126795408016e-05,
      "loss": 0.5069,
      "step": 21
    },
    {
      "epoch": 0.00435710970137707,
      "grad_norm": 1.784904956817627,
      "learning_rate": 1.3725491432254624e-05,
      "loss": 0.5401,
      "step": 22
    },
    {
      "epoch": 0.004555160142348754,
      "grad_norm": 0.9995982646942139,
      "learning_rate": 1.1668722069349041e-05,
      "loss": 0.5714,
      "step": 23
    },
    {
      "epoch": 0.00475321058332044,
      "grad_norm": 1.417913556098938,
      "learning_rate": 1.0419132428365116e-05,
      "loss": 0.7382,
      "step": 24
    },
    {
      "epoch": 0.004951261024292124,
      "grad_norm": 3.399224042892456,
      "learning_rate": 1e-05,
      "loss": 0.8122,
      "step": 25
    },
    {
      "epoch": 0.004951261024292124,
      "eval_loss": 0.410976380109787,
      "eval_runtime": 3.0576,
      "eval_samples_per_second": 16.353,
      "eval_steps_per_second": 4.252,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 25,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.6634211685721702e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}