{
  "best_metric": 0.8585520386695862,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 3.0476190476190474,
  "eval_steps": 25,
  "global_step": 40,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0761904761904762,
      "grad_norm": 14.026223182678223,
      "learning_rate": 5e-05,
      "loss": 7.5144,
      "step": 1
    },
    {
      "epoch": 0.0761904761904762,
      "eval_loss": 7.233830451965332,
      "eval_runtime": 1.8952,
      "eval_samples_per_second": 46.961,
      "eval_steps_per_second": 6.332,
      "step": 1
    },
    {
      "epoch": 0.1523809523809524,
      "grad_norm": 14.973212242126465,
      "learning_rate": 0.0001,
      "loss": 7.9078,
      "step": 2
    },
    {
      "epoch": 0.22857142857142856,
      "grad_norm": 13.781585693359375,
      "learning_rate": 9.98292246503335e-05,
      "loss": 7.1847,
      "step": 3
    },
    {
      "epoch": 0.3047619047619048,
      "grad_norm": 12.180188179016113,
      "learning_rate": 9.931806517013612e-05,
      "loss": 5.6694,
      "step": 4
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 9.784143447875977,
      "learning_rate": 9.847001329696653e-05,
      "loss": 4.3383,
      "step": 5
    },
    {
      "epoch": 0.45714285714285713,
      "grad_norm": 9.046195983886719,
      "learning_rate": 9.729086208503174e-05,
      "loss": 3.3229,
      "step": 6
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 7.573584079742432,
      "learning_rate": 9.578866633275288e-05,
      "loss": 2.4241,
      "step": 7
    },
    {
      "epoch": 0.6095238095238096,
      "grad_norm": 7.039599418640137,
      "learning_rate": 9.397368756032445e-05,
      "loss": 1.9458,
      "step": 8
    },
    {
      "epoch": 0.6857142857142857,
      "grad_norm": 4.332740306854248,
      "learning_rate": 9.185832391312644e-05,
      "loss": 1.2798,
      "step": 9
    },
    {
      "epoch": 0.7619047619047619,
      "grad_norm": 2.105264663696289,
      "learning_rate": 8.945702546981969e-05,
      "loss": 1.0346,
      "step": 10
    },
    {
      "epoch": 0.8380952380952381,
      "grad_norm": 4.982356071472168,
      "learning_rate": 8.678619553365659e-05,
      "loss": 1.011,
      "step": 11
    },
    {
      "epoch": 0.9142857142857143,
      "grad_norm": 2.1526806354522705,
      "learning_rate": 8.386407858128706e-05,
      "loss": 0.9524,
      "step": 12
    },
    {
      "epoch": 0.9904761904761905,
      "grad_norm": 1.4837459325790405,
      "learning_rate": 8.07106356344834e-05,
      "loss": 0.9343,
      "step": 13
    },
    {
      "epoch": 1.0666666666666667,
      "grad_norm": 3.639598846435547,
      "learning_rate": 7.734740790612136e-05,
      "loss": 1.7018,
      "step": 14
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 1.631341814994812,
      "learning_rate": 7.379736965185368e-05,
      "loss": 0.9145,
      "step": 15
    },
    {
      "epoch": 1.2190476190476192,
      "grad_norm": 1.466554045677185,
      "learning_rate": 7.008477123264848e-05,
      "loss": 0.9415,
      "step": 16
    },
    {
      "epoch": 1.2952380952380953,
      "grad_norm": 5.518876075744629,
      "learning_rate": 6.623497346023418e-05,
      "loss": 0.8771,
      "step": 17
    },
    {
      "epoch": 1.3714285714285714,
      "grad_norm": 1.651365876197815,
      "learning_rate": 6.227427435703997e-05,
      "loss": 0.9171,
      "step": 18
    },
    {
      "epoch": 1.4476190476190476,
      "grad_norm": 1.233325719833374,
      "learning_rate": 5.8229729514036705e-05,
      "loss": 0.9414,
      "step": 19
    },
    {
      "epoch": 1.5238095238095237,
      "grad_norm": 1.4162348508834839,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 0.8752,
      "step": 20
    },
    {
      "epoch": 1.6,
      "grad_norm": 1.5792149305343628,
      "learning_rate": 5e-05,
      "loss": 0.9103,
      "step": 21
    },
    {
      "epoch": 1.6761904761904762,
      "grad_norm": 1.1879042387008667,
      "learning_rate": 4.5871032726383386e-05,
      "loss": 0.9003,
      "step": 22
    },
    {
      "epoch": 1.7523809523809524,
      "grad_norm": 0.9944649338722229,
      "learning_rate": 4.17702704859633e-05,
      "loss": 0.8776,
      "step": 23
    },
    {
      "epoch": 1.8285714285714287,
      "grad_norm": 3.7743890285491943,
      "learning_rate": 3.772572564296005e-05,
      "loss": 0.911,
      "step": 24
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 0.7919507622718811,
      "learning_rate": 3.3765026539765834e-05,
      "loss": 0.891,
      "step": 25
    },
    {
      "epoch": 1.9047619047619047,
      "eval_loss": 0.8585520386695862,
      "eval_runtime": 1.8941,
      "eval_samples_per_second": 46.988,
      "eval_steps_per_second": 6.335,
      "step": 25
    },
    {
      "epoch": 1.980952380952381,
      "grad_norm": 0.9605194330215454,
      "learning_rate": 2.991522876735154e-05,
      "loss": 0.887,
      "step": 26
    },
    {
      "epoch": 2.057142857142857,
      "grad_norm": 2.160217523574829,
      "learning_rate": 2.6202630348146324e-05,
      "loss": 1.6403,
      "step": 27
    },
    {
      "epoch": 2.1333333333333333,
      "grad_norm": 1.0214014053344727,
      "learning_rate": 2.2652592093878666e-05,
      "loss": 0.9062,
      "step": 28
    },
    {
      "epoch": 2.2095238095238097,
      "grad_norm": 1.190313458442688,
      "learning_rate": 1.928936436551661e-05,
      "loss": 0.853,
      "step": 29
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 1.1187245845794678,
      "learning_rate": 1.6135921418712956e-05,
      "loss": 0.8842,
      "step": 30
    },
    {
      "epoch": 2.361904761904762,
      "grad_norm": 0.9486127495765686,
      "learning_rate": 1.3213804466343421e-05,
      "loss": 0.8989,
      "step": 31
    },
    {
      "epoch": 2.4380952380952383,
      "grad_norm": 1.1477197408676147,
      "learning_rate": 1.0542974530180327e-05,
      "loss": 0.8428,
      "step": 32
    },
    {
      "epoch": 2.5142857142857142,
      "grad_norm": 0.8318337798118591,
      "learning_rate": 8.141676086873572e-06,
      "loss": 0.8689,
      "step": 33
    },
    {
      "epoch": 2.5904761904761906,
      "grad_norm": 0.9033652544021606,
      "learning_rate": 6.026312439675552e-06,
      "loss": 0.8886,
      "step": 34
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 1.3068876266479492,
      "learning_rate": 4.2113336672471245e-06,
      "loss": 0.8713,
      "step": 35
    },
    {
      "epoch": 2.742857142857143,
      "grad_norm": 1.5723620653152466,
      "learning_rate": 2.7091379149682685e-06,
      "loss": 0.8887,
      "step": 36
    },
    {
      "epoch": 2.819047619047619,
      "grad_norm": 0.7510371804237366,
      "learning_rate": 1.5299867030334814e-06,
      "loss": 0.8693,
      "step": 37
    },
    {
      "epoch": 2.895238095238095,
      "grad_norm": 0.9378589987754822,
      "learning_rate": 6.819348298638839e-07,
      "loss": 0.8705,
      "step": 38
    },
    {
      "epoch": 2.9714285714285715,
      "grad_norm": 1.0388190746307373,
      "learning_rate": 1.7077534966650766e-07,
      "loss": 0.8689,
      "step": 39
    },
    {
      "epoch": 3.0476190476190474,
      "grad_norm": 2.3041744232177734,
      "learning_rate": 0.0,
      "loss": 1.5959,
      "step": 40
    }
  ],
  "logging_steps": 1,
  "max_steps": 40,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2528685890404352e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}