{
  "best_metric": 0.038246795535087585,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 3.0588235294117645,
  "eval_steps": 25,
  "global_step": 39,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0784313725490196,
      "grad_norm": 60.86349868774414,
      "learning_rate": 5e-05,
      "loss": 87.992,
      "step": 1
    },
    {
      "epoch": 0.0784313725490196,
      "eval_loss": 6.279521465301514,
      "eval_runtime": 2.283,
      "eval_samples_per_second": 21.901,
      "eval_steps_per_second": 5.694,
      "step": 1
    },
    {
      "epoch": 0.1568627450980392,
      "grad_norm": 66.25541687011719,
      "learning_rate": 0.0001,
      "loss": 92.7498,
      "step": 2
    },
    {
      "epoch": 0.23529411764705882,
      "grad_norm": 83.56111907958984,
      "learning_rate": 9.983788698441369e-05,
      "loss": 104.2699,
      "step": 3
    },
    {
      "epoch": 0.3137254901960784,
      "grad_norm": 74.07500457763672,
      "learning_rate": 9.935271596564688e-05,
      "loss": 73.7409,
      "step": 4
    },
    {
      "epoch": 0.39215686274509803,
      "grad_norm": 122.21480560302734,
      "learning_rate": 9.854798261200746e-05,
      "loss": 52.6066,
      "step": 5
    },
    {
      "epoch": 0.47058823529411764,
      "grad_norm": 216.04931640625,
      "learning_rate": 9.74294850457488e-05,
      "loss": 24.5554,
      "step": 6
    },
    {
      "epoch": 0.5490196078431373,
      "grad_norm": 169.465087890625,
      "learning_rate": 9.600528206746612e-05,
      "loss": 27.2953,
      "step": 7
    },
    {
      "epoch": 0.6274509803921569,
      "grad_norm": 143.66921997070312,
      "learning_rate": 9.428563509225347e-05,
      "loss": 18.9022,
      "step": 8
    },
    {
      "epoch": 0.7058823529411765,
      "grad_norm": 105.4874267578125,
      "learning_rate": 9.22829342159729e-05,
      "loss": 5.5785,
      "step": 9
    },
    {
      "epoch": 0.7843137254901961,
      "grad_norm": 118.35433197021484,
      "learning_rate": 9.001160894432978e-05,
      "loss": 12.1629,
      "step": 10
    },
    {
      "epoch": 0.8627450980392157,
      "grad_norm": 195.34605407714844,
      "learning_rate": 8.74880242279536e-05,
      "loss": 9.8084,
      "step": 11
    },
    {
      "epoch": 0.9411764705882353,
      "grad_norm": 267.8063049316406,
      "learning_rate": 8.473036255255366e-05,
      "loss": 8.0296,
      "step": 12
    },
    {
      "epoch": 1.0196078431372548,
      "grad_norm": 39.37614440917969,
      "learning_rate": 8.175849293369291e-05,
      "loss": 4.7077,
      "step": 13
    },
    {
      "epoch": 1.0980392156862746,
      "grad_norm": 72.95148468017578,
      "learning_rate": 7.859382776007543e-05,
      "loss": 6.9191,
      "step": 14
    },
    {
      "epoch": 1.1764705882352942,
      "grad_norm": 63.84526443481445,
      "learning_rate": 7.525916851679529e-05,
      "loss": 4.7309,
      "step": 15
    },
    {
      "epoch": 1.2549019607843137,
      "grad_norm": 77.97026062011719,
      "learning_rate": 7.177854150011389e-05,
      "loss": 2.3247,
      "step": 16
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 25.761390686035156,
      "learning_rate": 6.817702470744477e-05,
      "loss": 1.8443,
      "step": 17
    },
    {
      "epoch": 1.4117647058823528,
      "grad_norm": 65.09815216064453,
      "learning_rate": 6.448056714980767e-05,
      "loss": 3.2645,
      "step": 18
    },
    {
      "epoch": 1.4901960784313726,
      "grad_norm": 42.31401824951172,
      "learning_rate": 6.071580188860955e-05,
      "loss": 1.0521,
      "step": 19
    },
    {
      "epoch": 1.5686274509803921,
      "grad_norm": 33.89790725708008,
      "learning_rate": 5.690985414382668e-05,
      "loss": 1.5936,
      "step": 20
    },
    {
      "epoch": 1.6470588235294117,
      "grad_norm": 29.281991958618164,
      "learning_rate": 5.3090145856173346e-05,
      "loss": 1.6774,
      "step": 21
    },
    {
      "epoch": 1.7254901960784315,
      "grad_norm": 10.227999687194824,
      "learning_rate": 4.9284198111390456e-05,
      "loss": 0.2475,
      "step": 22
    },
    {
      "epoch": 1.803921568627451,
      "grad_norm": 40.34574890136719,
      "learning_rate": 4.551943285019234e-05,
      "loss": 1.5025,
      "step": 23
    },
    {
      "epoch": 1.8823529411764706,
      "grad_norm": 21.78724479675293,
      "learning_rate": 4.182297529255525e-05,
      "loss": 1.3656,
      "step": 24
    },
    {
      "epoch": 1.9607843137254903,
      "grad_norm": 6.2362823486328125,
      "learning_rate": 3.822145849988612e-05,
      "loss": 0.3406,
      "step": 25
    },
    {
      "epoch": 1.9607843137254903,
      "eval_loss": 0.038246795535087585,
      "eval_runtime": 1.8295,
      "eval_samples_per_second": 27.331,
      "eval_steps_per_second": 7.106,
      "step": 25
    },
    {
      "epoch": 2.0392156862745097,
      "grad_norm": 3.510915994644165,
      "learning_rate": 3.474083148320469e-05,
      "loss": 0.4248,
      "step": 26
    },
    {
      "epoch": 2.1176470588235294,
      "grad_norm": 8.030313491821289,
      "learning_rate": 3.1406172239924584e-05,
      "loss": 0.8069,
      "step": 27
    },
    {
      "epoch": 2.196078431372549,
      "grad_norm": 26.57486915588379,
      "learning_rate": 2.8241507066307104e-05,
      "loss": 1.1415,
      "step": 28
    },
    {
      "epoch": 2.2745098039215685,
      "grad_norm": 6.342585563659668,
      "learning_rate": 2.5269637447446348e-05,
      "loss": 0.0675,
      "step": 29
    },
    {
      "epoch": 2.3529411764705883,
      "grad_norm": 20.42534065246582,
      "learning_rate": 2.2511975772046403e-05,
      "loss": 1.1795,
      "step": 30
    },
    {
      "epoch": 2.431372549019608,
      "grad_norm": 71.26493835449219,
      "learning_rate": 1.9988391055670233e-05,
      "loss": 1.4359,
      "step": 31
    },
    {
      "epoch": 2.5098039215686274,
      "grad_norm": 4.1655449867248535,
      "learning_rate": 1.771706578402711e-05,
      "loss": 0.0568,
      "step": 32
    },
    {
      "epoch": 2.588235294117647,
      "grad_norm": 12.102375030517578,
      "learning_rate": 1.5714364907746536e-05,
      "loss": 0.9678,
      "step": 33
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 25.678956985473633,
      "learning_rate": 1.3994717932533891e-05,
      "loss": 1.2355,
      "step": 34
    },
    {
      "epoch": 2.7450980392156863,
      "grad_norm": 1.5909215211868286,
      "learning_rate": 1.257051495425121e-05,
      "loss": 0.0221,
      "step": 35
    },
    {
      "epoch": 2.8235294117647056,
      "grad_norm": 33.322696685791016,
      "learning_rate": 1.1452017387992552e-05,
      "loss": 1.1316,
      "step": 36
    },
    {
      "epoch": 2.9019607843137254,
      "grad_norm": 23.584259033203125,
      "learning_rate": 1.064728403435312e-05,
      "loss": 0.9945,
      "step": 37
    },
    {
      "epoch": 2.980392156862745,
      "grad_norm": 6.629971981048584,
      "learning_rate": 1.0162113015586309e-05,
      "loss": 0.3805,
      "step": 38
    },
    {
      "epoch": 3.0588235294117645,
      "grad_norm": 8.110614776611328,
      "learning_rate": 1e-05,
      "loss": 0.5328,
      "step": 39
    }
  ],
  "logging_steps": 1,
  "max_steps": 39,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.104695101563863e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}