{
  "best_metric": 2.8416030406951904,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 3.0651162790697675,
  "eval_steps": 25,
  "global_step": 41,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07441860465116279,
      "grad_norm": 1.4890468120574951,
      "learning_rate": 5e-05,
      "loss": 3.6365,
      "step": 1
    },
    {
      "epoch": 0.07441860465116279,
      "eval_loss": 3.9355320930480957,
      "eval_runtime": 4.7179,
      "eval_samples_per_second": 19.288,
      "eval_steps_per_second": 2.544,
      "step": 1
    },
    {
      "epoch": 0.14883720930232558,
      "grad_norm": 1.5340938568115234,
      "learning_rate": 0.0001,
      "loss": 3.7887,
      "step": 2
    },
    {
      "epoch": 0.22325581395348837,
      "grad_norm": 1.9364389181137085,
      "learning_rate": 9.983786540671051e-05,
      "loss": 4.1062,
      "step": 3
    },
    {
      "epoch": 0.29767441860465116,
      "grad_norm": 1.6120408773422241,
      "learning_rate": 9.935251313189564e-05,
      "loss": 3.3304,
      "step": 4
    },
    {
      "epoch": 0.37209302325581395,
      "grad_norm": 1.4697085618972778,
      "learning_rate": 9.85470908713026e-05,
      "loss": 3.3136,
      "step": 5
    },
    {
      "epoch": 0.44651162790697674,
      "grad_norm": 1.4474133253097534,
      "learning_rate": 9.742682209735727e-05,
      "loss": 3.235,
      "step": 6
    },
    {
      "epoch": 0.5209302325581395,
      "grad_norm": 1.021986484527588,
      "learning_rate": 9.599897218294122e-05,
      "loss": 3.1347,
      "step": 7
    },
    {
      "epoch": 0.5953488372093023,
      "grad_norm": 0.8347269892692566,
      "learning_rate": 9.42728012826605e-05,
      "loss": 3.0038,
      "step": 8
    },
    {
      "epoch": 0.6697674418604651,
      "grad_norm": 0.9195536375045776,
      "learning_rate": 9.225950427718975e-05,
      "loss": 3.1287,
      "step": 9
    },
    {
      "epoch": 0.7441860465116279,
      "grad_norm": 0.7903404235839844,
      "learning_rate": 8.997213817017507e-05,
      "loss": 3.0327,
      "step": 10
    },
    {
      "epoch": 0.8186046511627907,
      "grad_norm": 0.5979586839675903,
      "learning_rate": 8.742553740855506e-05,
      "loss": 2.9139,
      "step": 11
    },
    {
      "epoch": 0.8930232558139535,
      "grad_norm": 0.6653117537498474,
      "learning_rate": 8.463621767547998e-05,
      "loss": 2.9011,
      "step": 12
    },
    {
      "epoch": 0.9674418604651163,
      "grad_norm": 0.8118610978126526,
      "learning_rate": 8.162226877976887e-05,
      "loss": 3.1099,
      "step": 13
    },
    {
      "epoch": 1.0465116279069768,
      "grad_norm": 1.105678677558899,
      "learning_rate": 7.840323733655778e-05,
      "loss": 4.8536,
      "step": 14
    },
    {
      "epoch": 1.1209302325581396,
      "grad_norm": 0.7873435020446777,
      "learning_rate": 7.500000000000001e-05,
      "loss": 2.6658,
      "step": 15
    },
    {
      "epoch": 1.1953488372093024,
      "grad_norm": 0.918388307094574,
      "learning_rate": 7.143462807015271e-05,
      "loss": 2.889,
      "step": 16
    },
    {
      "epoch": 1.2697674418604652,
      "grad_norm": 0.7034196853637695,
      "learning_rate": 6.773024435212678e-05,
      "loss": 2.5797,
      "step": 17
    },
    {
      "epoch": 1.344186046511628,
      "grad_norm": 0.7909281253814697,
      "learning_rate": 6.391087319582264e-05,
      "loss": 2.8715,
      "step": 18
    },
    {
      "epoch": 1.4186046511627908,
      "grad_norm": 0.8187082409858704,
      "learning_rate": 6.0001284688802226e-05,
      "loss": 2.7232,
      "step": 19
    },
    {
      "epoch": 1.4930232558139536,
      "grad_norm": 0.7360689043998718,
      "learning_rate": 5.602683401276615e-05,
      "loss": 2.363,
      "step": 20
    },
    {
      "epoch": 1.5674418604651161,
      "grad_norm": 0.8096981048583984,
      "learning_rate": 5.201329700547076e-05,
      "loss": 3.0653,
      "step": 21
    },
    {
      "epoch": 1.6418604651162791,
      "grad_norm": 0.9424793720245361,
      "learning_rate": 4.798670299452926e-05,
      "loss": 2.7602,
      "step": 22
    },
    {
      "epoch": 1.7162790697674417,
      "grad_norm": 0.7494408488273621,
      "learning_rate": 4.397316598723385e-05,
      "loss": 2.145,
      "step": 23
    },
    {
      "epoch": 1.7906976744186047,
      "grad_norm": 0.7966036796569824,
      "learning_rate": 3.9998715311197785e-05,
      "loss": 3.2073,
      "step": 24
    },
    {
      "epoch": 1.8651162790697673,
      "grad_norm": 0.982150673866272,
      "learning_rate": 3.608912680417737e-05,
      "loss": 2.649,
      "step": 25
    },
    {
      "epoch": 1.8651162790697673,
      "eval_loss": 2.8416030406951904,
      "eval_runtime": 4.7173,
      "eval_samples_per_second": 19.291,
      "eval_steps_per_second": 2.544,
      "step": 25
    },
    {
      "epoch": 1.9395348837209303,
      "grad_norm": 1.3147518634796143,
      "learning_rate": 3.226975564787322e-05,
      "loss": 2.8061,
      "step": 26
    },
    {
      "epoch": 2.0186046511627906,
      "grad_norm": 1.0044997930526733,
      "learning_rate": 2.8565371929847284e-05,
      "loss": 4.262,
      "step": 27
    },
    {
      "epoch": 2.0930232558139537,
      "grad_norm": 0.6567540168762207,
      "learning_rate": 2.500000000000001e-05,
      "loss": 2.4335,
      "step": 28
    },
    {
      "epoch": 2.167441860465116,
      "grad_norm": 0.7161814570426941,
      "learning_rate": 2.1596762663442218e-05,
      "loss": 2.6255,
      "step": 29
    },
    {
      "epoch": 2.2418604651162792,
      "grad_norm": 0.6859881281852722,
      "learning_rate": 1.837773122023114e-05,
      "loss": 2.2539,
      "step": 30
    },
    {
      "epoch": 2.316279069767442,
      "grad_norm": 0.6596910357475281,
      "learning_rate": 1.536378232452003e-05,
      "loss": 2.9981,
      "step": 31
    },
    {
      "epoch": 2.390697674418605,
      "grad_norm": 0.8064484000205994,
      "learning_rate": 1.257446259144494e-05,
      "loss": 2.6408,
      "step": 32
    },
    {
      "epoch": 2.4651162790697674,
      "grad_norm": 0.6943879127502441,
      "learning_rate": 1.0027861829824952e-05,
      "loss": 2.3235,
      "step": 33
    },
    {
      "epoch": 2.5395348837209304,
      "grad_norm": 0.6679330468177795,
      "learning_rate": 7.740495722810271e-06,
      "loss": 2.8428,
      "step": 34
    },
    {
      "epoch": 2.613953488372093,
      "grad_norm": 0.7221952676773071,
      "learning_rate": 5.727198717339511e-06,
      "loss": 2.5881,
      "step": 35
    },
    {
      "epoch": 2.688372093023256,
      "grad_norm": 0.6440845727920532,
      "learning_rate": 4.001027817058789e-06,
      "loss": 2.4293,
      "step": 36
    },
    {
      "epoch": 2.7627906976744185,
      "grad_norm": 0.7320359349250793,
      "learning_rate": 2.573177902642726e-06,
      "loss": 2.7737,
      "step": 37
    },
    {
      "epoch": 2.8372093023255816,
      "grad_norm": 0.7235525250434875,
      "learning_rate": 1.4529091286973995e-06,
      "loss": 2.5904,
      "step": 38
    },
    {
      "epoch": 2.911627906976744,
      "grad_norm": 0.897057294845581,
      "learning_rate": 6.474868681043578e-07,
      "loss": 2.5468,
      "step": 39
    },
    {
      "epoch": 2.986046511627907,
      "grad_norm": 1.5236244201660156,
      "learning_rate": 1.6213459328950352e-07,
      "loss": 3.9375,
      "step": 40
    },
    {
      "epoch": 3.0651162790697675,
      "grad_norm": 0.9568150043487549,
      "learning_rate": 0.0,
      "loss": 2.9471,
      "step": 41
    }
  ],
  "logging_steps": 1,
  "max_steps": 41,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.71444728304894e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}