{
  "best_metric": NaN,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 0.0398704211313232,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.000797408422626464,
      "grad_norm": 3.118460178375244,
      "learning_rate": 5e-05,
      "loss": 3.3129,
      "step": 1
    },
    {
      "epoch": 0.000797408422626464,
      "eval_loss": NaN,
      "eval_runtime": 57.9441,
      "eval_samples_per_second": 145.813,
      "eval_steps_per_second": 18.242,
      "step": 1
    },
    {
      "epoch": 0.001594816845252928,
      "grad_norm": 4.203450679779053,
      "learning_rate": 0.0001,
      "loss": 3.2103,
      "step": 2
    },
    {
      "epoch": 0.002392225267879392,
      "grad_norm": 4.596311092376709,
      "learning_rate": 9.989294616193017e-05,
      "loss": 3.7695,
      "step": 3
    },
    {
      "epoch": 0.003189633690505856,
      "grad_norm": 4.897176265716553,
      "learning_rate": 9.957224306869053e-05,
      "loss": 3.7375,
      "step": 4
    },
    {
      "epoch": 0.00398704211313232,
      "grad_norm": 6.114984035491943,
      "learning_rate": 9.903926402016153e-05,
      "loss": 3.3151,
      "step": 5
    },
    {
      "epoch": 0.004784450535758784,
      "grad_norm": 4.544273853302002,
      "learning_rate": 9.829629131445342e-05,
      "loss": 2.8091,
      "step": 6
    },
    {
      "epoch": 0.005581858958385248,
      "grad_norm": 5.7902045249938965,
      "learning_rate": 9.73465064747553e-05,
      "loss": 2.8499,
      "step": 7
    },
    {
      "epoch": 0.006379267381011712,
      "grad_norm": 4.928606033325195,
      "learning_rate": 9.619397662556435e-05,
      "loss": 1.8564,
      "step": 8
    },
    {
      "epoch": 0.0071766758036381755,
      "grad_norm": 7.858636379241943,
      "learning_rate": 9.484363707663442e-05,
      "loss": 2.9428,
      "step": 9
    },
    {
      "epoch": 0.00797408422626464,
      "grad_norm": 9.078240394592285,
      "learning_rate": 9.330127018922194e-05,
      "loss": 3.1329,
      "step": 10
    },
    {
      "epoch": 0.008771492648891105,
      "grad_norm": 7.502594470977783,
      "learning_rate": 9.157348061512727e-05,
      "loss": 1.5782,
      "step": 11
    },
    {
      "epoch": 0.009568901071517567,
      "grad_norm": 13.563742637634277,
      "learning_rate": 8.966766701456177e-05,
      "loss": 1.8071,
      "step": 12
    },
    {
      "epoch": 0.010366309494144032,
      "grad_norm": 3.1314632892608643,
      "learning_rate": 8.759199037394887e-05,
      "loss": 2.3669,
      "step": 13
    },
    {
      "epoch": 0.011163717916770496,
      "grad_norm": 3.0550808906555176,
      "learning_rate": 8.535533905932738e-05,
      "loss": 2.4571,
      "step": 14
    },
    {
      "epoch": 0.01196112633939696,
      "grad_norm": 2.894690752029419,
      "learning_rate": 8.296729075500344e-05,
      "loss": 2.5549,
      "step": 15
    },
    {
      "epoch": 0.012758534762023424,
      "grad_norm": 2.5987496376037598,
      "learning_rate": 8.043807145043604e-05,
      "loss": 2.2837,
      "step": 16
    },
    {
      "epoch": 0.013555943184649888,
      "grad_norm": 3.6536061763763428,
      "learning_rate": 7.777851165098012e-05,
      "loss": 2.6145,
      "step": 17
    },
    {
      "epoch": 0.014353351607276351,
      "grad_norm": 3.160776138305664,
      "learning_rate": 7.500000000000001e-05,
      "loss": 2.4515,
      "step": 18
    },
    {
      "epoch": 0.015150760029902816,
      "grad_norm": 5.913760662078857,
      "learning_rate": 7.211443451095007e-05,
      "loss": 2.8022,
      "step": 19
    },
    {
      "epoch": 0.01594816845252928,
      "grad_norm": 5.53171968460083,
      "learning_rate": 6.91341716182545e-05,
      "loss": 2.9629,
      "step": 20
    },
    {
      "epoch": 0.016745576875155745,
      "grad_norm": 3.351043939590454,
      "learning_rate": 6.607197326515808e-05,
      "loss": 2.362,
      "step": 21
    },
    {
      "epoch": 0.01754298529778221,
      "grad_norm": 6.70497989654541,
      "learning_rate": 6.294095225512603e-05,
      "loss": 2.3642,
      "step": 22
    },
    {
      "epoch": 0.01834039372040867,
      "grad_norm": 5.723726749420166,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 2.7549,
      "step": 23
    },
    {
      "epoch": 0.019137802143035135,
      "grad_norm": 3.634131908416748,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 1.793,
      "step": 24
    },
    {
      "epoch": 0.0199352105656616,
      "grad_norm": 9.935222625732422,
      "learning_rate": 5.327015646150716e-05,
      "loss": 2.3597,
      "step": 25
    },
    {
      "epoch": 0.0199352105656616,
      "eval_loss": NaN,
      "eval_runtime": 57.9048,
      "eval_samples_per_second": 145.912,
      "eval_steps_per_second": 18.254,
      "step": 25
    },
    {
      "epoch": 0.020732618988288064,
      "grad_norm": 1.758143424987793,
      "learning_rate": 5e-05,
      "loss": 2.3564,
      "step": 26
    },
    {
      "epoch": 0.02153002741091453,
      "grad_norm": 1.8171120882034302,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 2.3386,
      "step": 27
    },
    {
      "epoch": 0.022327435833540993,
      "grad_norm": 2.4620540142059326,
      "learning_rate": 4.347369038899744e-05,
      "loss": 2.2566,
      "step": 28
    },
    {
      "epoch": 0.023124844256167457,
      "grad_norm": 2.1204512119293213,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 2.3241,
      "step": 29
    },
    {
      "epoch": 0.02392225267879392,
      "grad_norm": 3.1958048343658447,
      "learning_rate": 3.705904774487396e-05,
      "loss": 2.2001,
      "step": 30
    },
    {
      "epoch": 0.024719661101420383,
      "grad_norm": 3.4434661865234375,
      "learning_rate": 3.392802673484193e-05,
      "loss": 2.1418,
      "step": 31
    },
    {
      "epoch": 0.025517069524046847,
      "grad_norm": 3.5963659286499023,
      "learning_rate": 3.086582838174551e-05,
      "loss": 2.5206,
      "step": 32
    },
    {
      "epoch": 0.026314477946673312,
      "grad_norm": 3.911703586578369,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 1.9435,
      "step": 33
    },
    {
      "epoch": 0.027111886369299776,
      "grad_norm": 4.419018745422363,
      "learning_rate": 2.500000000000001e-05,
      "loss": 2.6252,
      "step": 34
    },
    {
      "epoch": 0.02790929479192624,
      "grad_norm": 3.8828225135803223,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 1.6891,
      "step": 35
    },
    {
      "epoch": 0.028706703214552702,
      "grad_norm": 5.121977806091309,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 1.4323,
      "step": 36
    },
    {
      "epoch": 0.029504111637179167,
      "grad_norm": 3.6891393661499023,
      "learning_rate": 1.703270924499656e-05,
      "loss": 1.7424,
      "step": 37
    },
    {
      "epoch": 0.03030152005980563,
      "grad_norm": 1.6718828678131104,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 2.2177,
      "step": 38
    },
    {
      "epoch": 0.031098928482432096,
      "grad_norm": 1.5160930156707764,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 2.2251,
      "step": 39
    },
    {
      "epoch": 0.03189633690505856,
      "grad_norm": 1.7978293895721436,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 2.2484,
      "step": 40
    },
    {
      "epoch": 0.03269374532768502,
      "grad_norm": 2.5029444694519043,
      "learning_rate": 8.426519384872733e-06,
      "loss": 2.3805,
      "step": 41
    },
    {
      "epoch": 0.03349115375031149,
      "grad_norm": 3.7533793449401855,
      "learning_rate": 6.698729810778065e-06,
      "loss": 2.1574,
      "step": 42
    },
    {
      "epoch": 0.03428856217293795,
      "grad_norm": 5.509903907775879,
      "learning_rate": 5.156362923365588e-06,
      "loss": 2.4423,
      "step": 43
    },
    {
      "epoch": 0.03508597059556442,
      "grad_norm": 3.371899127960205,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 2.3273,
      "step": 44
    },
    {
      "epoch": 0.03588337901819088,
      "grad_norm": 4.512765884399414,
      "learning_rate": 2.653493525244721e-06,
      "loss": 2.0492,
      "step": 45
    },
    {
      "epoch": 0.03668078744081734,
      "grad_norm": 6.9926981925964355,
      "learning_rate": 1.70370868554659e-06,
      "loss": 2.8342,
      "step": 46
    },
    {
      "epoch": 0.03747819586344381,
      "grad_norm": 5.881093978881836,
      "learning_rate": 9.607359798384785e-07,
      "loss": 2.3712,
      "step": 47
    },
    {
      "epoch": 0.03827560428607027,
      "grad_norm": 3.9496421813964844,
      "learning_rate": 4.277569313094809e-07,
      "loss": 1.7801,
      "step": 48
    },
    {
      "epoch": 0.03907301270869674,
      "grad_norm": 6.15703010559082,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 2.0498,
      "step": 49
    },
    {
      "epoch": 0.0398704211313232,
      "grad_norm": 5.4105987548828125,
      "learning_rate": 0.0,
      "loss": 1.9125,
      "step": 50
    },
    {
      "epoch": 0.0398704211313232,
      "eval_loss": NaN,
      "eval_runtime": 57.9087,
      "eval_samples_per_second": 145.902,
      "eval_steps_per_second": 18.253,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 1
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.95300912644096e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}