{
  "best_metric": 1.0646418333053589,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.053099694676755606,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0010619938935351122,
      "grad_norm": 50.445369720458984,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 43.7219,
      "step": 1
    },
    {
      "epoch": 0.0010619938935351122,
      "eval_loss": 1.3181114196777344,
      "eval_runtime": 4.7962,
      "eval_samples_per_second": 10.425,
      "eval_steps_per_second": 2.71,
      "step": 1
    },
    {
      "epoch": 0.0021239877870702245,
      "grad_norm": 22.956039428710938,
      "learning_rate": 6.666666666666667e-05,
      "loss": 40.9591,
      "step": 2
    },
    {
      "epoch": 0.0031859816806053365,
      "grad_norm": 66.04405975341797,
      "learning_rate": 0.0001,
      "loss": 70.2201,
      "step": 3
    },
    {
      "epoch": 0.004247975574140449,
      "grad_norm": 46.3990592956543,
      "learning_rate": 9.99571699711836e-05,
      "loss": 51.5672,
      "step": 4
    },
    {
      "epoch": 0.005309969467675561,
      "grad_norm": 30.05483055114746,
      "learning_rate": 9.982876141412856e-05,
      "loss": 43.201,
      "step": 5
    },
    {
      "epoch": 0.006371963361210673,
      "grad_norm": 24.478403091430664,
      "learning_rate": 9.961501876182148e-05,
      "loss": 38.8057,
      "step": 6
    },
    {
      "epoch": 0.007433957254745785,
      "grad_norm": 24.022966384887695,
      "learning_rate": 9.931634888554937e-05,
      "loss": 37.9515,
      "step": 7
    },
    {
      "epoch": 0.008495951148280898,
      "grad_norm": 21.067651748657227,
      "learning_rate": 9.893332032039701e-05,
      "loss": 41.8724,
      "step": 8
    },
    {
      "epoch": 0.009557945041816009,
      "grad_norm": 17.71038818359375,
      "learning_rate": 9.846666218300807e-05,
      "loss": 38.8532,
      "step": 9
    },
    {
      "epoch": 0.010619938935351122,
      "grad_norm": 23.2802791595459,
      "learning_rate": 9.791726278367022e-05,
      "loss": 44.3834,
      "step": 10
    },
    {
      "epoch": 0.011681932828886235,
      "grad_norm": 25.220415115356445,
      "learning_rate": 9.728616793536588e-05,
      "loss": 43.2465,
      "step": 11
    },
    {
      "epoch": 0.012743926722421346,
      "grad_norm": 30.065141677856445,
      "learning_rate": 9.657457896300791e-05,
      "loss": 42.4665,
      "step": 12
    },
    {
      "epoch": 0.013805920615956459,
      "grad_norm": 47.5212287902832,
      "learning_rate": 9.578385041664925e-05,
      "loss": 50.1018,
      "step": 13
    },
    {
      "epoch": 0.01486791450949157,
      "grad_norm": 34.7642936706543,
      "learning_rate": 9.491548749301997e-05,
      "loss": 40.72,
      "step": 14
    },
    {
      "epoch": 0.01592990840302668,
      "grad_norm": 23.62519073486328,
      "learning_rate": 9.397114317029975e-05,
      "loss": 42.3325,
      "step": 15
    },
    {
      "epoch": 0.016991902296561796,
      "grad_norm": 42.359012603759766,
      "learning_rate": 9.295261506157986e-05,
      "loss": 54.7568,
      "step": 16
    },
    {
      "epoch": 0.018053896190096907,
      "grad_norm": 21.744274139404297,
      "learning_rate": 9.186184199300464e-05,
      "loss": 36.6027,
      "step": 17
    },
    {
      "epoch": 0.019115890083632018,
      "grad_norm": 17.517501831054688,
      "learning_rate": 9.070090031310558e-05,
      "loss": 33.6735,
      "step": 18
    },
    {
      "epoch": 0.020177883977167133,
      "grad_norm": 13.376629829406738,
      "learning_rate": 8.947199994035401e-05,
      "loss": 34.858,
      "step": 19
    },
    {
      "epoch": 0.021239877870702244,
      "grad_norm": 16.480670928955078,
      "learning_rate": 8.817748015645558e-05,
      "loss": 35.9226,
      "step": 20
    },
    {
      "epoch": 0.022301871764237355,
      "grad_norm": 14.814435958862305,
      "learning_rate": 8.681980515339464e-05,
      "loss": 36.5462,
      "step": 21
    },
    {
      "epoch": 0.02336386565777247,
      "grad_norm": 15.242733001708984,
      "learning_rate": 8.540155934270471e-05,
      "loss": 38.7139,
      "step": 22
    },
    {
      "epoch": 0.02442585955130758,
      "grad_norm": 16.00188636779785,
      "learning_rate": 8.392544243589427e-05,
      "loss": 39.0224,
      "step": 23
    },
    {
      "epoch": 0.025487853444842692,
      "grad_norm": 16.244796752929688,
      "learning_rate": 8.239426430539243e-05,
      "loss": 38.5783,
      "step": 24
    },
    {
      "epoch": 0.026549847338377803,
      "grad_norm": 26.823301315307617,
      "learning_rate": 8.081093963579707e-05,
      "loss": 38.7784,
      "step": 25
    },
    {
      "epoch": 0.026549847338377803,
      "eval_loss": 1.1134846210479736,
      "eval_runtime": 5.1844,
      "eval_samples_per_second": 9.644,
      "eval_steps_per_second": 2.508,
      "step": 25
    },
    {
      "epoch": 0.027611841231912918,
      "grad_norm": 16.71857261657715,
      "learning_rate": 7.917848237560709e-05,
      "loss": 36.6123,
      "step": 26
    },
    {
      "epoch": 0.02867383512544803,
      "grad_norm": 10.87404727935791,
      "learning_rate": 7.75e-05,
      "loss": 37.5371,
      "step": 27
    },
    {
      "epoch": 0.02973582901898314,
      "grad_norm": 56.00872039794922,
      "learning_rate": 7.577868759557654e-05,
      "loss": 64.4314,
      "step": 28
    },
    {
      "epoch": 0.030797822912518254,
      "grad_norm": 22.56377601623535,
      "learning_rate": 7.401782177833148e-05,
      "loss": 35.6372,
      "step": 29
    },
    {
      "epoch": 0.03185981680605336,
      "grad_norm": 12.734387397766113,
      "learning_rate": 7.222075445642904e-05,
      "loss": 32.3161,
      "step": 30
    },
    {
      "epoch": 0.03292181069958848,
      "grad_norm": 14.141098976135254,
      "learning_rate": 7.03909064496551e-05,
      "loss": 34.3303,
      "step": 31
    },
    {
      "epoch": 0.03398380459312359,
      "grad_norm": 9.78072738647461,
      "learning_rate": 6.853176097769229e-05,
      "loss": 32.2263,
      "step": 32
    },
    {
      "epoch": 0.0350457984866587,
      "grad_norm": 12.632477760314941,
      "learning_rate": 6.664685702961344e-05,
      "loss": 33.7613,
      "step": 33
    },
    {
      "epoch": 0.036107792380193814,
      "grad_norm": 14.908531188964844,
      "learning_rate": 6.473978262721463e-05,
      "loss": 36.611,
      "step": 34
    },
    {
      "epoch": 0.037169786273728925,
      "grad_norm": 22.300689697265625,
      "learning_rate": 6.281416799501188e-05,
      "loss": 36.3684,
      "step": 35
    },
    {
      "epoch": 0.038231780167264036,
      "grad_norm": 22.755752563476562,
      "learning_rate": 6.087367864990233e-05,
      "loss": 37.6983,
      "step": 36
    },
    {
      "epoch": 0.03929377406079915,
      "grad_norm": 22.28518295288086,
      "learning_rate": 5.8922008423644624e-05,
      "loss": 37.6341,
      "step": 37
    },
    {
      "epoch": 0.040355767954334265,
      "grad_norm": 29.46213150024414,
      "learning_rate": 5.696287243144013e-05,
      "loss": 41.3437,
      "step": 38
    },
    {
      "epoch": 0.041417761847869376,
      "grad_norm": 9.17482852935791,
      "learning_rate": 5.500000000000001e-05,
      "loss": 36.185,
      "step": 39
    },
    {
      "epoch": 0.04247975574140449,
      "grad_norm": 11.093894004821777,
      "learning_rate": 5.303712756855988e-05,
      "loss": 38.199,
      "step": 40
    },
    {
      "epoch": 0.0435417496349396,
      "grad_norm": 48.82663345336914,
      "learning_rate": 5.107799157635538e-05,
      "loss": 52.5876,
      "step": 41
    },
    {
      "epoch": 0.04460374352847471,
      "grad_norm": 14.177578926086426,
      "learning_rate": 4.912632135009769e-05,
      "loss": 33.7556,
      "step": 42
    },
    {
      "epoch": 0.04566573742200982,
      "grad_norm": 12.167204856872559,
      "learning_rate": 4.718583200498814e-05,
      "loss": 31.3788,
      "step": 43
    },
    {
      "epoch": 0.04672773131554494,
      "grad_norm": 14.164153099060059,
      "learning_rate": 4.526021737278538e-05,
      "loss": 30.4894,
      "step": 44
    },
    {
      "epoch": 0.04778972520908005,
      "grad_norm": 13.399406433105469,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 32.0962,
      "step": 45
    },
    {
      "epoch": 0.04885171910261516,
      "grad_norm": 15.352862358093262,
      "learning_rate": 4.146823902230772e-05,
      "loss": 34.1184,
      "step": 46
    },
    {
      "epoch": 0.04991371299615027,
      "grad_norm": 15.30873966217041,
      "learning_rate": 3.960909355034491e-05,
      "loss": 33.8862,
      "step": 47
    },
    {
      "epoch": 0.050975706889685383,
      "grad_norm": 14.761381149291992,
      "learning_rate": 3.777924554357096e-05,
      "loss": 33.5394,
      "step": 48
    },
    {
      "epoch": 0.052037700783220495,
      "grad_norm": 18.559497833251953,
      "learning_rate": 3.598217822166854e-05,
      "loss": 37.7572,
      "step": 49
    },
    {
      "epoch": 0.053099694676755606,
      "grad_norm": 29.4871826171875,
      "learning_rate": 3.422131240442349e-05,
      "loss": 37.6346,
      "step": 50
    },
    {
      "epoch": 0.053099694676755606,
      "eval_loss": 1.0646418333053589,
      "eval_runtime": 5.2151,
      "eval_samples_per_second": 9.587,
      "eval_steps_per_second": 2.493,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.204505434610729e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}