|
{
  "best_metric": 11.924012184143066,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.047088369172814316,
  "eval_steps": 50,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0006278449223041908,
      "grad_norm": 0.02513544075191021,
      "learning_rate": 5e-06,
      "loss": 11.9326,
      "step": 1
    },
    {
      "epoch": 0.0006278449223041908,
      "eval_loss": 11.9313383102417,
      "eval_runtime": 28.1146,
      "eval_samples_per_second": 95.431,
      "eval_steps_per_second": 23.867,
      "step": 1
    },
    {
      "epoch": 0.0012556898446083817,
      "grad_norm": 0.022503087297081947,
      "learning_rate": 1e-05,
      "loss": 11.9316,
      "step": 2
    },
    {
      "epoch": 0.0018835347669125726,
      "grad_norm": 0.0192073043435812,
      "learning_rate": 1.5e-05,
      "loss": 11.9334,
      "step": 3
    },
    {
      "epoch": 0.0025113796892167633,
      "grad_norm": 0.019655032083392143,
      "learning_rate": 2e-05,
      "loss": 11.9319,
      "step": 4
    },
    {
      "epoch": 0.0031392246115209545,
      "grad_norm": 0.02161627821624279,
      "learning_rate": 2.5e-05,
      "loss": 11.9297,
      "step": 5
    },
    {
      "epoch": 0.003767069533825145,
      "grad_norm": 0.020643681287765503,
      "learning_rate": 3e-05,
      "loss": 11.932,
      "step": 6
    },
    {
      "epoch": 0.004394914456129336,
      "grad_norm": 0.021672042086720467,
      "learning_rate": 3.5e-05,
      "loss": 11.9308,
      "step": 7
    },
    {
      "epoch": 0.005022759378433527,
      "grad_norm": 0.022838007658720016,
      "learning_rate": 4e-05,
      "loss": 11.935,
      "step": 8
    },
    {
      "epoch": 0.005650604300737718,
      "grad_norm": 0.02091800980269909,
      "learning_rate": 4.5e-05,
      "loss": 11.9292,
      "step": 9
    },
    {
      "epoch": 0.006278449223041909,
      "grad_norm": 0.02816697768867016,
      "learning_rate": 5e-05,
      "loss": 11.9201,
      "step": 10
    },
    {
      "epoch": 0.006906294145346099,
      "grad_norm": 0.018409784883260727,
      "learning_rate": 5.500000000000001e-05,
      "loss": 11.9301,
      "step": 11
    },
    {
      "epoch": 0.00753413906765029,
      "grad_norm": 0.06388456374406815,
      "learning_rate": 6e-05,
      "loss": 11.9289,
      "step": 12
    },
    {
      "epoch": 0.00816198398995448,
      "grad_norm": 0.026801548898220062,
      "learning_rate": 6.500000000000001e-05,
      "loss": 11.9307,
      "step": 13
    },
    {
      "epoch": 0.008789828912258673,
      "grad_norm": 0.029942037537693977,
      "learning_rate": 7e-05,
      "loss": 11.9314,
      "step": 14
    },
    {
      "epoch": 0.009417673834562863,
      "grad_norm": 0.025479784235358238,
      "learning_rate": 7.500000000000001e-05,
      "loss": 11.9338,
      "step": 15
    },
    {
      "epoch": 0.010045518756867053,
      "grad_norm": 0.027909839525818825,
      "learning_rate": 8e-05,
      "loss": 11.9308,
      "step": 16
    },
    {
      "epoch": 0.010673363679171245,
      "grad_norm": 0.02828867919743061,
      "learning_rate": 8.5e-05,
      "loss": 11.9292,
      "step": 17
    },
    {
      "epoch": 0.011301208601475436,
      "grad_norm": 0.022867796942591667,
      "learning_rate": 9e-05,
      "loss": 11.931,
      "step": 18
    },
    {
      "epoch": 0.011929053523779626,
      "grad_norm": 0.038008660078048706,
      "learning_rate": 9.5e-05,
      "loss": 11.9306,
      "step": 19
    },
    {
      "epoch": 0.012556898446083818,
      "grad_norm": 0.030086949467658997,
      "learning_rate": 0.0001,
      "loss": 11.9318,
      "step": 20
    },
    {
      "epoch": 0.013184743368388008,
      "grad_norm": 0.02166604809463024,
      "learning_rate": 9.991845519630678e-05,
      "loss": 11.9325,
      "step": 21
    },
    {
      "epoch": 0.013812588290692198,
      "grad_norm": 0.04334748908877373,
      "learning_rate": 9.967408676742751e-05,
      "loss": 11.9082,
      "step": 22
    },
    {
      "epoch": 0.01444043321299639,
      "grad_norm": 0.05343957245349884,
      "learning_rate": 9.926769179238466e-05,
      "loss": 11.9279,
      "step": 23
    },
    {
      "epoch": 0.01506827813530058,
      "grad_norm": 0.03913533315062523,
      "learning_rate": 9.870059584711668e-05,
      "loss": 11.9316,
      "step": 24
    },
    {
      "epoch": 0.01569612305760477,
      "grad_norm": 0.04433680325746536,
      "learning_rate": 9.797464868072488e-05,
      "loss": 11.9272,
      "step": 25
    },
    {
      "epoch": 0.01632396797990896,
      "grad_norm": 0.03774190694093704,
      "learning_rate": 9.709221818197624e-05,
      "loss": 11.9315,
      "step": 26
    },
    {
      "epoch": 0.016951812902213155,
      "grad_norm": 0.04844358563423157,
      "learning_rate": 9.60561826557425e-05,
      "loss": 11.9325,
      "step": 27
    },
    {
      "epoch": 0.017579657824517345,
      "grad_norm": 0.07205117493867874,
      "learning_rate": 9.486992143456792e-05,
      "loss": 11.9292,
      "step": 28
    },
    {
      "epoch": 0.018207502746821536,
      "grad_norm": 0.03811033442616463,
      "learning_rate": 9.353730385598887e-05,
      "loss": 11.9309,
      "step": 29
    },
    {
      "epoch": 0.018835347669125726,
      "grad_norm": 0.051117513328790665,
      "learning_rate": 9.206267664155907e-05,
      "loss": 11.9311,
      "step": 30
    },
    {
      "epoch": 0.019463192591429916,
      "grad_norm": 0.08851032704114914,
      "learning_rate": 9.045084971874738e-05,
      "loss": 11.9316,
      "step": 31
    },
    {
      "epoch": 0.020091037513734106,
      "grad_norm": 0.08320491760969162,
      "learning_rate": 8.870708053195413e-05,
      "loss": 11.9287,
      "step": 32
    },
    {
      "epoch": 0.0207188824360383,
      "grad_norm": 0.07352913171052933,
      "learning_rate": 8.683705689382024e-05,
      "loss": 11.9271,
      "step": 33
    },
    {
      "epoch": 0.02134672735834249,
      "grad_norm": 0.07860593497753143,
      "learning_rate": 8.484687843276469e-05,
      "loss": 11.9295,
      "step": 34
    },
    {
      "epoch": 0.02197457228064668,
      "grad_norm": 0.09285508096218109,
      "learning_rate": 8.274303669726426e-05,
      "loss": 11.9289,
      "step": 35
    },
    {
      "epoch": 0.02260241720295087,
      "grad_norm": 0.10461730509996414,
      "learning_rate": 8.053239398177191e-05,
      "loss": 11.9282,
      "step": 36
    },
    {
      "epoch": 0.02323026212525506,
      "grad_norm": 0.11389969289302826,
      "learning_rate": 7.822216094333847e-05,
      "loss": 11.9257,
      "step": 37
    },
    {
      "epoch": 0.02385810704755925,
      "grad_norm": 0.09162261337041855,
      "learning_rate": 7.58198730819481e-05,
      "loss": 11.9299,
      "step": 38
    },
    {
      "epoch": 0.024485951969863445,
      "grad_norm": 0.1054549589753151,
      "learning_rate": 7.333336616128369e-05,
      "loss": 11.9254,
      "step": 39
    },
    {
      "epoch": 0.025113796892167636,
      "grad_norm": 0.12928307056427002,
      "learning_rate": 7.077075065009433e-05,
      "loss": 11.9247,
      "step": 40
    },
    {
      "epoch": 0.025741641814471826,
      "grad_norm": 0.08289871364831924,
      "learning_rate": 6.814038526753205e-05,
      "loss": 11.9277,
      "step": 41
    },
    {
      "epoch": 0.026369486736776016,
      "grad_norm": 0.086146779358387,
      "learning_rate": 6.545084971874738e-05,
      "loss": 11.9263,
      "step": 42
    },
    {
      "epoch": 0.026997331659080206,
      "grad_norm": 0.1091722697019577,
      "learning_rate": 6.271091670967436e-05,
      "loss": 11.9255,
      "step": 43
    },
    {
      "epoch": 0.027625176581384397,
      "grad_norm": 0.0930371880531311,
      "learning_rate": 5.992952333228728e-05,
      "loss": 11.9262,
      "step": 44
    },
    {
      "epoch": 0.02825302150368859,
      "grad_norm": 0.09138721227645874,
      "learning_rate": 5.7115741913664264e-05,
      "loss": 11.9259,
      "step": 45
    },
    {
      "epoch": 0.02888086642599278,
      "grad_norm": 0.08972511440515518,
      "learning_rate": 5.427875042394199e-05,
      "loss": 11.9245,
      "step": 46
    },
    {
      "epoch": 0.02950871134829697,
      "grad_norm": 0.07062887400388718,
      "learning_rate": 5.142780253968481e-05,
      "loss": 11.9238,
      "step": 47
    },
    {
      "epoch": 0.03013655627060116,
      "grad_norm": 0.07069500535726547,
      "learning_rate": 4.85721974603152e-05,
      "loss": 11.9254,
      "step": 48
    },
    {
      "epoch": 0.03076440119290535,
      "grad_norm": 0.07684428244829178,
      "learning_rate": 4.5721249576058027e-05,
      "loss": 11.9225,
      "step": 49
    },
    {
      "epoch": 0.03139224611520954,
      "grad_norm": 0.08182159811258316,
      "learning_rate": 4.288425808633575e-05,
      "loss": 11.9253,
      "step": 50
    },
    {
      "epoch": 0.03139224611520954,
      "eval_loss": 11.924012184143066,
      "eval_runtime": 28.3289,
      "eval_samples_per_second": 94.709,
      "eval_steps_per_second": 23.686,
      "step": 50
    },
    {
      "epoch": 0.032020091037513736,
      "grad_norm": 0.08443606644868851,
      "learning_rate": 4.007047666771274e-05,
      "loss": 11.9236,
      "step": 51
    },
    {
      "epoch": 0.03264793595981792,
      "grad_norm": 0.125528022646904,
      "learning_rate": 3.728908329032567e-05,
      "loss": 11.9254,
      "step": 52
    },
    {
      "epoch": 0.033275780882122116,
      "grad_norm": 0.08672455698251724,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 11.9215,
      "step": 53
    },
    {
      "epoch": 0.03390362580442631,
      "grad_norm": 0.07923948019742966,
      "learning_rate": 3.1859614732467954e-05,
      "loss": 11.9247,
      "step": 54
    },
    {
      "epoch": 0.0345314707267305,
      "grad_norm": 0.07594846189022064,
      "learning_rate": 2.9229249349905684e-05,
      "loss": 11.9233,
      "step": 55
    },
    {
      "epoch": 0.03515931564903469,
      "grad_norm": 0.0686110109090805,
      "learning_rate": 2.6666633838716314e-05,
      "loss": 11.9232,
      "step": 56
    },
    {
      "epoch": 0.03578716057133888,
      "grad_norm": 0.08526276051998138,
      "learning_rate": 2.418012691805191e-05,
      "loss": 11.9256,
      "step": 57
    },
    {
      "epoch": 0.03641500549364307,
      "grad_norm": 0.07757719606161118,
      "learning_rate": 2.1777839056661554e-05,
      "loss": 11.9213,
      "step": 58
    },
    {
      "epoch": 0.03704285041594726,
      "grad_norm": 0.08641009032726288,
      "learning_rate": 1.946760601822809e-05,
      "loss": 11.9248,
      "step": 59
    },
    {
      "epoch": 0.03767069533825145,
      "grad_norm": 0.09379945695400238,
      "learning_rate": 1.725696330273575e-05,
      "loss": 11.9224,
      "step": 60
    },
    {
      "epoch": 0.038298540260555645,
      "grad_norm": 0.07984328269958496,
      "learning_rate": 1.5153121567235335e-05,
      "loss": 11.9233,
      "step": 61
    },
    {
      "epoch": 0.03892638518285983,
      "grad_norm": 0.079765185713768,
      "learning_rate": 1.3162943106179749e-05,
      "loss": 11.9233,
      "step": 62
    },
    {
      "epoch": 0.039554230105164026,
      "grad_norm": 0.06862898170948029,
      "learning_rate": 1.1292919468045877e-05,
      "loss": 11.9237,
      "step": 63
    },
    {
      "epoch": 0.04018207502746821,
      "grad_norm": 0.07996665686368942,
      "learning_rate": 9.549150281252633e-06,
      "loss": 11.9191,
      "step": 64
    },
    {
      "epoch": 0.04080991994977241,
      "grad_norm": 0.07772701978683472,
      "learning_rate": 7.937323358440935e-06,
      "loss": 11.9224,
      "step": 65
    },
    {
      "epoch": 0.0414377648720766,
      "grad_norm": 0.06042219698429108,
      "learning_rate": 6.462696144011149e-06,
      "loss": 11.9217,
      "step": 66
    },
    {
      "epoch": 0.04206560979438079,
      "grad_norm": 0.07889144122600555,
      "learning_rate": 5.13007856543209e-06,
      "loss": 11.9256,
      "step": 67
    },
    {
      "epoch": 0.04269345471668498,
      "grad_norm": 0.16606417298316956,
      "learning_rate": 3.9438173442575e-06,
      "loss": 11.9278,
      "step": 68
    },
    {
      "epoch": 0.04332129963898917,
      "grad_norm": 0.05187804624438286,
      "learning_rate": 2.9077818180237693e-06,
      "loss": 11.9209,
      "step": 69
    },
    {
      "epoch": 0.04394914456129336,
      "grad_norm": 0.060179345309734344,
      "learning_rate": 2.0253513192751373e-06,
      "loss": 11.9248,
      "step": 70
    },
    {
      "epoch": 0.04457698948359755,
      "grad_norm": 0.07308786362409592,
      "learning_rate": 1.2994041528833266e-06,
      "loss": 11.9221,
      "step": 71
    },
    {
      "epoch": 0.04520483440590174,
      "grad_norm": 0.05937618017196655,
      "learning_rate": 7.323082076153509e-07,
      "loss": 11.9224,
      "step": 72
    },
    {
      "epoch": 0.045832679328205936,
      "grad_norm": 0.0707944706082344,
      "learning_rate": 3.2591323257248893e-07,
      "loss": 11.9222,
      "step": 73
    },
    {
      "epoch": 0.04646052425051012,
      "grad_norm": 0.056737788021564484,
      "learning_rate": 8.15448036932176e-08,
      "loss": 11.9212,
      "step": 74
    },
    {
      "epoch": 0.047088369172814316,
      "grad_norm": 0.06692962348461151,
      "learning_rate": 0.0,
      "loss": 11.9188,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 729150259200.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}