{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0396746677246578,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0007934933544931561,
      "grad_norm": 50.82598876953125,
      "learning_rate": 6.666666666666667e-05,
      "loss": 29.6105,
      "step": 1
    },
    {
      "epoch": 0.0007934933544931561,
      "eval_loss": 3.7225465774536133,
      "eval_runtime": 210.8547,
      "eval_samples_per_second": 5.037,
      "eval_steps_per_second": 2.518,
      "step": 1
    },
    {
      "epoch": 0.0015869867089863122,
      "grad_norm": 48.4813232421875,
      "learning_rate": 0.00013333333333333334,
      "loss": 29.9806,
      "step": 2
    },
    {
      "epoch": 0.0023804800634794686,
      "grad_norm": 51.97000503540039,
      "learning_rate": 0.0002,
      "loss": 26.8948,
      "step": 3
    },
    {
      "epoch": 0.0031739734179726245,
      "grad_norm": 46.51694869995117,
      "learning_rate": 0.0001999048221581858,
      "loss": 21.9835,
      "step": 4
    },
    {
      "epoch": 0.003967466772465781,
      "grad_norm": 31.5517635345459,
      "learning_rate": 0.00019961946980917456,
      "loss": 10.75,
      "step": 5
    },
    {
      "epoch": 0.004760960126958937,
      "grad_norm": 23.97246742248535,
      "learning_rate": 0.00019914448613738106,
      "loss": 6.3342,
      "step": 6
    },
    {
      "epoch": 0.005554453481452093,
      "grad_norm": 22.029800415039062,
      "learning_rate": 0.00019848077530122083,
      "loss": 4.2752,
      "step": 7
    },
    {
      "epoch": 0.006347946835945249,
      "grad_norm": 8.115840911865234,
      "learning_rate": 0.00019762960071199333,
      "loss": 3.1482,
      "step": 8
    },
    {
      "epoch": 0.007141440190438405,
      "grad_norm": 7.256277561187744,
      "learning_rate": 0.00019659258262890683,
      "loss": 2.6701,
      "step": 9
    },
    {
      "epoch": 0.007934933544931562,
      "grad_norm": 34.10689926147461,
      "learning_rate": 0.0001953716950748227,
      "loss": 4.1156,
      "step": 10
    },
    {
      "epoch": 0.008728426899424718,
      "grad_norm": 12.535100936889648,
      "learning_rate": 0.00019396926207859084,
      "loss": 2.9717,
      "step": 11
    },
    {
      "epoch": 0.009521920253917874,
      "grad_norm": 13.594742774963379,
      "learning_rate": 0.0001923879532511287,
      "loss": 3.2695,
      "step": 12
    },
    {
      "epoch": 0.010315413608411029,
      "grad_norm": 8.208749771118164,
      "learning_rate": 0.000190630778703665,
      "loss": 3.0809,
      "step": 13
    },
    {
      "epoch": 0.011108906962904185,
      "grad_norm": 4.1398773193359375,
      "learning_rate": 0.00018870108331782217,
      "loss": 2.6611,
      "step": 14
    },
    {
      "epoch": 0.011902400317397342,
      "grad_norm": 6.833378791809082,
      "learning_rate": 0.00018660254037844388,
      "loss": 3.1422,
      "step": 15
    },
    {
      "epoch": 0.012695893671890498,
      "grad_norm": 6.5985612869262695,
      "learning_rate": 0.0001843391445812886,
      "loss": 3.1404,
      "step": 16
    },
    {
      "epoch": 0.013489387026383654,
      "grad_norm": 2.9224817752838135,
      "learning_rate": 0.0001819152044288992,
      "loss": 2.8423,
      "step": 17
    },
    {
      "epoch": 0.01428288038087681,
      "grad_norm": 6.999931335449219,
      "learning_rate": 0.00017933533402912354,
      "loss": 3.2225,
      "step": 18
    },
    {
      "epoch": 0.015076373735369967,
      "grad_norm": 3.803568124771118,
      "learning_rate": 0.0001766044443118978,
      "loss": 2.888,
      "step": 19
    },
    {
      "epoch": 0.015869867089863123,
      "grad_norm": 15.167760848999023,
      "learning_rate": 0.0001737277336810124,
      "loss": 6.0032,
      "step": 20
    },
    {
      "epoch": 0.01666336044435628,
      "grad_norm": 4.701049327850342,
      "learning_rate": 0.00017071067811865476,
      "loss": 3.2004,
      "step": 21
    },
    {
      "epoch": 0.017456853798849436,
      "grad_norm": 5.967288970947266,
      "learning_rate": 0.00016755902076156604,
      "loss": 2.896,
      "step": 22
    },
    {
      "epoch": 0.018250347153342592,
      "grad_norm": 4.388175010681152,
      "learning_rate": 0.00016427876096865394,
      "loss": 2.888,
      "step": 23
    },
    {
      "epoch": 0.01904384050783575,
      "grad_norm": 3.3615219593048096,
      "learning_rate": 0.00016087614290087208,
      "loss": 2.8122,
      "step": 24
    },
    {
      "epoch": 0.0198373338623289,
      "grad_norm": 3.9244184494018555,
      "learning_rate": 0.0001573576436351046,
      "loss": 2.9292,
      "step": 25
    },
    {
      "epoch": 0.0198373338623289,
      "eval_loss": 0.3786240220069885,
      "eval_runtime": 213.3259,
      "eval_samples_per_second": 4.978,
      "eval_steps_per_second": 2.489,
      "step": 25
    },
    {
      "epoch": 0.020630827216822058,
      "grad_norm": 3.8264613151550293,
      "learning_rate": 0.0001537299608346824,
      "loss": 2.9965,
      "step": 26
    },
    {
      "epoch": 0.021424320571315214,
      "grad_norm": 4.253182411193848,
      "learning_rate": 0.00015000000000000001,
      "loss": 2.9774,
      "step": 27
    },
    {
      "epoch": 0.02221781392580837,
      "grad_norm": 3.146336555480957,
      "learning_rate": 0.00014617486132350343,
      "loss": 2.9017,
      "step": 28
    },
    {
      "epoch": 0.023011307280301527,
      "grad_norm": 2.462265968322754,
      "learning_rate": 0.00014226182617406996,
      "loss": 2.7964,
      "step": 29
    },
    {
      "epoch": 0.023804800634794683,
      "grad_norm": 4.6121673583984375,
      "learning_rate": 0.000138268343236509,
      "loss": 3.0148,
      "step": 30
    },
    {
      "epoch": 0.02459829398928784,
      "grad_norm": 2.4022810459136963,
      "learning_rate": 0.00013420201433256689,
      "loss": 2.7315,
      "step": 31
    },
    {
      "epoch": 0.025391787343780996,
      "grad_norm": 12.726351737976074,
      "learning_rate": 0.00013007057995042732,
      "loss": 3.0036,
      "step": 32
    },
    {
      "epoch": 0.026185280698274152,
      "grad_norm": 2.4199745655059814,
      "learning_rate": 0.00012588190451025207,
      "loss": 2.7967,
      "step": 33
    },
    {
      "epoch": 0.02697877405276731,
      "grad_norm": 4.458526134490967,
      "learning_rate": 0.00012164396139381029,
      "loss": 2.9338,
      "step": 34
    },
    {
      "epoch": 0.027772267407260465,
      "grad_norm": 4.283806324005127,
      "learning_rate": 0.00011736481776669306,
      "loss": 2.8298,
      "step": 35
    },
    {
      "epoch": 0.02856576076175362,
      "grad_norm": 25.05265998840332,
      "learning_rate": 0.00011305261922200519,
      "loss": 4.3322,
      "step": 36
    },
    {
      "epoch": 0.029359254116246777,
      "grad_norm": 9.16774845123291,
      "learning_rate": 0.00010871557427476583,
      "loss": 3.1166,
      "step": 37
    },
    {
      "epoch": 0.030152747470739934,
      "grad_norm": 9.907901763916016,
      "learning_rate": 0.00010436193873653361,
      "loss": 2.8739,
      "step": 38
    },
    {
      "epoch": 0.03094624082523309,
      "grad_norm": 5.029708385467529,
      "learning_rate": 0.0001,
      "loss": 2.8051,
      "step": 39
    },
    {
      "epoch": 0.031739734179726246,
      "grad_norm": 3.865046739578247,
      "learning_rate": 9.563806126346642e-05,
      "loss": 2.765,
      "step": 40
    },
    {
      "epoch": 0.0325332275342194,
      "grad_norm": 3.3393161296844482,
      "learning_rate": 9.128442572523417e-05,
      "loss": 2.8003,
      "step": 41
    },
    {
      "epoch": 0.03332672088871256,
      "grad_norm": 2.8670387268066406,
      "learning_rate": 8.694738077799488e-05,
      "loss": 2.8394,
      "step": 42
    },
    {
      "epoch": 0.034120214243205715,
      "grad_norm": 3.05271053314209,
      "learning_rate": 8.263518223330697e-05,
      "loss": 2.762,
      "step": 43
    },
    {
      "epoch": 0.03491370759769887,
      "grad_norm": 5.7023024559021,
      "learning_rate": 7.835603860618972e-05,
      "loss": 2.8546,
      "step": 44
    },
    {
      "epoch": 0.03570720095219203,
      "grad_norm": 6.604750633239746,
      "learning_rate": 7.411809548974792e-05,
      "loss": 2.7546,
      "step": 45
    },
    {
      "epoch": 0.036500694306685184,
      "grad_norm": 8.486159324645996,
      "learning_rate": 6.992942004957271e-05,
      "loss": 3.0494,
      "step": 46
    },
    {
      "epoch": 0.03729418766117834,
      "grad_norm": 4.845094203948975,
      "learning_rate": 6.579798566743314e-05,
      "loss": 2.9887,
      "step": 47
    },
    {
      "epoch": 0.0380876810156715,
      "grad_norm": 4.348211288452148,
      "learning_rate": 6.173165676349103e-05,
      "loss": 2.9466,
      "step": 48
    },
    {
      "epoch": 0.038881174370164646,
      "grad_norm": 4.239773750305176,
      "learning_rate": 5.773817382593008e-05,
      "loss": 3.1212,
      "step": 49
    },
    {
      "epoch": 0.0396746677246578,
      "grad_norm": 8.90642261505127,
      "learning_rate": 5.382513867649663e-05,
      "loss": 2.9847,
      "step": 50
    },
    {
      "epoch": 0.0396746677246578,
      "eval_loss": 0.36671048402786255,
      "eval_runtime": 213.134,
      "eval_samples_per_second": 4.983,
      "eval_steps_per_second": 2.491,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.53458201903104e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}