{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.02796420581655481,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0003728560775540641,
      "grad_norm": 14.098886489868164,
      "learning_rate": 6.666666666666667e-05,
      "loss": 14.0719,
      "step": 1
    },
    {
      "epoch": 0.0003728560775540641,
      "eval_loss": 1.8703862428665161,
      "eval_runtime": 509.0913,
      "eval_samples_per_second": 4.437,
      "eval_steps_per_second": 2.22,
      "step": 1
    },
    {
      "epoch": 0.0007457121551081282,
      "grad_norm": 17.57554054260254,
      "learning_rate": 0.00013333333333333334,
      "loss": 16.0438,
      "step": 2
    },
    {
      "epoch": 0.0011185682326621924,
      "grad_norm": 14.289568901062012,
      "learning_rate": 0.0002,
      "loss": 14.013,
      "step": 3
    },
    {
      "epoch": 0.0014914243102162564,
      "grad_norm": 16.104290008544922,
      "learning_rate": 0.0001999048221581858,
      "loss": 12.8257,
      "step": 4
    },
    {
      "epoch": 0.0018642803877703207,
      "grad_norm": 21.064966201782227,
      "learning_rate": 0.00019961946980917456,
      "loss": 10.156,
      "step": 5
    },
    {
      "epoch": 0.0022371364653243847,
      "grad_norm": 15.340848922729492,
      "learning_rate": 0.00019914448613738106,
      "loss": 7.2449,
      "step": 6
    },
    {
      "epoch": 0.002609992542878449,
      "grad_norm": 18.457630157470703,
      "learning_rate": 0.00019848077530122083,
      "loss": 6.17,
      "step": 7
    },
    {
      "epoch": 0.002982848620432513,
      "grad_norm": 13.35014820098877,
      "learning_rate": 0.00019762960071199333,
      "loss": 5.0583,
      "step": 8
    },
    {
      "epoch": 0.003355704697986577,
      "grad_norm": 32.16826629638672,
      "learning_rate": 0.00019659258262890683,
      "loss": 5.1311,
      "step": 9
    },
    {
      "epoch": 0.0037285607755406414,
      "grad_norm": 19.90258026123047,
      "learning_rate": 0.0001953716950748227,
      "loss": 4.7855,
      "step": 10
    },
    {
      "epoch": 0.004101416853094705,
      "grad_norm": 19.41876792907715,
      "learning_rate": 0.00019396926207859084,
      "loss": 4.0821,
      "step": 11
    },
    {
      "epoch": 0.0044742729306487695,
      "grad_norm": 32.109249114990234,
      "learning_rate": 0.0001923879532511287,
      "loss": 4.0448,
      "step": 12
    },
    {
      "epoch": 0.004847129008202834,
      "grad_norm": 33.09053421020508,
      "learning_rate": 0.000190630778703665,
      "loss": 5.074,
      "step": 13
    },
    {
      "epoch": 0.005219985085756898,
      "grad_norm": 12.651711463928223,
      "learning_rate": 0.00018870108331782217,
      "loss": 3.5618,
      "step": 14
    },
    {
      "epoch": 0.005592841163310962,
      "grad_norm": 10.729059219360352,
      "learning_rate": 0.00018660254037844388,
      "loss": 2.2738,
      "step": 15
    },
    {
      "epoch": 0.005965697240865026,
      "grad_norm": 17.2890567779541,
      "learning_rate": 0.0001843391445812886,
      "loss": 2.7353,
      "step": 16
    },
    {
      "epoch": 0.00633855331841909,
      "grad_norm": 23.347806930541992,
      "learning_rate": 0.0001819152044288992,
      "loss": 2.7702,
      "step": 17
    },
    {
      "epoch": 0.006711409395973154,
      "grad_norm": 22.53374481201172,
      "learning_rate": 0.00017933533402912354,
      "loss": 3.9281,
      "step": 18
    },
    {
      "epoch": 0.0070842654735272185,
      "grad_norm": 7.728074550628662,
      "learning_rate": 0.0001766044443118978,
      "loss": 2.3155,
      "step": 19
    },
    {
      "epoch": 0.007457121551081283,
      "grad_norm": 11.007797241210938,
      "learning_rate": 0.0001737277336810124,
      "loss": 2.4846,
      "step": 20
    },
    {
      "epoch": 0.007829977628635347,
      "grad_norm": 7.158455848693848,
      "learning_rate": 0.00017071067811865476,
      "loss": 1.9451,
      "step": 21
    },
    {
      "epoch": 0.00820283370618941,
      "grad_norm": 7.924790382385254,
      "learning_rate": 0.00016755902076156604,
      "loss": 1.974,
      "step": 22
    },
    {
      "epoch": 0.008575689783743476,
      "grad_norm": 10.626357078552246,
      "learning_rate": 0.00016427876096865394,
      "loss": 2.9784,
      "step": 23
    },
    {
      "epoch": 0.008948545861297539,
      "grad_norm": 8.380443572998047,
      "learning_rate": 0.00016087614290087208,
      "loss": 1.9285,
      "step": 24
    },
    {
      "epoch": 0.009321401938851604,
      "grad_norm": 8.995612144470215,
      "learning_rate": 0.0001573576436351046,
      "loss": 2.5047,
      "step": 25
    },
    {
      "epoch": 0.009321401938851604,
      "eval_loss": 0.322700172662735,
      "eval_runtime": 512.0454,
      "eval_samples_per_second": 4.412,
      "eval_steps_per_second": 2.207,
      "step": 25
    },
    {
      "epoch": 0.009694258016405667,
      "grad_norm": 18.933256149291992,
      "learning_rate": 0.0001537299608346824,
      "loss": 3.038,
      "step": 26
    },
    {
      "epoch": 0.010067114093959731,
      "grad_norm": 8.585943222045898,
      "learning_rate": 0.00015000000000000001,
      "loss": 2.5117,
      "step": 27
    },
    {
      "epoch": 0.010439970171513796,
      "grad_norm": 10.232223510742188,
      "learning_rate": 0.00014617486132350343,
      "loss": 2.5074,
      "step": 28
    },
    {
      "epoch": 0.01081282624906786,
      "grad_norm": 8.64398193359375,
      "learning_rate": 0.00014226182617406996,
      "loss": 2.9964,
      "step": 29
    },
    {
      "epoch": 0.011185682326621925,
      "grad_norm": 11.656851768493652,
      "learning_rate": 0.000138268343236509,
      "loss": 2.8767,
      "step": 30
    },
    {
      "epoch": 0.011558538404175988,
      "grad_norm": 7.217341423034668,
      "learning_rate": 0.00013420201433256689,
      "loss": 1.8044,
      "step": 31
    },
    {
      "epoch": 0.011931394481730051,
      "grad_norm": 8.281699180603027,
      "learning_rate": 0.00013007057995042732,
      "loss": 2.0798,
      "step": 32
    },
    {
      "epoch": 0.012304250559284116,
      "grad_norm": 12.387796401977539,
      "learning_rate": 0.00012588190451025207,
      "loss": 3.5874,
      "step": 33
    },
    {
      "epoch": 0.01267710663683818,
      "grad_norm": 9.76694107055664,
      "learning_rate": 0.00012164396139381029,
      "loss": 2.8491,
      "step": 34
    },
    {
      "epoch": 0.013049962714392245,
      "grad_norm": 7.733439922332764,
      "learning_rate": 0.00011736481776669306,
      "loss": 2.16,
      "step": 35
    },
    {
      "epoch": 0.013422818791946308,
      "grad_norm": 6.12880277633667,
      "learning_rate": 0.00011305261922200519,
      "loss": 2.0449,
      "step": 36
    },
    {
      "epoch": 0.013795674869500374,
      "grad_norm": 6.441651821136475,
      "learning_rate": 0.00010871557427476583,
      "loss": 1.8501,
      "step": 37
    },
    {
      "epoch": 0.014168530947054437,
      "grad_norm": 15.817225456237793,
      "learning_rate": 0.00010436193873653361,
      "loss": 2.776,
      "step": 38
    },
    {
      "epoch": 0.0145413870246085,
      "grad_norm": 8.889180183410645,
      "learning_rate": 0.0001,
      "loss": 2.1201,
      "step": 39
    },
    {
      "epoch": 0.014914243102162566,
      "grad_norm": 4.129387855529785,
      "learning_rate": 9.563806126346642e-05,
      "loss": 1.448,
      "step": 40
    },
    {
      "epoch": 0.015287099179716629,
      "grad_norm": 9.360006332397461,
      "learning_rate": 9.128442572523417e-05,
      "loss": 2.4102,
      "step": 41
    },
    {
      "epoch": 0.015659955257270694,
      "grad_norm": 11.799320220947266,
      "learning_rate": 8.694738077799488e-05,
      "loss": 2.9307,
      "step": 42
    },
    {
      "epoch": 0.01603281133482476,
      "grad_norm": 6.900726318359375,
      "learning_rate": 8.263518223330697e-05,
      "loss": 1.3355,
      "step": 43
    },
    {
      "epoch": 0.01640566741237882,
      "grad_norm": 6.313440322875977,
      "learning_rate": 7.835603860618972e-05,
      "loss": 1.6515,
      "step": 44
    },
    {
      "epoch": 0.016778523489932886,
      "grad_norm": 8.934613227844238,
      "learning_rate": 7.411809548974792e-05,
      "loss": 2.0291,
      "step": 45
    },
    {
      "epoch": 0.01715137956748695,
      "grad_norm": 9.353364944458008,
      "learning_rate": 6.992942004957271e-05,
      "loss": 1.9863,
      "step": 46
    },
    {
      "epoch": 0.017524235645041013,
      "grad_norm": 8.81008243560791,
      "learning_rate": 6.579798566743314e-05,
      "loss": 1.7731,
      "step": 47
    },
    {
      "epoch": 0.017897091722595078,
      "grad_norm": 11.235795974731445,
      "learning_rate": 6.173165676349103e-05,
      "loss": 1.6917,
      "step": 48
    },
    {
      "epoch": 0.018269947800149143,
      "grad_norm": 8.671363830566406,
      "learning_rate": 5.773817382593008e-05,
      "loss": 2.321,
      "step": 49
    },
    {
      "epoch": 0.018642803877703208,
      "grad_norm": 6.4699578285217285,
      "learning_rate": 5.382513867649663e-05,
      "loss": 1.7027,
      "step": 50
    },
    {
      "epoch": 0.018642803877703208,
      "eval_loss": 0.25740864872932434,
      "eval_runtime": 511.7924,
      "eval_samples_per_second": 4.414,
      "eval_steps_per_second": 2.208,
      "step": 50
    },
    {
      "epoch": 0.01901565995525727,
      "grad_norm": 9.196285247802734,
      "learning_rate": 5.000000000000002e-05,
      "loss": 1.7973,
      "step": 51
    },
    {
      "epoch": 0.019388516032811335,
      "grad_norm": 6.420137882232666,
      "learning_rate": 4.6270039165317605e-05,
      "loss": 1.7155,
      "step": 52
    },
    {
      "epoch": 0.0197613721103654,
      "grad_norm": 6.166407585144043,
      "learning_rate": 4.264235636489542e-05,
      "loss": 1.1948,
      "step": 53
    },
    {
      "epoch": 0.020134228187919462,
      "grad_norm": 7.9364776611328125,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 2.2336,
      "step": 54
    },
    {
      "epoch": 0.020507084265473527,
      "grad_norm": 6.744062900543213,
      "learning_rate": 3.5721239031346066e-05,
      "loss": 2.0732,
      "step": 55
    },
    {
      "epoch": 0.020879940343027592,
      "grad_norm": 6.412101745605469,
      "learning_rate": 3.244097923843398e-05,
      "loss": 2.3604,
      "step": 56
    },
    {
      "epoch": 0.021252796420581657,
      "grad_norm": 7.558949947357178,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 1.5866,
      "step": 57
    },
    {
      "epoch": 0.02162565249813572,
      "grad_norm": 5.778247356414795,
      "learning_rate": 2.6272266318987603e-05,
      "loss": 2.12,
      "step": 58
    },
    {
      "epoch": 0.021998508575689784,
      "grad_norm": 9.419931411743164,
      "learning_rate": 2.339555568810221e-05,
      "loss": 2.0553,
      "step": 59
    },
    {
      "epoch": 0.02237136465324385,
      "grad_norm": 7.691293716430664,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 2.2256,
      "step": 60
    },
    {
      "epoch": 0.02274422073079791,
      "grad_norm": 6.940426826477051,
      "learning_rate": 1.808479557110081e-05,
      "loss": 2.3064,
      "step": 61
    },
    {
      "epoch": 0.023117076808351976,
      "grad_norm": 8.362802505493164,
      "learning_rate": 1.566085541871145e-05,
      "loss": 2.308,
      "step": 62
    },
    {
      "epoch": 0.02348993288590604,
      "grad_norm": 6.858037948608398,
      "learning_rate": 1.339745962155613e-05,
      "loss": 1.7986,
      "step": 63
    },
    {
      "epoch": 0.023862788963460103,
      "grad_norm": 8.502093315124512,
      "learning_rate": 1.129891668217783e-05,
      "loss": 2.4218,
      "step": 64
    },
    {
      "epoch": 0.024235645041014168,
      "grad_norm": 4.750255584716797,
      "learning_rate": 9.369221296335006e-06,
      "loss": 1.1287,
      "step": 65
    },
    {
      "epoch": 0.024608501118568233,
      "grad_norm": 6.325159072875977,
      "learning_rate": 7.612046748871327e-06,
      "loss": 1.961,
      "step": 66
    },
    {
      "epoch": 0.024981357196122298,
      "grad_norm": 5.899595737457275,
      "learning_rate": 6.030737921409169e-06,
      "loss": 1.5426,
      "step": 67
    },
    {
      "epoch": 0.02535421327367636,
      "grad_norm": 9.170677185058594,
      "learning_rate": 4.628304925177318e-06,
      "loss": 2.2986,
      "step": 68
    },
    {
      "epoch": 0.025727069351230425,
      "grad_norm": 11.1466646194458,
      "learning_rate": 3.40741737109318e-06,
      "loss": 1.9698,
      "step": 69
    },
    {
      "epoch": 0.02609992542878449,
      "grad_norm": 5.2693190574646,
      "learning_rate": 2.3703992880066638e-06,
      "loss": 1.1575,
      "step": 70
    },
    {
      "epoch": 0.02647278150633855,
      "grad_norm": 17.38636016845703,
      "learning_rate": 1.5192246987791981e-06,
      "loss": 2.5558,
      "step": 71
    },
    {
      "epoch": 0.026845637583892617,
      "grad_norm": 7.246407985687256,
      "learning_rate": 8.555138626189618e-07,
      "loss": 2.2891,
      "step": 72
    },
    {
      "epoch": 0.027218493661446682,
      "grad_norm": 11.300233840942383,
      "learning_rate": 3.805301908254455e-07,
      "loss": 2.0134,
      "step": 73
    },
    {
      "epoch": 0.027591349739000747,
      "grad_norm": 7.813751220703125,
      "learning_rate": 9.517784181422019e-08,
      "loss": 2.4345,
      "step": 74
    },
    {
      "epoch": 0.02796420581655481,
      "grad_norm": 9.342615127563477,
      "learning_rate": 0.0,
      "loss": 1.7863,
      "step": 75
    },
    {
      "epoch": 0.02796420581655481,
      "eval_loss": 0.2411356121301651,
      "eval_runtime": 511.7025,
      "eval_samples_per_second": 4.415,
      "eval_steps_per_second": 2.208,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.9043369091072e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}