{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.08465011286681716, "eval_steps": 25, "global_step": 75, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.001128668171557562, "grad_norm": 46.31266784667969, "learning_rate": 6.666666666666667e-05, "loss": 11.9289, "step": 1 }, { "epoch": 0.001128668171557562, "eval_loss": 11.48071575164795, "eval_runtime": 218.574, "eval_samples_per_second": 3.418, "eval_steps_per_second": 1.711, "step": 1 }, { "epoch": 0.002257336343115124, "grad_norm": 32.02701187133789, "learning_rate": 0.00013333333333333334, "loss": 9.1416, "step": 2 }, { "epoch": 0.003386004514672686, "grad_norm": 22.43347930908203, "learning_rate": 0.0002, "loss": 7.6477, "step": 3 }, { "epoch": 0.004514672686230248, "grad_norm": 24.434974670410156, "learning_rate": 0.0001999048221581858, "loss": 7.0388, "step": 4 }, { "epoch": 0.0056433408577878106, "grad_norm": 21.187652587890625, "learning_rate": 0.00019961946980917456, "loss": 3.8237, "step": 5 }, { "epoch": 0.006772009029345372, "grad_norm": 18.851232528686523, "learning_rate": 0.00019914448613738106, "loss": 2.9445, "step": 6 }, { "epoch": 0.007900677200902935, "grad_norm": 21.697521209716797, "learning_rate": 0.00019848077530122083, "loss": 3.5231, "step": 7 }, { "epoch": 0.009029345372460496, "grad_norm": 14.041197776794434, "learning_rate": 0.00019762960071199333, "loss": 2.2053, "step": 8 }, { "epoch": 0.010158013544018058, "grad_norm": 15.442231178283691, "learning_rate": 0.00019659258262890683, "loss": 2.3755, "step": 9 }, { "epoch": 0.011286681715575621, "grad_norm": 14.979207038879395, "learning_rate": 0.0001953716950748227, "loss": 2.5331, "step": 10 }, { "epoch": 0.012415349887133182, "grad_norm": 11.316594123840332, "learning_rate": 0.00019396926207859084, "loss": 2.2067, "step": 11 }, { "epoch": 0.013544018058690745, "grad_norm": 10.44298267364502, "learning_rate": 0.0001923879532511287, "loss": 1.641, "step": 12 }, { "epoch": 0.014672686230248307, "grad_norm": 11.997466087341309, "learning_rate": 0.000190630778703665, "loss": 1.7946, "step": 13 }, { "epoch": 0.01580135440180587, "grad_norm": 12.115503311157227, "learning_rate": 0.00018870108331782217, "loss": 2.4233, "step": 14 }, { "epoch": 0.016930022573363433, "grad_norm": 11.261882781982422, "learning_rate": 0.00018660254037844388, "loss": 1.8223, "step": 15 }, { "epoch": 0.01805869074492099, "grad_norm": 10.030159950256348, "learning_rate": 0.0001843391445812886, "loss": 1.8842, "step": 16 }, { "epoch": 0.019187358916478554, "grad_norm": 12.912628173828125, "learning_rate": 0.0001819152044288992, "loss": 2.4811, "step": 17 }, { "epoch": 0.020316027088036117, "grad_norm": 14.034784317016602, "learning_rate": 0.00017933533402912354, "loss": 2.0478, "step": 18 }, { "epoch": 0.02144469525959368, "grad_norm": 11.340625762939453, "learning_rate": 0.0001766044443118978, "loss": 1.4553, "step": 19 }, { "epoch": 0.022573363431151242, "grad_norm": 13.417366981506348, "learning_rate": 0.0001737277336810124, "loss": 2.2667, "step": 20 }, { "epoch": 0.023702031602708805, "grad_norm": 10.488327026367188, "learning_rate": 0.00017071067811865476, "loss": 1.8455, "step": 21 }, { "epoch": 0.024830699774266364, "grad_norm": 8.669241905212402, "learning_rate": 0.00016755902076156604, "loss": 1.7471, "step": 22 }, { "epoch": 0.025959367945823927, "grad_norm": 8.316575050354004, "learning_rate": 0.00016427876096865394, "loss": 1.4701, "step": 23 }, { "epoch": 0.02708803611738149, "grad_norm": 
9.43625259399414, "learning_rate": 0.00016087614290087208, "loss": 1.6207, "step": 24 }, { "epoch": 0.028216704288939052, "grad_norm": 8.611120223999023, "learning_rate": 0.0001573576436351046, "loss": 1.642, "step": 25 }, { "epoch": 0.028216704288939052, "eval_loss": 1.9240721464157104, "eval_runtime": 220.5343, "eval_samples_per_second": 3.387, "eval_steps_per_second": 1.696, "step": 25 }, { "epoch": 0.029345372460496615, "grad_norm": 5.593110084533691, "learning_rate": 0.0001537299608346824, "loss": 1.0647, "step": 26 }, { "epoch": 0.030474040632054177, "grad_norm": 9.355537414550781, "learning_rate": 0.00015000000000000001, "loss": 1.5519, "step": 27 }, { "epoch": 0.03160270880361174, "grad_norm": 10.389217376708984, "learning_rate": 0.00014617486132350343, "loss": 1.8895, "step": 28 }, { "epoch": 0.0327313769751693, "grad_norm": 9.101856231689453, "learning_rate": 0.00014226182617406996, "loss": 1.7764, "step": 29 }, { "epoch": 0.033860045146726865, "grad_norm": 9.08023738861084, "learning_rate": 0.000138268343236509, "loss": 1.7249, "step": 30 }, { "epoch": 0.034988713318284424, "grad_norm": 8.803888320922852, "learning_rate": 0.00013420201433256689, "loss": 1.5349, "step": 31 }, { "epoch": 0.03611738148984198, "grad_norm": 9.191908836364746, "learning_rate": 0.00013007057995042732, "loss": 1.8036, "step": 32 }, { "epoch": 0.03724604966139955, "grad_norm": 7.046676158905029, "learning_rate": 0.00012588190451025207, "loss": 1.3115, "step": 33 }, { "epoch": 0.03837471783295711, "grad_norm": 8.129071235656738, "learning_rate": 0.00012164396139381029, "loss": 1.7446, "step": 34 }, { "epoch": 0.039503386004514675, "grad_norm": 7.32548189163208, "learning_rate": 0.00011736481776669306, "loss": 1.2946, "step": 35 }, { "epoch": 0.040632054176072234, "grad_norm": 8.391158103942871, "learning_rate": 0.00011305261922200519, "loss": 1.6682, "step": 36 }, { "epoch": 0.0417607223476298, "grad_norm": 9.171820640563965, "learning_rate": 0.00010871557427476583, "loss": 1.6177, "step": 37 }, { "epoch": 0.04288939051918736, "grad_norm": 8.600780487060547, "learning_rate": 0.00010436193873653361, "loss": 2.033, "step": 38 }, { "epoch": 0.04401805869074492, "grad_norm": 7.288829326629639, "learning_rate": 0.0001, "loss": 1.6004, "step": 39 }, { "epoch": 0.045146726862302484, "grad_norm": 10.171875953674316, "learning_rate": 9.563806126346642e-05, "loss": 1.6914, "step": 40 }, { "epoch": 0.046275395033860044, "grad_norm": 5.748844623565674, "learning_rate": 9.128442572523417e-05, "loss": 1.0927, "step": 41 }, { "epoch": 0.04740406320541761, "grad_norm": 7.213958263397217, "learning_rate": 8.694738077799488e-05, "loss": 1.536, "step": 42 }, { "epoch": 0.04853273137697517, "grad_norm": 8.207345962524414, "learning_rate": 8.263518223330697e-05, "loss": 1.4197, "step": 43 }, { "epoch": 0.04966139954853273, "grad_norm": 9.604900360107422, "learning_rate": 7.835603860618972e-05, "loss": 1.743, "step": 44 }, { "epoch": 0.050790067720090294, "grad_norm": 7.322094440460205, "learning_rate": 7.411809548974792e-05, "loss": 1.522, "step": 45 }, { "epoch": 0.05191873589164785, "grad_norm": 8.541365623474121, "learning_rate": 6.992942004957271e-05, "loss": 1.1983, "step": 46 }, { "epoch": 0.05304740406320542, "grad_norm": 7.738641262054443, "learning_rate": 6.579798566743314e-05, "loss": 1.1686, "step": 47 }, { "epoch": 0.05417607223476298, "grad_norm": 9.32544231414795, "learning_rate": 6.173165676349103e-05, "loss": 1.9263, "step": 48 }, { "epoch": 0.055304740406320545, "grad_norm": 9.188823699951172, 
"learning_rate": 5.773817382593008e-05, "loss": 1.7528, "step": 49 }, { "epoch": 0.056433408577878104, "grad_norm": 6.969271183013916, "learning_rate": 5.382513867649663e-05, "loss": 1.4092, "step": 50 }, { "epoch": 0.056433408577878104, "eval_loss": 1.6539942026138306, "eval_runtime": 220.8995, "eval_samples_per_second": 3.382, "eval_steps_per_second": 1.693, "step": 50 }, { "epoch": 0.05756207674943566, "grad_norm": 7.862215995788574, "learning_rate": 5.000000000000002e-05, "loss": 1.3288, "step": 51 }, { "epoch": 0.05869074492099323, "grad_norm": 6.700693130493164, "learning_rate": 4.6270039165317605e-05, "loss": 1.3217, "step": 52 }, { "epoch": 0.05981941309255079, "grad_norm": 8.424921035766602, "learning_rate": 4.264235636489542e-05, "loss": 1.4351, "step": 53 }, { "epoch": 0.060948081264108354, "grad_norm": 8.358750343322754, "learning_rate": 3.9123857099127936e-05, "loss": 1.9325, "step": 54 }, { "epoch": 0.062076749435665914, "grad_norm": 8.291664123535156, "learning_rate": 3.5721239031346066e-05, "loss": 1.1282, "step": 55 }, { "epoch": 0.06320541760722348, "grad_norm": 6.986010551452637, "learning_rate": 3.244097923843398e-05, "loss": 1.2154, "step": 56 }, { "epoch": 0.06433408577878104, "grad_norm": 5.823822975158691, "learning_rate": 2.9289321881345254e-05, "loss": 1.2519, "step": 57 }, { "epoch": 0.0654627539503386, "grad_norm": 7.550477027893066, "learning_rate": 2.6272266318987603e-05, "loss": 1.4156, "step": 58 }, { "epoch": 0.06659142212189616, "grad_norm": 6.91257381439209, "learning_rate": 2.339555568810221e-05, "loss": 1.4908, "step": 59 }, { "epoch": 0.06772009029345373, "grad_norm": 9.72311782836914, "learning_rate": 2.0664665970876496e-05, "loss": 1.7683, "step": 60 }, { "epoch": 0.06884875846501129, "grad_norm": 8.531071662902832, "learning_rate": 1.808479557110081e-05, "loss": 1.6604, "step": 61 }, { "epoch": 0.06997742663656885, "grad_norm": 6.506885051727295, "learning_rate": 1.566085541871145e-05, "loss": 1.1158, "step": 62 }, { "epoch": 0.07110609480812641, "grad_norm": 6.313655853271484, "learning_rate": 1.339745962155613e-05, "loss": 1.3392, "step": 63 }, { "epoch": 0.07223476297968397, "grad_norm": 6.619772911071777, "learning_rate": 1.129891668217783e-05, "loss": 1.3964, "step": 64 }, { "epoch": 0.07336343115124154, "grad_norm": 5.63805627822876, "learning_rate": 9.369221296335006e-06, "loss": 0.8831, "step": 65 }, { "epoch": 0.0744920993227991, "grad_norm": 8.772890090942383, "learning_rate": 7.612046748871327e-06, "loss": 1.6972, "step": 66 }, { "epoch": 0.07562076749435666, "grad_norm": 7.003053188323975, "learning_rate": 6.030737921409169e-06, "loss": 1.1805, "step": 67 }, { "epoch": 0.07674943566591422, "grad_norm": 6.298540115356445, "learning_rate": 4.628304925177318e-06, "loss": 1.3526, "step": 68 }, { "epoch": 0.07787810383747178, "grad_norm": 7.14997673034668, "learning_rate": 3.40741737109318e-06, "loss": 1.5094, "step": 69 }, { "epoch": 0.07900677200902935, "grad_norm": 7.56989860534668, "learning_rate": 2.3703992880066638e-06, "loss": 1.6164, "step": 70 }, { "epoch": 0.08013544018058691, "grad_norm": 6.899630546569824, "learning_rate": 1.5192246987791981e-06, "loss": 1.2452, "step": 71 }, { "epoch": 0.08126410835214447, "grad_norm": 6.794870853424072, "learning_rate": 8.555138626189618e-07, "loss": 1.6971, "step": 72 }, { "epoch": 0.08239277652370203, "grad_norm": 6.301301956176758, "learning_rate": 3.805301908254455e-07, "loss": 1.5406, "step": 73 }, { "epoch": 0.0835214446952596, "grad_norm": 6.646534442901611, "learning_rate": 
9.517784181422019e-08, "loss": 1.4273, "step": 74 }, { "epoch": 0.08465011286681716, "grad_norm": 6.380800724029541, "learning_rate": 0.0, "loss": 1.2305, "step": 75 }, { "epoch": 0.08465011286681716, "eval_loss": 1.612352728843689, "eval_runtime": 220.667, "eval_samples_per_second": 3.385, "eval_steps_per_second": 1.695, "step": 75 } ], "logging_steps": 1, "max_steps": 75, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.150341542313984e+17, "train_batch_size": 2, "trial_name": null, "trial_params": null }