{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.028276544606249118,
  "eval_steps": 500,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0005655308921249824,
      "grad_norm": 9.447598457336426,
      "learning_rate": 0.0001,
      "loss": 7.5877,
      "step": 1
    },
    {
      "epoch": 0.0011310617842499647,
      "grad_norm": 12.224023818969727,
      "learning_rate": 0.0002,
      "loss": 7.4966,
      "step": 2
    },
    {
      "epoch": 0.001696592676374947,
      "grad_norm": 8.18627643585205,
      "learning_rate": 0.00019978589232386035,
      "loss": 6.524,
      "step": 3
    },
    {
      "epoch": 0.0022621235684999294,
      "grad_norm": 9.624427795410156,
      "learning_rate": 0.00019914448613738106,
      "loss": 4.5356,
      "step": 4
    },
    {
      "epoch": 0.0028276544606249117,
      "grad_norm": 8.553901672363281,
      "learning_rate": 0.00019807852804032305,
      "loss": 4.2528,
      "step": 5
    },
    {
      "epoch": 0.003393185352749894,
      "grad_norm": 5.756651401519775,
      "learning_rate": 0.00019659258262890683,
      "loss": 4.1727,
      "step": 6
    },
    {
      "epoch": 0.003958716244874877,
      "grad_norm": 2.862290143966675,
      "learning_rate": 0.0001946930129495106,
      "loss": 4.1679,
      "step": 7
    },
    {
      "epoch": 0.004524247136999859,
      "grad_norm": 1.6996726989746094,
      "learning_rate": 0.0001923879532511287,
      "loss": 4.1149,
      "step": 8
    },
    {
      "epoch": 0.005089778029124841,
      "grad_norm": 2.1115152835845947,
      "learning_rate": 0.00018968727415326884,
      "loss": 3.8839,
      "step": 9
    },
    {
      "epoch": 0.005655308921249823,
      "grad_norm": 1.4950510263442993,
      "learning_rate": 0.00018660254037844388,
      "loss": 4.0708,
      "step": 10
    },
    {
      "epoch": 0.006220839813374806,
      "grad_norm": 3.2026827335357666,
      "learning_rate": 0.00018314696123025454,
      "loss": 4.2101,
      "step": 11
    },
    {
      "epoch": 0.006786370705499788,
      "grad_norm": 1.4218138456344604,
      "learning_rate": 0.00017933533402912354,
      "loss": 4.0784,
      "step": 12
    },
    {
      "epoch": 0.00735190159762477,
      "grad_norm": 2.44765043258667,
      "learning_rate": 0.00017518398074789775,
      "loss": 4.079,
      "step": 13
    },
    {
      "epoch": 0.007917432489749753,
      "grad_norm": 1.5132642984390259,
      "learning_rate": 0.00017071067811865476,
      "loss": 4.1191,
      "step": 14
    },
    {
      "epoch": 0.008482963381874735,
      "grad_norm": 1.7532415390014648,
      "learning_rate": 0.00016593458151000688,
      "loss": 3.9846,
      "step": 15
    },
    {
      "epoch": 0.009048494273999718,
      "grad_norm": 1.528114914894104,
      "learning_rate": 0.00016087614290087208,
      "loss": 4.0772,
      "step": 16
    },
    {
      "epoch": 0.009614025166124699,
      "grad_norm": 1.0905526876449585,
      "learning_rate": 0.00015555702330196023,
      "loss": 4.1484,
      "step": 17
    },
    {
      "epoch": 0.010179556058249682,
      "grad_norm": 1.0690267086029053,
      "learning_rate": 0.00015000000000000001,
      "loss": 4.01,
      "step": 18
    },
    {
      "epoch": 0.010745086950374664,
      "grad_norm": 0.7005692720413208,
      "learning_rate": 0.00014422886902190014,
      "loss": 3.9638,
      "step": 19
    },
    {
      "epoch": 0.011310617842499647,
      "grad_norm": 0.8180456757545471,
      "learning_rate": 0.000138268343236509,
      "loss": 4.0638,
      "step": 20
    },
    {
      "epoch": 0.011876148734624628,
      "grad_norm": 0.7685333490371704,
      "learning_rate": 0.00013214394653031616,
      "loss": 4.0947,
      "step": 21
    },
    {
      "epoch": 0.012441679626749611,
      "grad_norm": 0.8267884850502014,
      "learning_rate": 0.00012588190451025207,
      "loss": 4.1512,
      "step": 22
    },
    {
      "epoch": 0.013007210518874594,
      "grad_norm": 0.80919349193573,
      "learning_rate": 0.00011950903220161285,
      "loss": 4.0781,
      "step": 23
    },
    {
      "epoch": 0.013572741410999576,
      "grad_norm": 0.862808108329773,
      "learning_rate": 0.00011305261922200519,
      "loss": 4.0492,
      "step": 24
    },
    {
      "epoch": 0.014138272303124559,
      "grad_norm": 1.0544193983078003,
      "learning_rate": 0.00010654031292301432,
      "loss": 3.9988,
      "step": 25
    },
    {
      "epoch": 0.01470380319524954,
      "grad_norm": 0.8450819253921509,
      "learning_rate": 0.0001,
      "loss": 4.0739,
      "step": 26
    },
    {
      "epoch": 0.015269334087374523,
      "grad_norm": 1.9969067573547363,
      "learning_rate": 9.345968707698569e-05,
      "loss": 3.9674,
      "step": 27
    },
    {
      "epoch": 0.015834864979499506,
      "grad_norm": 0.7638323903083801,
      "learning_rate": 8.694738077799488e-05,
      "loss": 3.9486,
      "step": 28
    },
    {
      "epoch": 0.016400395871624486,
      "grad_norm": 0.6288601756095886,
      "learning_rate": 8.049096779838719e-05,
      "loss": 3.8704,
      "step": 29
    },
    {
      "epoch": 0.01696592676374947,
      "grad_norm": 1.469429612159729,
      "learning_rate": 7.411809548974792e-05,
      "loss": 4.1891,
      "step": 30
    },
    {
      "epoch": 0.017531457655874452,
      "grad_norm": 1.1576073169708252,
      "learning_rate": 6.785605346968386e-05,
      "loss": 3.986,
      "step": 31
    },
    {
      "epoch": 0.018096988547999435,
      "grad_norm": 0.9627673029899597,
      "learning_rate": 6.173165676349103e-05,
      "loss": 4.1144,
      "step": 32
    },
    {
      "epoch": 0.01866251944012442,
      "grad_norm": 0.7569557428359985,
      "learning_rate": 5.577113097809989e-05,
      "loss": 4.0838,
      "step": 33
    },
    {
      "epoch": 0.019228050332249398,
      "grad_norm": 0.7780810594558716,
      "learning_rate": 5.000000000000002e-05,
      "loss": 4.0803,
      "step": 34
    },
    {
      "epoch": 0.01979358122437438,
      "grad_norm": 1.5027967691421509,
      "learning_rate": 4.444297669803981e-05,
      "loss": 3.9828,
      "step": 35
    },
    {
      "epoch": 0.020359112116499364,
      "grad_norm": 0.9394671320915222,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 4.0787,
      "step": 36
    },
    {
      "epoch": 0.020924643008624347,
      "grad_norm": 0.8779884576797485,
      "learning_rate": 3.406541848999312e-05,
      "loss": 4.0903,
      "step": 37
    },
    {
      "epoch": 0.021490173900749327,
      "grad_norm": 1.4611361026763916,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 3.9183,
      "step": 38
    },
    {
      "epoch": 0.02205570479287431,
      "grad_norm": 0.7395803928375244,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 4.1387,
      "step": 39
    },
    {
      "epoch": 0.022621235684999293,
      "grad_norm": 0.8143374919891357,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 4.1567,
      "step": 40
    },
    {
      "epoch": 0.023186766577124276,
      "grad_norm": 0.7555928826332092,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 4.0109,
      "step": 41
    },
    {
      "epoch": 0.023752297469249256,
      "grad_norm": 0.749727189540863,
      "learning_rate": 1.339745962155613e-05,
      "loss": 4.0942,
      "step": 42
    },
    {
      "epoch": 0.02431782836137424,
      "grad_norm": 2.71633243560791,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 3.978,
      "step": 43
    },
    {
      "epoch": 0.024883359253499222,
      "grad_norm": 0.856410026550293,
      "learning_rate": 7.612046748871327e-06,
      "loss": 3.8863,
      "step": 44
    },
    {
      "epoch": 0.025448890145624205,
      "grad_norm": 0.9397003054618835,
      "learning_rate": 5.306987050489442e-06,
      "loss": 3.8758,
      "step": 45
    },
    {
      "epoch": 0.02601442103774919,
      "grad_norm": 0.8372579216957092,
      "learning_rate": 3.40741737109318e-06,
      "loss": 4.1118,
      "step": 46
    },
    {
      "epoch": 0.026579951929874168,
      "grad_norm": 0.6836534142494202,
      "learning_rate": 1.921471959676957e-06,
      "loss": 4.0193,
      "step": 47
    },
    {
      "epoch": 0.02714548282199915,
      "grad_norm": 0.7352842688560486,
      "learning_rate": 8.555138626189618e-07,
      "loss": 3.9597,
      "step": 48
    },
    {
      "epoch": 0.027711013714124134,
      "grad_norm": 1.5941392183303833,
      "learning_rate": 2.141076761396521e-07,
      "loss": 4.1411,
      "step": 49
    },
    {
      "epoch": 0.028276544606249118,
      "grad_norm": 1.6705574989318848,
      "learning_rate": 0.0,
      "loss": 4.1692,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.23612542025728e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}