{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.07146260123868509,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0009528346831824678,
      "grad_norm": 0.6638647317886353,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 1.7136,
      "step": 1
    },
    {
      "epoch": 0.0009528346831824678,
      "eval_loss": NaN,
      "eval_runtime": 190.8211,
      "eval_samples_per_second": 4.633,
      "eval_steps_per_second": 2.316,
      "step": 1
    },
    {
      "epoch": 0.0019056693663649356,
      "grad_norm": 1.9228756427764893,
      "learning_rate": 6.666666666666667e-05,
      "loss": 2.09,
      "step": 2
    },
    {
      "epoch": 0.0028585040495474035,
      "grad_norm": 1.071838617324829,
      "learning_rate": 0.0001,
      "loss": 2.5374,
      "step": 3
    },
    {
      "epoch": 0.003811338732729871,
      "grad_norm": 1.0017715692520142,
      "learning_rate": 9.99524110790929e-05,
      "loss": 2.434,
      "step": 4
    },
    {
      "epoch": 0.004764173415912339,
      "grad_norm": 0.9518014788627625,
      "learning_rate": 9.980973490458728e-05,
      "loss": 2.4302,
      "step": 5
    },
    {
      "epoch": 0.005717008099094807,
      "grad_norm": 1.4896080493927002,
      "learning_rate": 9.957224306869053e-05,
      "loss": 2.3739,
      "step": 6
    },
    {
      "epoch": 0.006669842782277275,
      "grad_norm": 0.9383845329284668,
      "learning_rate": 9.924038765061042e-05,
      "loss": 2.6915,
      "step": 7
    },
    {
      "epoch": 0.007622677465459742,
      "grad_norm": 1.1015162467956543,
      "learning_rate": 9.881480035599667e-05,
      "loss": 2.5915,
      "step": 8
    },
    {
      "epoch": 0.00857551214864221,
      "grad_norm": 1.1465002298355103,
      "learning_rate": 9.829629131445342e-05,
      "loss": 2.4781,
      "step": 9
    },
    {
      "epoch": 0.009528346831824679,
      "grad_norm": 1.1309946775436401,
      "learning_rate": 9.768584753741134e-05,
      "loss": 2.4019,
      "step": 10
    },
    {
      "epoch": 0.010481181515007145,
      "grad_norm": 1.258188009262085,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.5504,
      "step": 11
    },
    {
      "epoch": 0.011434016198189614,
      "grad_norm": 1.2431732416152954,
      "learning_rate": 9.619397662556435e-05,
      "loss": 2.3838,
      "step": 12
    },
    {
      "epoch": 0.012386850881372083,
      "grad_norm": 1.3730213642120361,
      "learning_rate": 9.53153893518325e-05,
      "loss": 2.3747,
      "step": 13
    },
    {
      "epoch": 0.01333968556455455,
      "grad_norm": 1.4390398263931274,
      "learning_rate": 9.435054165891109e-05,
      "loss": 2.4125,
      "step": 14
    },
    {
      "epoch": 0.014292520247737018,
      "grad_norm": 1.2996867895126343,
      "learning_rate": 9.330127018922194e-05,
      "loss": 2.2455,
      "step": 15
    },
    {
      "epoch": 0.015245354930919485,
      "grad_norm": 1.3530491590499878,
      "learning_rate": 9.21695722906443e-05,
      "loss": 2.5976,
      "step": 16
    },
    {
      "epoch": 0.016198189614101955,
      "grad_norm": 1.3274792432785034,
      "learning_rate": 9.09576022144496e-05,
      "loss": 2.5624,
      "step": 17
    },
    {
      "epoch": 0.01715102429728442,
      "grad_norm": 1.6145275831222534,
      "learning_rate": 8.966766701456177e-05,
      "loss": 2.4993,
      "step": 18
    },
    {
      "epoch": 0.01810385898046689,
      "grad_norm": 1.4638392925262451,
      "learning_rate": 8.83022221559489e-05,
      "loss": 2.4422,
      "step": 19
    },
    {
      "epoch": 0.019056693663649357,
      "grad_norm": 1.3674169778823853,
      "learning_rate": 8.68638668405062e-05,
      "loss": 2.4834,
      "step": 20
    },
    {
      "epoch": 0.020009528346831826,
      "grad_norm": 1.5228791236877441,
      "learning_rate": 8.535533905932738e-05,
      "loss": 2.5919,
      "step": 21
    },
    {
      "epoch": 0.02096236303001429,
      "grad_norm": 1.3580362796783447,
      "learning_rate": 8.377951038078302e-05,
      "loss": 2.401,
      "step": 22
    },
    {
      "epoch": 0.02191519771319676,
      "grad_norm": 1.3920480012893677,
      "learning_rate": 8.213938048432697e-05,
      "loss": 2.3787,
      "step": 23
    },
    {
      "epoch": 0.022868032396379228,
      "grad_norm": 1.4610280990600586,
      "learning_rate": 8.043807145043604e-05,
      "loss": 2.3116,
      "step": 24
    },
    {
      "epoch": 0.023820867079561697,
      "grad_norm": 1.4679853916168213,
      "learning_rate": 7.86788218175523e-05,
      "loss": 2.5363,
      "step": 25
    },
    {
      "epoch": 0.023820867079561697,
      "eval_loss": NaN,
      "eval_runtime": 189.9742,
      "eval_samples_per_second": 4.653,
      "eval_steps_per_second": 2.327,
      "step": 25
    },
    {
      "epoch": 0.024773701762744165,
      "grad_norm": 1.4650429487228394,
      "learning_rate": 7.68649804173412e-05,
      "loss": 2.453,
      "step": 26
    },
    {
      "epoch": 0.02572653644592663,
      "grad_norm": 1.5379657745361328,
      "learning_rate": 7.500000000000001e-05,
      "loss": 2.4569,
      "step": 27
    },
    {
      "epoch": 0.0266793711291091,
      "grad_norm": 1.4416159391403198,
      "learning_rate": 7.308743066175172e-05,
      "loss": 2.0689,
      "step": 28
    },
    {
      "epoch": 0.027632205812291567,
      "grad_norm": 1.9543250799179077,
      "learning_rate": 7.113091308703498e-05,
      "loss": 2.7355,
      "step": 29
    },
    {
      "epoch": 0.028585040495474036,
      "grad_norm": 4.280240058898926,
      "learning_rate": 6.91341716182545e-05,
      "loss": 2.3589,
      "step": 30
    },
    {
      "epoch": 0.029537875178656504,
      "grad_norm": 2.5730347633361816,
      "learning_rate": 6.710100716628344e-05,
      "loss": 2.5142,
      "step": 31
    },
    {
      "epoch": 0.03049070986183897,
      "grad_norm": 1.9060622453689575,
      "learning_rate": 6.503528997521366e-05,
      "loss": 2.4468,
      "step": 32
    },
    {
      "epoch": 0.03144354454502144,
      "grad_norm": 2.6304562091827393,
      "learning_rate": 6.294095225512603e-05,
      "loss": 2.5699,
      "step": 33
    },
    {
      "epoch": 0.03239637922820391,
      "grad_norm": 2.1982674598693848,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 2.5823,
      "step": 34
    },
    {
      "epoch": 0.03334921391138637,
      "grad_norm": 1.9779804944992065,
      "learning_rate": 5.868240888334653e-05,
      "loss": 2.0681,
      "step": 35
    },
    {
      "epoch": 0.03430204859456884,
      "grad_norm": 2.2652299404144287,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 2.5656,
      "step": 36
    },
    {
      "epoch": 0.03525488327775131,
      "grad_norm": 2.288848400115967,
      "learning_rate": 5.435778713738292e-05,
      "loss": 2.442,
      "step": 37
    },
    {
      "epoch": 0.03620771796093378,
      "grad_norm": 2.1887640953063965,
      "learning_rate": 5.218096936826681e-05,
      "loss": 2.4672,
      "step": 38
    },
    {
      "epoch": 0.037160552644116246,
      "grad_norm": 2.69694185256958,
      "learning_rate": 5e-05,
      "loss": 2.8142,
      "step": 39
    },
    {
      "epoch": 0.038113387327298714,
      "grad_norm": 2.768056869506836,
      "learning_rate": 4.781903063173321e-05,
      "loss": 2.7236,
      "step": 40
    },
    {
      "epoch": 0.03906622201048118,
      "grad_norm": 4.088725566864014,
      "learning_rate": 4.564221286261709e-05,
      "loss": 2.5499,
      "step": 41
    },
    {
      "epoch": 0.04001905669366365,
      "grad_norm": 3.419790744781494,
      "learning_rate": 4.347369038899744e-05,
      "loss": 2.9799,
      "step": 42
    },
    {
      "epoch": 0.04097189137684612,
      "grad_norm": 6.255226135253906,
      "learning_rate": 4.131759111665349e-05,
      "loss": 2.883,
      "step": 43
    },
    {
      "epoch": 0.04192472606002858,
      "grad_norm": 0.0,
      "learning_rate": 3.917801930309486e-05,
      "loss": 0.0,
      "step": 44
    },
    {
      "epoch": 0.04287756074321105,
      "grad_norm": 0.0,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.0,
      "step": 45
    },
    {
      "epoch": 0.04383039542639352,
      "grad_norm": 0.0,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 0.0,
      "step": 46
    },
    {
      "epoch": 0.04478323010957599,
      "grad_norm": 0.0,
      "learning_rate": 3.289899283371657e-05,
      "loss": 0.0,
      "step": 47
    },
    {
      "epoch": 0.045736064792758456,
      "grad_norm": 8.734387397766113,
      "learning_rate": 3.086582838174551e-05,
      "loss": 2.4118,
      "step": 48
    },
    {
      "epoch": 0.046688899475940925,
      "grad_norm": 3.778465509414673,
      "learning_rate": 2.886908691296504e-05,
      "loss": 2.7381,
      "step": 49
    },
    {
      "epoch": 0.04764173415912339,
      "grad_norm": 2.9656107425689697,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 2.6656,
      "step": 50
    },
    {
      "epoch": 0.04764173415912339,
      "eval_loss": NaN,
      "eval_runtime": 190.1935,
      "eval_samples_per_second": 4.648,
      "eval_steps_per_second": 2.324,
      "step": 50
    },
    {
      "epoch": 0.04859456884230586,
      "grad_norm": 5.057662487030029,
      "learning_rate": 2.500000000000001e-05,
      "loss": 2.1957,
      "step": 51
    },
    {
      "epoch": 0.04954740352548833,
      "grad_norm": 2.368539571762085,
      "learning_rate": 2.3135019582658802e-05,
      "loss": 2.2245,
      "step": 52
    },
    {
      "epoch": 0.0505002382086708,
      "grad_norm": 2.0508615970611572,
      "learning_rate": 2.132117818244771e-05,
      "loss": 2.2957,
      "step": 53
    },
    {
      "epoch": 0.05145307289185326,
      "grad_norm": 1.9854440689086914,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 2.1347,
      "step": 54
    },
    {
      "epoch": 0.05240590757503573,
      "grad_norm": 1.7406495809555054,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 2.2859,
      "step": 55
    },
    {
      "epoch": 0.0533587422582182,
      "grad_norm": 1.5683196783065796,
      "learning_rate": 1.622048961921699e-05,
      "loss": 2.342,
      "step": 56
    },
    {
      "epoch": 0.054311576941400666,
      "grad_norm": 1.467210292816162,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 2.331,
      "step": 57
    },
    {
      "epoch": 0.055264411624583135,
      "grad_norm": 1.634995698928833,
      "learning_rate": 1.3136133159493802e-05,
      "loss": 2.1551,
      "step": 58
    },
    {
      "epoch": 0.0562172463077656,
      "grad_norm": 1.3047391176223755,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 2.2066,
      "step": 59
    },
    {
      "epoch": 0.05717008099094807,
      "grad_norm": 1.1119383573532104,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 2.0849,
      "step": 60
    },
    {
      "epoch": 0.05812291567413054,
      "grad_norm": 1.1421319246292114,
      "learning_rate": 9.042397785550405e-06,
      "loss": 2.1172,
      "step": 61
    },
    {
      "epoch": 0.05907575035731301,
      "grad_norm": 1.152631402015686,
      "learning_rate": 7.830427709355725e-06,
      "loss": 2.4149,
      "step": 62
    },
    {
      "epoch": 0.06002858504049548,
      "grad_norm": 1.3147021532058716,
      "learning_rate": 6.698729810778065e-06,
      "loss": 2.3454,
      "step": 63
    },
    {
      "epoch": 0.06098141972367794,
      "grad_norm": 1.3595980405807495,
      "learning_rate": 5.649458341088915e-06,
      "loss": 2.0931,
      "step": 64
    },
    {
      "epoch": 0.06193425440686041,
      "grad_norm": 2.0441415309906006,
      "learning_rate": 4.684610648167503e-06,
      "loss": 2.3878,
      "step": 65
    },
    {
      "epoch": 0.06288708909004288,
      "grad_norm": 1.2968662977218628,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 2.1372,
      "step": 66
    },
    {
      "epoch": 0.06383992377322535,
      "grad_norm": 1.2718733549118042,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 2.2646,
      "step": 67
    },
    {
      "epoch": 0.06479275845640782,
      "grad_norm": 1.2259955406188965,
      "learning_rate": 2.314152462588659e-06,
      "loss": 2.2772,
      "step": 68
    },
    {
      "epoch": 0.06574559313959027,
      "grad_norm": 1.2223516702651978,
      "learning_rate": 1.70370868554659e-06,
      "loss": 2.2698,
      "step": 69
    },
    {
      "epoch": 0.06669842782277274,
      "grad_norm": 1.2529011964797974,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 2.2766,
      "step": 70
    },
    {
      "epoch": 0.06765126250595521,
      "grad_norm": 1.4887171983718872,
      "learning_rate": 7.596123493895991e-07,
      "loss": 2.2977,
      "step": 71
    },
    {
      "epoch": 0.06860409718913768,
      "grad_norm": 1.5588957071304321,
      "learning_rate": 4.277569313094809e-07,
      "loss": 2.3476,
      "step": 72
    },
    {
      "epoch": 0.06955693187232015,
      "grad_norm": 1.3241275548934937,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 2.3195,
      "step": 73
    },
    {
      "epoch": 0.07050976655550262,
      "grad_norm": 1.6149224042892456,
      "learning_rate": 4.7588920907110094e-08,
      "loss": 2.4973,
      "step": 74
    },
    {
      "epoch": 0.07146260123868509,
      "grad_norm": 1.5767426490783691,
      "learning_rate": 0.0,
      "loss": 2.4994,
      "step": 75
    },
    {
      "epoch": 0.07146260123868509,
      "eval_loss": NaN,
      "eval_runtime": 189.8117,
      "eval_samples_per_second": 4.657,
      "eval_steps_per_second": 2.329,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.1242647625387213e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}