{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.04764173415912339,
  "eval_steps": 9,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0009528346831824678,
      "grad_norm": 1.3314216136932373,
      "learning_rate": 1e-05,
      "loss": 2.8561,
      "step": 1
    },
    {
      "epoch": 0.0009528346831824678,
      "eval_loss": 3.010896921157837,
      "eval_runtime": 194.9161,
      "eval_samples_per_second": 4.535,
      "eval_steps_per_second": 0.569,
      "step": 1
    },
    {
      "epoch": 0.0019056693663649356,
      "grad_norm": 1.108765721321106,
      "learning_rate": 2e-05,
      "loss": 2.8611,
      "step": 2
    },
    {
      "epoch": 0.0028585040495474035,
      "grad_norm": 1.0992165803909302,
      "learning_rate": 3e-05,
      "loss": 2.958,
      "step": 3
    },
    {
      "epoch": 0.003811338732729871,
      "grad_norm": 0.9974603652954102,
      "learning_rate": 4e-05,
      "loss": 2.8721,
      "step": 4
    },
    {
      "epoch": 0.004764173415912339,
      "grad_norm": 1.0912249088287354,
      "learning_rate": 5e-05,
      "loss": 2.8375,
      "step": 5
    },
    {
      "epoch": 0.005717008099094807,
      "grad_norm": 1.070563793182373,
      "learning_rate": 6e-05,
      "loss": 2.7195,
      "step": 6
    },
    {
      "epoch": 0.006669842782277275,
      "grad_norm": 1.2317885160446167,
      "learning_rate": 7e-05,
      "loss": 2.816,
      "step": 7
    },
    {
      "epoch": 0.007622677465459742,
      "grad_norm": 1.3422398567199707,
      "learning_rate": 8e-05,
      "loss": 2.6737,
      "step": 8
    },
    {
      "epoch": 0.00857551214864221,
      "grad_norm": 1.3986526727676392,
      "learning_rate": 9e-05,
      "loss": 2.7706,
      "step": 9
    },
    {
      "epoch": 0.00857551214864221,
      "eval_loss": 2.8102166652679443,
      "eval_runtime": 195.3531,
      "eval_samples_per_second": 4.525,
      "eval_steps_per_second": 0.568,
      "step": 9
    },
    {
      "epoch": 0.009528346831824679,
      "grad_norm": 1.2939263582229614,
      "learning_rate": 0.0001,
      "loss": 2.8952,
      "step": 10
    },
    {
      "epoch": 0.010481181515007145,
      "grad_norm": 1.1111377477645874,
      "learning_rate": 9.99695413509548e-05,
      "loss": 2.7657,
      "step": 11
    },
    {
      "epoch": 0.011434016198189614,
      "grad_norm": 1.2477428913116455,
      "learning_rate": 9.987820251299122e-05,
      "loss": 2.4967,
      "step": 12
    },
    {
      "epoch": 0.012386850881372083,
      "grad_norm": 1.0746080875396729,
      "learning_rate": 9.972609476841367e-05,
      "loss": 2.359,
      "step": 13
    },
    {
      "epoch": 0.01333968556455455,
      "grad_norm": 1.6572176218032837,
      "learning_rate": 9.951340343707852e-05,
      "loss": 2.608,
      "step": 14
    },
    {
      "epoch": 0.014292520247737018,
      "grad_norm": 1.4233778715133667,
      "learning_rate": 9.924038765061042e-05,
      "loss": 2.4839,
      "step": 15
    },
    {
      "epoch": 0.015245354930919485,
      "grad_norm": 1.346785306930542,
      "learning_rate": 9.890738003669029e-05,
      "loss": 2.4763,
      "step": 16
    },
    {
      "epoch": 0.016198189614101955,
      "grad_norm": 1.2152442932128906,
      "learning_rate": 9.851478631379982e-05,
      "loss": 2.428,
      "step": 17
    },
    {
      "epoch": 0.01715102429728442,
      "grad_norm": 1.0687611103057861,
      "learning_rate": 9.806308479691595e-05,
      "loss": 2.4665,
      "step": 18
    },
    {
      "epoch": 0.01715102429728442,
      "eval_loss": 2.5243494510650635,
      "eval_runtime": 195.3053,
      "eval_samples_per_second": 4.526,
      "eval_steps_per_second": 0.568,
      "step": 18
    },
    {
      "epoch": 0.01810385898046689,
      "grad_norm": 1.1508054733276367,
      "learning_rate": 9.755282581475769e-05,
      "loss": 2.3153,
      "step": 19
    },
    {
      "epoch": 0.019056693663649357,
      "grad_norm": 1.267975091934204,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.4924,
      "step": 20
    },
    {
      "epoch": 0.020009528346831826,
      "grad_norm": 1.2117677927017212,
      "learning_rate": 9.635919272833938e-05,
      "loss": 2.4421,
      "step": 21
    },
    {
      "epoch": 0.02096236303001429,
      "grad_norm": 1.7183105945587158,
      "learning_rate": 9.567727288213005e-05,
      "loss": 2.6475,
      "step": 22
    },
    {
      "epoch": 0.02191519771319676,
      "grad_norm": 1.2931954860687256,
      "learning_rate": 9.493970231495835e-05,
      "loss": 2.0165,
      "step": 23
    },
    {
      "epoch": 0.022868032396379228,
      "grad_norm": 1.4074746370315552,
      "learning_rate": 9.414737964294636e-05,
      "loss": 2.8582,
      "step": 24
    },
    {
      "epoch": 0.023820867079561697,
      "grad_norm": 1.149705410003662,
      "learning_rate": 9.330127018922194e-05,
      "loss": 2.2325,
      "step": 25
    },
    {
      "epoch": 0.024773701762744165,
      "grad_norm": 1.1608455181121826,
      "learning_rate": 9.24024048078213e-05,
      "loss": 2.4246,
      "step": 26
    },
    {
      "epoch": 0.02572653644592663,
      "grad_norm": 1.520585298538208,
      "learning_rate": 9.145187862775209e-05,
      "loss": 2.7656,
      "step": 27
    },
    {
      "epoch": 0.02572653644592663,
      "eval_loss": 2.388444423675537,
      "eval_runtime": 195.2733,
      "eval_samples_per_second": 4.527,
      "eval_steps_per_second": 0.568,
      "step": 27
    },
    {
      "epoch": 0.0266793711291091,
      "grad_norm": 1.0983319282531738,
      "learning_rate": 9.045084971874738e-05,
      "loss": 2.0036,
      "step": 28
    },
    {
      "epoch": 0.027632205812291567,
      "grad_norm": 1.2645291090011597,
      "learning_rate": 8.940053768033609e-05,
      "loss": 2.3768,
      "step": 29
    },
    {
      "epoch": 0.028585040495474036,
      "grad_norm": 1.0958126783370972,
      "learning_rate": 8.83022221559489e-05,
      "loss": 2.1706,
      "step": 30
    },
    {
      "epoch": 0.029537875178656504,
      "grad_norm": 1.6864413022994995,
      "learning_rate": 8.715724127386972e-05,
      "loss": 2.7613,
      "step": 31
    },
    {
      "epoch": 0.03049070986183897,
      "grad_norm": 8.486473083496094,
      "learning_rate": 8.596699001693255e-05,
      "loss": 2.4582,
      "step": 32
    },
    {
      "epoch": 0.03144354454502144,
      "grad_norm": 1.4310030937194824,
      "learning_rate": 8.473291852294987e-05,
      "loss": 2.5887,
      "step": 33
    },
    {
      "epoch": 0.03239637922820391,
      "grad_norm": 1.9405314922332764,
      "learning_rate": 8.345653031794292e-05,
      "loss": 2.3926,
      "step": 34
    },
    {
      "epoch": 0.03334921391138637,
      "grad_norm": 1.6728031635284424,
      "learning_rate": 8.213938048432697e-05,
      "loss": 2.3928,
      "step": 35
    },
    {
      "epoch": 0.03430204859456884,
      "grad_norm": 2.5665833950042725,
      "learning_rate": 8.07830737662829e-05,
      "loss": 2.5681,
      "step": 36
    },
    {
      "epoch": 0.03430204859456884,
      "eval_loss": 2.3183698654174805,
      "eval_runtime": 195.29,
      "eval_samples_per_second": 4.527,
      "eval_steps_per_second": 0.568,
      "step": 36
    },
    {
      "epoch": 0.03525488327775131,
      "grad_norm": 3.006268262863159,
      "learning_rate": 7.938926261462366e-05,
      "loss": 2.2294,
      "step": 37
    },
    {
      "epoch": 0.03620771796093378,
      "grad_norm": 1.0738325119018555,
      "learning_rate": 7.795964517353735e-05,
      "loss": 2.3217,
      "step": 38
    },
    {
      "epoch": 0.037160552644116246,
      "grad_norm": 1.1440376043319702,
      "learning_rate": 7.649596321166024e-05,
      "loss": 2.1976,
      "step": 39
    },
    {
      "epoch": 0.038113387327298714,
      "grad_norm": 1.2780272960662842,
      "learning_rate": 7.500000000000001e-05,
      "loss": 2.2665,
      "step": 40
    },
    {
      "epoch": 0.03906622201048118,
      "grad_norm": 1.0667051076889038,
      "learning_rate": 7.347357813929454e-05,
      "loss": 2.2043,
      "step": 41
    },
    {
      "epoch": 0.04001905669366365,
      "grad_norm": 1.230981707572937,
      "learning_rate": 7.191855733945387e-05,
      "loss": 1.9738,
      "step": 42
    },
    {
      "epoch": 0.04097189137684612,
      "grad_norm": 1.1416908502578735,
      "learning_rate": 7.033683215379002e-05,
      "loss": 2.256,
      "step": 43
    },
    {
      "epoch": 0.04192472606002858,
      "grad_norm": 1.4842567443847656,
      "learning_rate": 6.873032967079561e-05,
      "loss": 2.2914,
      "step": 44
    },
    {
      "epoch": 0.04287756074321105,
      "grad_norm": 1.039803147315979,
      "learning_rate": 6.710100716628344e-05,
      "loss": 2.1142,
      "step": 45
    },
    {
      "epoch": 0.04287756074321105,
      "eval_loss": 2.283740520477295,
      "eval_runtime": 195.2949,
      "eval_samples_per_second": 4.526,
      "eval_steps_per_second": 0.568,
      "step": 45
    },
    {
      "epoch": 0.04383039542639352,
      "grad_norm": 2.066823720932007,
      "learning_rate": 6.545084971874738e-05,
      "loss": 2.2865,
      "step": 46
    },
    {
      "epoch": 0.04478323010957599,
      "grad_norm": 1.3638314008712769,
      "learning_rate": 6.378186779084995e-05,
      "loss": 2.1797,
      "step": 47
    },
    {
      "epoch": 0.045736064792758456,
      "grad_norm": 1.3101152181625366,
      "learning_rate": 6.209609477998338e-05,
      "loss": 2.3253,
      "step": 48
    },
    {
      "epoch": 0.046688899475940925,
      "grad_norm": 1.1173464059829712,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 2.1469,
      "step": 49
    },
    {
      "epoch": 0.04764173415912339,
      "grad_norm": 0.9268816113471985,
      "learning_rate": 5.868240888334653e-05,
      "loss": 2.0037,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.41887283560448e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}