{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.07146260123868509,
"eval_steps": 9,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0009528346831824678,
"grad_norm": 1.3314216136932373,
"learning_rate": 1e-05,
"loss": 2.8561,
"step": 1
},
{
"epoch": 0.0009528346831824678,
"eval_loss": 3.010896921157837,
"eval_runtime": 194.9161,
"eval_samples_per_second": 4.535,
"eval_steps_per_second": 0.569,
"step": 1
},
{
"epoch": 0.0019056693663649356,
"grad_norm": 1.108765721321106,
"learning_rate": 2e-05,
"loss": 2.8611,
"step": 2
},
{
"epoch": 0.0028585040495474035,
"grad_norm": 1.0992165803909302,
"learning_rate": 3e-05,
"loss": 2.958,
"step": 3
},
{
"epoch": 0.003811338732729871,
"grad_norm": 0.9974603652954102,
"learning_rate": 4e-05,
"loss": 2.8721,
"step": 4
},
{
"epoch": 0.004764173415912339,
"grad_norm": 1.0912249088287354,
"learning_rate": 5e-05,
"loss": 2.8375,
"step": 5
},
{
"epoch": 0.005717008099094807,
"grad_norm": 1.070563793182373,
"learning_rate": 6e-05,
"loss": 2.7195,
"step": 6
},
{
"epoch": 0.006669842782277275,
"grad_norm": 1.2317885160446167,
"learning_rate": 7e-05,
"loss": 2.816,
"step": 7
},
{
"epoch": 0.007622677465459742,
"grad_norm": 1.3422398567199707,
"learning_rate": 8e-05,
"loss": 2.6737,
"step": 8
},
{
"epoch": 0.00857551214864221,
"grad_norm": 1.3986526727676392,
"learning_rate": 9e-05,
"loss": 2.7706,
"step": 9
},
{
"epoch": 0.00857551214864221,
"eval_loss": 2.8102166652679443,
"eval_runtime": 195.3531,
"eval_samples_per_second": 4.525,
"eval_steps_per_second": 0.568,
"step": 9
},
{
"epoch": 0.009528346831824679,
"grad_norm": 1.2939263582229614,
"learning_rate": 0.0001,
"loss": 2.8952,
"step": 10
},
{
"epoch": 0.010481181515007145,
"grad_norm": 1.1111377477645874,
"learning_rate": 9.99695413509548e-05,
"loss": 2.7657,
"step": 11
},
{
"epoch": 0.011434016198189614,
"grad_norm": 1.2477428913116455,
"learning_rate": 9.987820251299122e-05,
"loss": 2.4967,
"step": 12
},
{
"epoch": 0.012386850881372083,
"grad_norm": 1.0746080875396729,
"learning_rate": 9.972609476841367e-05,
"loss": 2.359,
"step": 13
},
{
"epoch": 0.01333968556455455,
"grad_norm": 1.6572176218032837,
"learning_rate": 9.951340343707852e-05,
"loss": 2.608,
"step": 14
},
{
"epoch": 0.014292520247737018,
"grad_norm": 1.4233778715133667,
"learning_rate": 9.924038765061042e-05,
"loss": 2.4839,
"step": 15
},
{
"epoch": 0.015245354930919485,
"grad_norm": 1.346785306930542,
"learning_rate": 9.890738003669029e-05,
"loss": 2.4763,
"step": 16
},
{
"epoch": 0.016198189614101955,
"grad_norm": 1.2152442932128906,
"learning_rate": 9.851478631379982e-05,
"loss": 2.428,
"step": 17
},
{
"epoch": 0.01715102429728442,
"grad_norm": 1.0687611103057861,
"learning_rate": 9.806308479691595e-05,
"loss": 2.4665,
"step": 18
},
{
"epoch": 0.01715102429728442,
"eval_loss": 2.5243494510650635,
"eval_runtime": 195.3053,
"eval_samples_per_second": 4.526,
"eval_steps_per_second": 0.568,
"step": 18
},
{
"epoch": 0.01810385898046689,
"grad_norm": 1.1508054733276367,
"learning_rate": 9.755282581475769e-05,
"loss": 2.3153,
"step": 19
},
{
"epoch": 0.019056693663649357,
"grad_norm": 1.267975091934204,
"learning_rate": 9.698463103929542e-05,
"loss": 2.4924,
"step": 20
},
{
"epoch": 0.020009528346831826,
"grad_norm": 1.2117677927017212,
"learning_rate": 9.635919272833938e-05,
"loss": 2.4421,
"step": 21
},
{
"epoch": 0.02096236303001429,
"grad_norm": 1.7183105945587158,
"learning_rate": 9.567727288213005e-05,
"loss": 2.6475,
"step": 22
},
{
"epoch": 0.02191519771319676,
"grad_norm": 1.2931954860687256,
"learning_rate": 9.493970231495835e-05,
"loss": 2.0165,
"step": 23
},
{
"epoch": 0.022868032396379228,
"grad_norm": 1.4074746370315552,
"learning_rate": 9.414737964294636e-05,
"loss": 2.8582,
"step": 24
},
{
"epoch": 0.023820867079561697,
"grad_norm": 1.149705410003662,
"learning_rate": 9.330127018922194e-05,
"loss": 2.2325,
"step": 25
},
{
"epoch": 0.024773701762744165,
"grad_norm": 1.1608455181121826,
"learning_rate": 9.24024048078213e-05,
"loss": 2.4246,
"step": 26
},
{
"epoch": 0.02572653644592663,
"grad_norm": 1.520585298538208,
"learning_rate": 9.145187862775209e-05,
"loss": 2.7656,
"step": 27
},
{
"epoch": 0.02572653644592663,
"eval_loss": 2.388444423675537,
"eval_runtime": 195.2733,
"eval_samples_per_second": 4.527,
"eval_steps_per_second": 0.568,
"step": 27
},
{
"epoch": 0.0266793711291091,
"grad_norm": 1.0983319282531738,
"learning_rate": 9.045084971874738e-05,
"loss": 2.0036,
"step": 28
},
{
"epoch": 0.027632205812291567,
"grad_norm": 1.2645291090011597,
"learning_rate": 8.940053768033609e-05,
"loss": 2.3768,
"step": 29
},
{
"epoch": 0.028585040495474036,
"grad_norm": 1.0958126783370972,
"learning_rate": 8.83022221559489e-05,
"loss": 2.1706,
"step": 30
},
{
"epoch": 0.029537875178656504,
"grad_norm": 1.6864413022994995,
"learning_rate": 8.715724127386972e-05,
"loss": 2.7613,
"step": 31
},
{
"epoch": 0.03049070986183897,
"grad_norm": 8.486473083496094,
"learning_rate": 8.596699001693255e-05,
"loss": 2.4582,
"step": 32
},
{
"epoch": 0.03144354454502144,
"grad_norm": 1.4310030937194824,
"learning_rate": 8.473291852294987e-05,
"loss": 2.5887,
"step": 33
},
{
"epoch": 0.03239637922820391,
"grad_norm": 1.9405314922332764,
"learning_rate": 8.345653031794292e-05,
"loss": 2.3926,
"step": 34
},
{
"epoch": 0.03334921391138637,
"grad_norm": 1.6728031635284424,
"learning_rate": 8.213938048432697e-05,
"loss": 2.3928,
"step": 35
},
{
"epoch": 0.03430204859456884,
"grad_norm": 2.5665833950042725,
"learning_rate": 8.07830737662829e-05,
"loss": 2.5681,
"step": 36
},
{
"epoch": 0.03430204859456884,
"eval_loss": 2.3183698654174805,
"eval_runtime": 195.29,
"eval_samples_per_second": 4.527,
"eval_steps_per_second": 0.568,
"step": 36
},
{
"epoch": 0.03525488327775131,
"grad_norm": 3.006268262863159,
"learning_rate": 7.938926261462366e-05,
"loss": 2.2294,
"step": 37
},
{
"epoch": 0.03620771796093378,
"grad_norm": 1.0738325119018555,
"learning_rate": 7.795964517353735e-05,
"loss": 2.3217,
"step": 38
},
{
"epoch": 0.037160552644116246,
"grad_norm": 1.1440376043319702,
"learning_rate": 7.649596321166024e-05,
"loss": 2.1976,
"step": 39
},
{
"epoch": 0.038113387327298714,
"grad_norm": 1.2780272960662842,
"learning_rate": 7.500000000000001e-05,
"loss": 2.2665,
"step": 40
},
{
"epoch": 0.03906622201048118,
"grad_norm": 1.0667051076889038,
"learning_rate": 7.347357813929454e-05,
"loss": 2.2043,
"step": 41
},
{
"epoch": 0.04001905669366365,
"grad_norm": 1.230981707572937,
"learning_rate": 7.191855733945387e-05,
"loss": 1.9738,
"step": 42
},
{
"epoch": 0.04097189137684612,
"grad_norm": 1.1416908502578735,
"learning_rate": 7.033683215379002e-05,
"loss": 2.256,
"step": 43
},
{
"epoch": 0.04192472606002858,
"grad_norm": 1.4842567443847656,
"learning_rate": 6.873032967079561e-05,
"loss": 2.2914,
"step": 44
},
{
"epoch": 0.04287756074321105,
"grad_norm": 1.039803147315979,
"learning_rate": 6.710100716628344e-05,
"loss": 2.1142,
"step": 45
},
{
"epoch": 0.04287756074321105,
"eval_loss": 2.283740520477295,
"eval_runtime": 195.2949,
"eval_samples_per_second": 4.526,
"eval_steps_per_second": 0.568,
"step": 45
},
{
"epoch": 0.04383039542639352,
"grad_norm": 2.066823720932007,
"learning_rate": 6.545084971874738e-05,
"loss": 2.2865,
"step": 46
},
{
"epoch": 0.04478323010957599,
"grad_norm": 1.3638314008712769,
"learning_rate": 6.378186779084995e-05,
"loss": 2.1797,
"step": 47
},
{
"epoch": 0.045736064792758456,
"grad_norm": 1.3101152181625366,
"learning_rate": 6.209609477998338e-05,
"loss": 2.3253,
"step": 48
},
{
"epoch": 0.046688899475940925,
"grad_norm": 1.1173464059829712,
"learning_rate": 6.0395584540887963e-05,
"loss": 2.1469,
"step": 49
},
{
"epoch": 0.04764173415912339,
"grad_norm": 0.9268816113471985,
"learning_rate": 5.868240888334653e-05,
"loss": 2.0037,
"step": 50
},
{
"epoch": 0.04859456884230586,
"grad_norm": 0.9991522431373596,
"learning_rate": 5.695865504800327e-05,
"loss": 2.2485,
"step": 51
},
{
"epoch": 0.04954740352548833,
"grad_norm": 1.1253869533538818,
"learning_rate": 5.522642316338268e-05,
"loss": 2.0507,
"step": 52
},
{
"epoch": 0.0505002382086708,
"grad_norm": 0.9809867739677429,
"learning_rate": 5.348782368720626e-05,
"loss": 2.038,
"step": 53
},
{
"epoch": 0.05145307289185326,
"grad_norm": 1.0923676490783691,
"learning_rate": 5.174497483512506e-05,
"loss": 2.4383,
"step": 54
},
{
"epoch": 0.05145307289185326,
"eval_loss": 2.2689011096954346,
"eval_runtime": 195.1248,
"eval_samples_per_second": 4.53,
"eval_steps_per_second": 0.569,
"step": 54
},
{
"epoch": 0.05240590757503573,
"grad_norm": 1.0306025743484497,
"learning_rate": 5e-05,
"loss": 2.1825,
"step": 55
},
{
"epoch": 0.0533587422582182,
"grad_norm": 1.4191091060638428,
"learning_rate": 4.825502516487497e-05,
"loss": 2.6339,
"step": 56
},
{
"epoch": 0.054311576941400666,
"grad_norm": 1.2226223945617676,
"learning_rate": 4.6512176312793736e-05,
"loss": 2.204,
"step": 57
},
{
"epoch": 0.055264411624583135,
"grad_norm": 5.3369951248168945,
"learning_rate": 4.477357683661734e-05,
"loss": 2.0067,
"step": 58
},
{
"epoch": 0.0562172463077656,
"grad_norm": 1.2273885011672974,
"learning_rate": 4.3041344951996746e-05,
"loss": 2.2786,
"step": 59
},
{
"epoch": 0.05717008099094807,
"grad_norm": 1.1748491525650024,
"learning_rate": 4.131759111665349e-05,
"loss": 2.415,
"step": 60
},
{
"epoch": 0.05812291567413054,
"grad_norm": 1.1600786447525024,
"learning_rate": 3.960441545911204e-05,
"loss": 2.2855,
"step": 61
},
{
"epoch": 0.05907575035731301,
"grad_norm": 1.1740864515304565,
"learning_rate": 3.790390522001662e-05,
"loss": 2.2457,
"step": 62
},
{
"epoch": 0.06002858504049548,
"grad_norm": 0.8164812922477722,
"learning_rate": 3.6218132209150045e-05,
"loss": 2.0255,
"step": 63
},
{
"epoch": 0.06002858504049548,
"eval_loss": 2.2617905139923096,
"eval_runtime": 195.3121,
"eval_samples_per_second": 4.526,
"eval_steps_per_second": 0.568,
"step": 63
},
{
"epoch": 0.06098141972367794,
"grad_norm": 1.8397961854934692,
"learning_rate": 3.4549150281252636e-05,
"loss": 2.3015,
"step": 64
},
{
"epoch": 0.06193425440686041,
"grad_norm": 1.4221397638320923,
"learning_rate": 3.289899283371657e-05,
"loss": 2.1798,
"step": 65
},
{
"epoch": 0.06288708909004288,
"grad_norm": 5.245110034942627,
"learning_rate": 3.12696703292044e-05,
"loss": 2.2569,
"step": 66
},
{
"epoch": 0.06383992377322535,
"grad_norm": 1.1781222820281982,
"learning_rate": 2.9663167846209998e-05,
"loss": 2.11,
"step": 67
},
{
"epoch": 0.06479275845640782,
"grad_norm": 1.3529887199401855,
"learning_rate": 2.8081442660546125e-05,
"loss": 2.2486,
"step": 68
},
{
"epoch": 0.06574559313959027,
"grad_norm": 1.1169679164886475,
"learning_rate": 2.6526421860705473e-05,
"loss": 2.1643,
"step": 69
},
{
"epoch": 0.06669842782277274,
"grad_norm": 1.144498586654663,
"learning_rate": 2.500000000000001e-05,
"loss": 2.2978,
"step": 70
},
{
"epoch": 0.06765126250595521,
"grad_norm": 1.211686611175537,
"learning_rate": 2.350403678833976e-05,
"loss": 2.1954,
"step": 71
},
{
"epoch": 0.06860409718913768,
"grad_norm": 1.1945290565490723,
"learning_rate": 2.2040354826462668e-05,
"loss": 2.2826,
"step": 72
},
{
"epoch": 0.06860409718913768,
"eval_loss": 2.2520430088043213,
"eval_runtime": 195.2934,
"eval_samples_per_second": 4.527,
"eval_steps_per_second": 0.568,
"step": 72
},
{
"epoch": 0.06955693187232015,
"grad_norm": 0.8045013546943665,
"learning_rate": 2.061073738537635e-05,
"loss": 2.0106,
"step": 73
},
{
"epoch": 0.07050976655550262,
"grad_norm": 0.9519303441047668,
"learning_rate": 1.9216926233717085e-05,
"loss": 2.1426,
"step": 74
},
{
"epoch": 0.07146260123868509,
"grad_norm": 0.9403889775276184,
"learning_rate": 1.7860619515673033e-05,
"loss": 2.1034,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.112830925340672e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}