{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.07801833430856252,
"eval_steps": 100,
"global_step": 300,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.000260061114361875,
"eval_loss": 3.210697650909424,
"eval_runtime": 46.7762,
"eval_samples_per_second": 138.468,
"eval_steps_per_second": 17.316,
"step": 1
},
{
"epoch": 0.001300305571809375,
"grad_norm": 0.1656179130077362,
"learning_rate": 5e-05,
"loss": 3.2144,
"step": 5
},
{
"epoch": 0.00260061114361875,
"grad_norm": 0.19077427685260773,
"learning_rate": 0.0001,
"loss": 3.1656,
"step": 10
},
{
"epoch": 0.0039009167154281255,
"grad_norm": 0.20196713507175446,
"learning_rate": 9.995944990857849e-05,
"loss": 3.1702,
"step": 15
},
{
"epoch": 0.0052012222872375,
"grad_norm": 0.24549086391925812,
"learning_rate": 9.983786540671051e-05,
"loss": 3.1669,
"step": 20
},
{
"epoch": 0.006501527859046876,
"grad_norm": 0.2799537777900696,
"learning_rate": 9.96354437049027e-05,
"loss": 3.0444,
"step": 25
},
{
"epoch": 0.007801833430856251,
"grad_norm": 0.2620985209941864,
"learning_rate": 9.935251313189564e-05,
"loss": 2.9902,
"step": 30
},
{
"epoch": 0.009102139002665627,
"grad_norm": 0.25993800163269043,
"learning_rate": 9.898953260211338e-05,
"loss": 2.7868,
"step": 35
},
{
"epoch": 0.010402444574475,
"grad_norm": 0.25809502601623535,
"learning_rate": 9.85470908713026e-05,
"loss": 2.9121,
"step": 40
},
{
"epoch": 0.011702750146284377,
"grad_norm": 0.23389342427253723,
"learning_rate": 9.802590558156862e-05,
"loss": 2.7214,
"step": 45
},
{
"epoch": 0.013003055718093752,
"grad_norm": 0.22849056124687195,
"learning_rate": 9.742682209735727e-05,
"loss": 2.7599,
"step": 50
},
{
"epoch": 0.014303361289903128,
"grad_norm": 0.2257491499185562,
"learning_rate": 9.675081213427076e-05,
"loss": 2.6229,
"step": 55
},
{
"epoch": 0.015603666861712502,
"grad_norm": 0.22627227008342743,
"learning_rate": 9.599897218294122e-05,
"loss": 2.6735,
"step": 60
},
{
"epoch": 0.016903972433521878,
"grad_norm": 0.21060359477996826,
"learning_rate": 9.517252173051911e-05,
"loss": 2.6384,
"step": 65
},
{
"epoch": 0.018204278005331254,
"grad_norm": 0.20334959030151367,
"learning_rate": 9.42728012826605e-05,
"loss": 2.5622,
"step": 70
},
{
"epoch": 0.01950458357714063,
"grad_norm": 0.19899047911167145,
"learning_rate": 9.330127018922194e-05,
"loss": 2.605,
"step": 75
},
{
"epoch": 0.02080488914895,
"grad_norm": 0.21144025027751923,
"learning_rate": 9.225950427718975e-05,
"loss": 2.5366,
"step": 80
},
{
"epoch": 0.022105194720759377,
"grad_norm": 0.19850236177444458,
"learning_rate": 9.114919329468282e-05,
"loss": 2.4836,
"step": 85
},
{
"epoch": 0.023405500292568753,
"grad_norm": 0.20592238008975983,
"learning_rate": 8.997213817017507e-05,
"loss": 2.4513,
"step": 90
},
{
"epoch": 0.02470580586437813,
"grad_norm": 0.233050137758255,
"learning_rate": 8.873024809138272e-05,
"loss": 2.4859,
"step": 95
},
{
"epoch": 0.026006111436187505,
"grad_norm": 0.2322545349597931,
"learning_rate": 8.742553740855506e-05,
"loss": 2.4244,
"step": 100
},
{
"epoch": 0.026006111436187505,
"eval_loss": 2.4954044818878174,
"eval_runtime": 46.5346,
"eval_samples_per_second": 139.187,
"eval_steps_per_second": 17.406,
"step": 100
},
{
"epoch": 0.02730641700799688,
"grad_norm": 0.20528066158294678,
"learning_rate": 8.606012236719073e-05,
"loss": 2.4713,
"step": 105
},
{
"epoch": 0.028606722579806256,
"grad_norm": 0.20817121863365173,
"learning_rate": 8.463621767547998e-05,
"loss": 2.504,
"step": 110
},
{
"epoch": 0.02990702815161563,
"grad_norm": 0.19070002436637878,
"learning_rate": 8.315613291203976e-05,
"loss": 2.4456,
"step": 115
},
{
"epoch": 0.031207333723425004,
"grad_norm": 0.2086874395608902,
"learning_rate": 8.162226877976887e-05,
"loss": 2.4541,
"step": 120
},
{
"epoch": 0.03250763929523438,
"grad_norm": 0.21223700046539307,
"learning_rate": 8.003711321189895e-05,
"loss": 2.4791,
"step": 125
},
{
"epoch": 0.033807944867043756,
"grad_norm": 0.20589032769203186,
"learning_rate": 7.840323733655778e-05,
"loss": 2.5583,
"step": 130
},
{
"epoch": 0.03510825043885313,
"grad_norm": 0.21625283360481262,
"learning_rate": 7.672329130639005e-05,
"loss": 2.4741,
"step": 135
},
{
"epoch": 0.03640855601066251,
"grad_norm": 0.2250359058380127,
"learning_rate": 7.500000000000001e-05,
"loss": 2.5563,
"step": 140
},
{
"epoch": 0.03770886158247188,
"grad_norm": 0.22031283378601074,
"learning_rate": 7.323615860218843e-05,
"loss": 2.4719,
"step": 145
},
{
"epoch": 0.03900916715428126,
"grad_norm": 0.21579761803150177,
"learning_rate": 7.143462807015271e-05,
"loss": 2.4485,
"step": 150
},
{
"epoch": 0.04030947272609063,
"grad_norm": 0.2309805005788803,
"learning_rate": 6.959833049300377e-05,
"loss": 2.4822,
"step": 155
},
{
"epoch": 0.0416097782979,
"grad_norm": 0.19943702220916748,
"learning_rate": 6.773024435212678e-05,
"loss": 2.4214,
"step": 160
},
{
"epoch": 0.04291008386970938,
"grad_norm": 0.21527348458766937,
"learning_rate": 6.583339969007363e-05,
"loss": 2.3161,
"step": 165
},
{
"epoch": 0.044210389441518755,
"grad_norm": 0.2162819504737854,
"learning_rate": 6.391087319582264e-05,
"loss": 2.4201,
"step": 170
},
{
"epoch": 0.045510695013328134,
"grad_norm": 0.2415967732667923,
"learning_rate": 6.19657832143779e-05,
"loss": 2.3804,
"step": 175
},
{
"epoch": 0.046811000585137506,
"grad_norm": 0.22041171789169312,
"learning_rate": 6.0001284688802226e-05,
"loss": 2.4879,
"step": 180
},
{
"epoch": 0.048111306156946886,
"grad_norm": 0.2270502895116806,
"learning_rate": 5.8020564042888015e-05,
"loss": 2.3864,
"step": 185
},
{
"epoch": 0.04941161172875626,
"grad_norm": 0.2288903146982193,
"learning_rate": 5.602683401276615e-05,
"loss": 2.4525,
"step": 190
},
{
"epoch": 0.05071191730056563,
"grad_norm": 0.2158132791519165,
"learning_rate": 5.402332843583631e-05,
"loss": 2.3903,
"step": 195
},
{
"epoch": 0.05201222287237501,
"grad_norm": 0.23484806716442108,
"learning_rate": 5.201329700547076e-05,
"loss": 2.4632,
"step": 200
},
{
"epoch": 0.05201222287237501,
"eval_loss": 2.43571400642395,
"eval_runtime": 48.746,
"eval_samples_per_second": 132.872,
"eval_steps_per_second": 16.617,
"step": 200
},
{
"epoch": 0.05331252844418438,
"grad_norm": 0.22658920288085938,
"learning_rate": 5e-05,
"loss": 2.4468,
"step": 205
},
{
"epoch": 0.05461283401599376,
"grad_norm": 0.22382205724716187,
"learning_rate": 4.798670299452926e-05,
"loss": 2.4227,
"step": 210
},
{
"epoch": 0.05591313958780313,
"grad_norm": 0.2128317654132843,
"learning_rate": 4.597667156416371e-05,
"loss": 2.394,
"step": 215
},
{
"epoch": 0.05721344515961251,
"grad_norm": 0.20399969816207886,
"learning_rate": 4.397316598723385e-05,
"loss": 2.3722,
"step": 220
},
{
"epoch": 0.058513750731421885,
"grad_norm": 0.22103694081306458,
"learning_rate": 4.197943595711198e-05,
"loss": 2.4722,
"step": 225
},
{
"epoch": 0.05981405630323126,
"grad_norm": 0.19609223306179047,
"learning_rate": 3.9998715311197785e-05,
"loss": 2.4358,
"step": 230
},
{
"epoch": 0.061114361875040636,
"grad_norm": 0.20797300338745117,
"learning_rate": 3.803421678562213e-05,
"loss": 2.4147,
"step": 235
},
{
"epoch": 0.06241466744685001,
"grad_norm": 0.23733116686344147,
"learning_rate": 3.608912680417737e-05,
"loss": 2.5038,
"step": 240
},
{
"epoch": 0.06371497301865939,
"grad_norm": 0.20630250871181488,
"learning_rate": 3.4166600309926387e-05,
"loss": 2.43,
"step": 245
},
{
"epoch": 0.06501527859046877,
"grad_norm": 0.21626047790050507,
"learning_rate": 3.226975564787322e-05,
"loss": 2.3791,
"step": 250
},
{
"epoch": 0.06631558416227813,
"grad_norm": 0.22082562744617462,
"learning_rate": 3.0401669506996256e-05,
"loss": 2.3927,
"step": 255
},
{
"epoch": 0.06761588973408751,
"grad_norm": 0.2386007159948349,
"learning_rate": 2.8565371929847284e-05,
"loss": 2.4492,
"step": 260
},
{
"epoch": 0.06891619530589689,
"grad_norm": 0.23111873865127563,
"learning_rate": 2.6763841397811573e-05,
"loss": 2.4282,
"step": 265
},
{
"epoch": 0.07021650087770626,
"grad_norm": 0.23999303579330444,
"learning_rate": 2.500000000000001e-05,
"loss": 2.4116,
"step": 270
},
{
"epoch": 0.07151680644951564,
"grad_norm": 0.23608632385730743,
"learning_rate": 2.3276708693609943e-05,
"loss": 2.5647,
"step": 275
},
{
"epoch": 0.07281711202132501,
"grad_norm": 0.22393612563610077,
"learning_rate": 2.1596762663442218e-05,
"loss": 2.516,
"step": 280
},
{
"epoch": 0.07411741759313438,
"grad_norm": 0.2267947643995285,
"learning_rate": 1.996288678810105e-05,
"loss": 2.4214,
"step": 285
},
{
"epoch": 0.07541772316494376,
"grad_norm": 0.21349841356277466,
"learning_rate": 1.837773122023114e-05,
"loss": 2.4265,
"step": 290
},
{
"epoch": 0.07671802873675314,
"grad_norm": 0.21270251274108887,
"learning_rate": 1.684386708796025e-05,
"loss": 2.4332,
"step": 295
},
{
"epoch": 0.07801833430856252,
"grad_norm": 0.23567631840705872,
"learning_rate": 1.536378232452003e-05,
"loss": 2.396,
"step": 300
},
{
"epoch": 0.07801833430856252,
"eval_loss": 2.4196038246154785,
"eval_runtime": 47.2697,
"eval_samples_per_second": 137.022,
"eval_steps_per_second": 17.136,
"step": 300
}
],
"logging_steps": 5,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.905704121348915e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}