{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.06763897696047347,
"eval_steps": 100,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0001690974424011837,
"eval_loss": 0.6459227800369263,
"eval_runtime": 201.5767,
"eval_samples_per_second": 12.353,
"eval_steps_per_second": 6.176,
"step": 1
},
{
"epoch": 0.0008454872120059184,
"grad_norm": 0.15712441504001617,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.5307,
"step": 5
},
{
"epoch": 0.0016909744240118367,
"grad_norm": 0.20046602189540863,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.5676,
"step": 10
},
{
"epoch": 0.0025364616360177552,
"grad_norm": 0.20849193632602692,
"learning_rate": 5e-05,
"loss": 0.5701,
"step": 15
},
{
"epoch": 0.0033819488480236735,
"grad_norm": 0.27390506863594055,
"learning_rate": 6.666666666666667e-05,
"loss": 0.6423,
"step": 20
},
{
"epoch": 0.004227436060029592,
"grad_norm": 0.22099897265434265,
"learning_rate": 8.333333333333334e-05,
"loss": 0.5055,
"step": 25
},
{
"epoch": 0.0050729232720355105,
"grad_norm": 0.2435217648744583,
"learning_rate": 0.0001,
"loss": 0.5144,
"step": 30
},
{
"epoch": 0.005918410484041429,
"grad_norm": 0.29201892018318176,
"learning_rate": 9.995494831023409e-05,
"loss": 0.5433,
"step": 35
},
{
"epoch": 0.006763897696047347,
"grad_norm": 0.40463411808013916,
"learning_rate": 9.981987442712633e-05,
"loss": 0.5898,
"step": 40
},
{
"epoch": 0.007609384908053266,
"grad_norm": 0.3479752838611603,
"learning_rate": 9.959502176294383e-05,
"loss": 0.5306,
"step": 45
},
{
"epoch": 0.008454872120059184,
"grad_norm": 0.6131821274757385,
"learning_rate": 9.928079551738543e-05,
"loss": 0.5225,
"step": 50
},
{
"epoch": 0.009300359332065102,
"grad_norm": 0.1976812481880188,
"learning_rate": 9.887776194738432e-05,
"loss": 0.4832,
"step": 55
},
{
"epoch": 0.010145846544071021,
"grad_norm": 0.19775013625621796,
"learning_rate": 9.838664734667495e-05,
"loss": 0.5004,
"step": 60
},
{
"epoch": 0.01099133375607694,
"grad_norm": 0.19548407196998596,
"learning_rate": 9.780833673696254e-05,
"loss": 0.5248,
"step": 65
},
{
"epoch": 0.011836820968082858,
"grad_norm": 0.2102847397327423,
"learning_rate": 9.714387227305422e-05,
"loss": 0.4872,
"step": 70
},
{
"epoch": 0.012682308180088777,
"grad_norm": 0.20248602330684662,
"learning_rate": 9.639445136482548e-05,
"loss": 0.5021,
"step": 75
},
{
"epoch": 0.013527795392094694,
"grad_norm": 0.2491309642791748,
"learning_rate": 9.55614245194068e-05,
"loss": 0.5686,
"step": 80
},
{
"epoch": 0.014373282604100613,
"grad_norm": 0.2637186050415039,
"learning_rate": 9.464629290747842e-05,
"loss": 0.4991,
"step": 85
},
{
"epoch": 0.015218769816106531,
"grad_norm": 0.31047478318214417,
"learning_rate": 9.365070565805941e-05,
"loss": 0.5182,
"step": 90
},
{
"epoch": 0.01606425702811245,
"grad_norm": 0.39293938875198364,
"learning_rate": 9.257645688666556e-05,
"loss": 0.5507,
"step": 95
},
{
"epoch": 0.016909744240118367,
"grad_norm": 0.7622083425521851,
"learning_rate": 9.142548246219212e-05,
"loss": 0.5148,
"step": 100
},
{
"epoch": 0.016909744240118367,
"eval_loss": 0.49994635581970215,
"eval_runtime": 203.6154,
"eval_samples_per_second": 12.229,
"eval_steps_per_second": 6.114,
"step": 100
},
{
"epoch": 0.017755231452124286,
"grad_norm": 0.1981024593114853,
"learning_rate": 9.019985651834703e-05,
"loss": 0.4192,
"step": 105
},
{
"epoch": 0.018600718664130204,
"grad_norm": 0.19838833808898926,
"learning_rate": 8.890178771592199e-05,
"loss": 0.4863,
"step": 110
},
{
"epoch": 0.019446205876136123,
"grad_norm": 0.21621914207935333,
"learning_rate": 8.753361526263621e-05,
"loss": 0.5245,
"step": 115
},
{
"epoch": 0.020291693088142042,
"grad_norm": 0.2197190523147583,
"learning_rate": 8.609780469772623e-05,
"loss": 0.4785,
"step": 120
},
{
"epoch": 0.02113718030014796,
"grad_norm": 0.2307160645723343,
"learning_rate": 8.459694344887732e-05,
"loss": 0.4939,
"step": 125
},
{
"epoch": 0.02198266751215388,
"grad_norm": 0.2556055784225464,
"learning_rate": 8.303373616950408e-05,
"loss": 0.5202,
"step": 130
},
{
"epoch": 0.022828154724159798,
"grad_norm": 0.3053711950778961,
"learning_rate": 8.141099986478212e-05,
"loss": 0.5207,
"step": 135
},
{
"epoch": 0.023673641936165717,
"grad_norm": 0.3650396168231964,
"learning_rate": 7.973165881521434e-05,
"loss": 0.5146,
"step": 140
},
{
"epoch": 0.024519129148171635,
"grad_norm": 0.4481881856918335,
"learning_rate": 7.799873930687978e-05,
"loss": 0.5183,
"step": 145
},
{
"epoch": 0.025364616360177554,
"grad_norm": 0.7726387977600098,
"learning_rate": 7.621536417786159e-05,
"loss": 0.4749,
"step": 150
},
{
"epoch": 0.02621010357218347,
"grad_norm": 0.18763859570026398,
"learning_rate": 7.438474719068173e-05,
"loss": 0.4664,
"step": 155
},
{
"epoch": 0.027055590784189388,
"grad_norm": 0.1818513423204422,
"learning_rate": 7.251018724088367e-05,
"loss": 0.4774,
"step": 160
},
{
"epoch": 0.027901077996195307,
"grad_norm": 0.23504586517810822,
"learning_rate": 7.059506241219965e-05,
"loss": 0.4938,
"step": 165
},
{
"epoch": 0.028746565208201225,
"grad_norm": 0.22553101181983948,
"learning_rate": 6.864282388901544e-05,
"loss": 0.4936,
"step": 170
},
{
"epoch": 0.029592052420207144,
"grad_norm": 0.21830540895462036,
"learning_rate": 6.665698973710288e-05,
"loss": 0.4305,
"step": 175
},
{
"epoch": 0.030437539632213063,
"grad_norm": 0.2550351619720459,
"learning_rate": 6.464113856382752e-05,
"loss": 0.4829,
"step": 180
},
{
"epoch": 0.03128302684421898,
"grad_norm": 0.3035644590854645,
"learning_rate": 6.259890306925627e-05,
"loss": 0.4895,
"step": 185
},
{
"epoch": 0.0321285140562249,
"grad_norm": 0.3919617533683777,
"learning_rate": 6.0533963499786314e-05,
"loss": 0.5451,
"step": 190
},
{
"epoch": 0.032974001268230815,
"grad_norm": 0.4303884208202362,
"learning_rate": 5.8450041016092464e-05,
"loss": 0.4667,
"step": 195
},
{
"epoch": 0.033819488480236734,
"grad_norm": 0.829088032245636,
"learning_rate": 5.6350890987343944e-05,
"loss": 0.4154,
"step": 200
},
{
"epoch": 0.033819488480236734,
"eval_loss": 0.47849199175834656,
"eval_runtime": 203.4816,
"eval_samples_per_second": 12.237,
"eval_steps_per_second": 6.118,
"step": 200
},
{
"epoch": 0.03466497569224265,
"grad_norm": 0.1753915548324585,
"learning_rate": 5.4240296223775465e-05,
"loss": 0.4032,
"step": 205
},
{
"epoch": 0.03551046290424857,
"grad_norm": 0.19739754498004913,
"learning_rate": 5.212206015980742e-05,
"loss": 0.4729,
"step": 210
},
{
"epoch": 0.03635595011625449,
"grad_norm": 0.22465206682682037,
"learning_rate": 5e-05,
"loss": 0.4631,
"step": 215
},
{
"epoch": 0.03720143732826041,
"grad_norm": 0.24705617129802704,
"learning_rate": 4.78779398401926e-05,
"loss": 0.4618,
"step": 220
},
{
"epoch": 0.03804692454026633,
"grad_norm": 0.28556302189826965,
"learning_rate": 4.575970377622456e-05,
"loss": 0.5512,
"step": 225
},
{
"epoch": 0.038892411752272246,
"grad_norm": 0.3080470561981201,
"learning_rate": 4.364910901265606e-05,
"loss": 0.4949,
"step": 230
},
{
"epoch": 0.039737898964278165,
"grad_norm": 0.3479246199131012,
"learning_rate": 4.1549958983907555e-05,
"loss": 0.4499,
"step": 235
},
{
"epoch": 0.040583386176284084,
"grad_norm": 0.3600875735282898,
"learning_rate": 3.94660365002137e-05,
"loss": 0.4454,
"step": 240
},
{
"epoch": 0.04142887338829,
"grad_norm": 0.5033133625984192,
"learning_rate": 3.740109693074375e-05,
"loss": 0.4829,
"step": 245
},
{
"epoch": 0.04227436060029592,
"grad_norm": 0.9750732779502869,
"learning_rate": 3.5358861436172485e-05,
"loss": 0.4892,
"step": 250
},
{
"epoch": 0.04311984781230184,
"grad_norm": 0.18351560831069946,
"learning_rate": 3.334301026289712e-05,
"loss": 0.4062,
"step": 255
},
{
"epoch": 0.04396533502430776,
"grad_norm": 0.21633638441562653,
"learning_rate": 3.135717611098458e-05,
"loss": 0.4706,
"step": 260
},
{
"epoch": 0.04481082223631368,
"grad_norm": 0.21196366846561432,
"learning_rate": 2.9404937587800375e-05,
"loss": 0.4781,
"step": 265
},
{
"epoch": 0.045656309448319596,
"grad_norm": 0.25018104910850525,
"learning_rate": 2.748981275911633e-05,
"loss": 0.4947,
"step": 270
},
{
"epoch": 0.046501796660325515,
"grad_norm": 0.27837511897087097,
"learning_rate": 2.5615252809318284e-05,
"loss": 0.4824,
"step": 275
},
{
"epoch": 0.04734728387233143,
"grad_norm": 0.28721874952316284,
"learning_rate": 2.3784635822138424e-05,
"loss": 0.4753,
"step": 280
},
{
"epoch": 0.04819277108433735,
"grad_norm": 0.3178447484970093,
"learning_rate": 2.2001260693120233e-05,
"loss": 0.4643,
"step": 285
},
{
"epoch": 0.04903825829634327,
"grad_norm": 0.3714665174484253,
"learning_rate": 2.026834118478567e-05,
"loss": 0.4892,
"step": 290
},
{
"epoch": 0.04988374550834919,
"grad_norm": 0.470114141702652,
"learning_rate": 1.858900013521788e-05,
"loss": 0.5121,
"step": 295
},
{
"epoch": 0.05072923272035511,
"grad_norm": 0.8095857501029968,
"learning_rate": 1.6966263830495936e-05,
"loss": 0.4473,
"step": 300
},
{
"epoch": 0.05072923272035511,
"eval_loss": 0.4689193367958069,
"eval_runtime": 203.7914,
"eval_samples_per_second": 12.218,
"eval_steps_per_second": 6.109,
"step": 300
},
{
"epoch": 0.05157471993236102,
"grad_norm": 0.17946785688400269,
"learning_rate": 1.5403056551122697e-05,
"loss": 0.4154,
"step": 305
},
{
"epoch": 0.05242020714436694,
"grad_norm": 0.23259112238883972,
"learning_rate": 1.3902195302273779e-05,
"loss": 0.4642,
"step": 310
},
{
"epoch": 0.05326569435637286,
"grad_norm": 0.21611733734607697,
"learning_rate": 1.246638473736378e-05,
"loss": 0.4635,
"step": 315
},
{
"epoch": 0.054111181568378776,
"grad_norm": 0.24835757911205292,
"learning_rate": 1.1098212284078036e-05,
"loss": 0.4903,
"step": 320
},
{
"epoch": 0.054956668780384695,
"grad_norm": 0.26646360754966736,
"learning_rate": 9.800143481652979e-06,
"loss": 0.4367,
"step": 325
},
{
"epoch": 0.05580215599239061,
"grad_norm": 0.3183721899986267,
"learning_rate": 8.574517537807897e-06,
"loss": 0.4668,
"step": 330
},
{
"epoch": 0.05664764320439653,
"grad_norm": 0.3372812867164612,
"learning_rate": 7.423543113334436e-06,
"loss": 0.4071,
"step": 335
},
{
"epoch": 0.05749313041640245,
"grad_norm": 0.4076240360736847,
"learning_rate": 6.349294341940593e-06,
"loss": 0.4916,
"step": 340
},
{
"epoch": 0.05833861762840837,
"grad_norm": 0.5152222514152527,
"learning_rate": 5.353707092521582e-06,
"loss": 0.5302,
"step": 345
},
{
"epoch": 0.05918410484041429,
"grad_norm": 0.8785345554351807,
"learning_rate": 4.43857548059321e-06,
"loss": 0.4065,
"step": 350
},
{
"epoch": 0.06002959205242021,
"grad_norm": 0.18611513078212738,
"learning_rate": 3.605548635174533e-06,
"loss": 0.4258,
"step": 355
},
{
"epoch": 0.060875079264426125,
"grad_norm": 0.220343679189682,
"learning_rate": 2.85612772694579e-06,
"loss": 0.4736,
"step": 360
},
{
"epoch": 0.061720566476432044,
"grad_norm": 0.22353102266788483,
"learning_rate": 2.191663263037458e-06,
"loss": 0.4437,
"step": 365
},
{
"epoch": 0.06256605368843796,
"grad_norm": 0.24478361010551453,
"learning_rate": 1.6133526533250565e-06,
"loss": 0.4699,
"step": 370
},
{
"epoch": 0.06341154090044387,
"grad_norm": 0.2732827663421631,
"learning_rate": 1.1222380526156928e-06,
"loss": 0.4612,
"step": 375
},
{
"epoch": 0.0642570281124498,
"grad_norm": 0.2980816066265106,
"learning_rate": 7.192044826145771e-07,
"loss": 0.4447,
"step": 380
},
{
"epoch": 0.06510251532445571,
"grad_norm": 0.3370252549648285,
"learning_rate": 4.049782370561583e-07,
"loss": 0.4983,
"step": 385
},
{
"epoch": 0.06594800253646163,
"grad_norm": 0.38385066390037537,
"learning_rate": 1.8012557287367392e-07,
"loss": 0.4681,
"step": 390
},
{
"epoch": 0.06679348974846755,
"grad_norm": 0.4089011251926422,
"learning_rate": 4.5051689765929214e-08,
"loss": 0.4885,
"step": 395
},
{
"epoch": 0.06763897696047347,
"grad_norm": 0.8262264728546143,
"learning_rate": 0.0,
"loss": 0.4751,
"step": 400
},
{
"epoch": 0.06763897696047347,
"eval_loss": 0.4665312170982361,
"eval_runtime": 203.2583,
"eval_samples_per_second": 12.25,
"eval_steps_per_second": 6.125,
"step": 400
}
],
"logging_steps": 5,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.3762794998464512e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
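
The state above is a standard `trainer_state.json` written by the Hugging Face `Trainer`. As a minimal sketch (not part of the checkpoint itself), the snippet below loads the file, summarises the logged training/eval losses, and compares the logged `learning_rate` values against a linear-warmup-plus-cosine-decay schedule; the file path, the 30-step warmup, and the 1e-4 peak rate are assumptions inferred from the numbers in `log_history`, not values the file states explicitly.

```python
import json
import math

# Hypothetical location of this checkpoint's state file (assumption).
STATE_PATH = "checkpoint-400/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"training points: {len(train_logs)}, eval points: {len(eval_logs)}")
for e in eval_logs:
    print(f"step {e['step']:>4}  eval_loss {e['eval_loss']:.4f}")

def expected_lr(step, peak=1e-4, warmup=30, total=400):
    """Cosine schedule with linear warmup; parameters are inferred, not read
    from the file (max_steps=400 is logged, warmup=30 and peak=1e-4 are
    assumptions that happen to reproduce the logged values)."""
    if step <= warmup:
        return peak * step / warmup
    progress = (step - warmup) / (total - warmup)
    return peak * 0.5 * (1.0 + math.cos(math.pi * progress))

# Spot-check the first and last few logged learning rates against the schedule.
for e in train_logs[:3] + train_logs[-3:]:
    print(f"step {e['step']:>4}  logged lr {e['learning_rate']:.3e}  "
          f"expected lr {expected_lr(e['step']):.3e}")
```

Run against this checkpoint, the eval loss falls from roughly 0.646 at step 1 to about 0.467 at step 400, and the logged learning rates line up with the warmup-then-cosine curve sketched above.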