{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.43352601156069365,
"eval_steps": 25,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005780346820809248,
"grad_norm": 11.805556297302246,
"learning_rate": 3.3333333333333335e-05,
"loss": 10.2277,
"step": 1
},
{
"epoch": 0.005780346820809248,
"eval_loss": 13.883577346801758,
"eval_runtime": 36.4564,
"eval_samples_per_second": 4.005,
"eval_steps_per_second": 2.002,
"step": 1
},
{
"epoch": 0.011560693641618497,
"grad_norm": 18.839975357055664,
"learning_rate": 6.666666666666667e-05,
"loss": 13.4834,
"step": 2
},
{
"epoch": 0.017341040462427744,
"grad_norm": 27.072202682495117,
"learning_rate": 0.0001,
"loss": 12.1853,
"step": 3
},
{
"epoch": 0.023121387283236993,
"grad_norm": 17.535160064697266,
"learning_rate": 9.99524110790929e-05,
"loss": 12.5384,
"step": 4
},
{
"epoch": 0.028901734104046242,
"grad_norm": 24.40502166748047,
"learning_rate": 9.980973490458728e-05,
"loss": 13.3989,
"step": 5
},
{
"epoch": 0.03468208092485549,
"grad_norm": 22.7767276763916,
"learning_rate": 9.957224306869053e-05,
"loss": 12.5996,
"step": 6
},
{
"epoch": 0.04046242774566474,
"grad_norm": 19.763896942138672,
"learning_rate": 9.924038765061042e-05,
"loss": 11.2578,
"step": 7
},
{
"epoch": 0.046242774566473986,
"grad_norm": 18.673879623413086,
"learning_rate": 9.881480035599667e-05,
"loss": 9.4941,
"step": 8
},
{
"epoch": 0.05202312138728324,
"grad_norm": 19.133956909179688,
"learning_rate": 9.829629131445342e-05,
"loss": 7.2947,
"step": 9
},
{
"epoch": 0.057803468208092484,
"grad_norm": 21.61801528930664,
"learning_rate": 9.768584753741134e-05,
"loss": 6.8867,
"step": 10
},
{
"epoch": 0.06358381502890173,
"grad_norm": 22.551382064819336,
"learning_rate": 9.698463103929542e-05,
"loss": 7.503,
"step": 11
},
{
"epoch": 0.06936416184971098,
"grad_norm": 17.857229232788086,
"learning_rate": 9.619397662556435e-05,
"loss": 5.9785,
"step": 12
},
{
"epoch": 0.07514450867052024,
"grad_norm": 18.44431495666504,
"learning_rate": 9.53153893518325e-05,
"loss": 5.5334,
"step": 13
},
{
"epoch": 0.08092485549132948,
"grad_norm": 15.550880432128906,
"learning_rate": 9.435054165891109e-05,
"loss": 4.7801,
"step": 14
},
{
"epoch": 0.08670520231213873,
"grad_norm": 12.370695114135742,
"learning_rate": 9.330127018922194e-05,
"loss": 3.7388,
"step": 15
},
{
"epoch": 0.09248554913294797,
"grad_norm": 22.72999382019043,
"learning_rate": 9.21695722906443e-05,
"loss": 4.6148,
"step": 16
},
{
"epoch": 0.09826589595375723,
"grad_norm": 17.064172744750977,
"learning_rate": 9.09576022144496e-05,
"loss": 3.888,
"step": 17
},
{
"epoch": 0.10404624277456648,
"grad_norm": 11.739884376525879,
"learning_rate": 8.966766701456177e-05,
"loss": 3.9586,
"step": 18
},
{
"epoch": 0.10982658959537572,
"grad_norm": 11.010674476623535,
"learning_rate": 8.83022221559489e-05,
"loss": 3.5787,
"step": 19
},
{
"epoch": 0.11560693641618497,
"grad_norm": 11.561777114868164,
"learning_rate": 8.68638668405062e-05,
"loss": 3.7656,
"step": 20
},
{
"epoch": 0.12138728323699421,
"grad_norm": 16.596763610839844,
"learning_rate": 8.535533905932738e-05,
"loss": 3.8886,
"step": 21
},
{
"epoch": 0.12716763005780346,
"grad_norm": 20.58455467224121,
"learning_rate": 8.377951038078302e-05,
"loss": 4.9991,
"step": 22
},
{
"epoch": 0.1329479768786127,
"grad_norm": 19.491470336914062,
"learning_rate": 8.213938048432697e-05,
"loss": 4.3794,
"step": 23
},
{
"epoch": 0.13872832369942195,
"grad_norm": 17.23562240600586,
"learning_rate": 8.043807145043604e-05,
"loss": 3.6044,
"step": 24
},
{
"epoch": 0.14450867052023122,
"grad_norm": 14.802312850952148,
"learning_rate": 7.86788218175523e-05,
"loss": 2.8371,
"step": 25
},
{
"epoch": 0.14450867052023122,
"eval_loss": 3.1586248874664307,
"eval_runtime": 36.1043,
"eval_samples_per_second": 4.044,
"eval_steps_per_second": 2.022,
"step": 25
},
{
"epoch": 0.15028901734104047,
"grad_norm": 12.471908569335938,
"learning_rate": 7.68649804173412e-05,
"loss": 2.1572,
"step": 26
},
{
"epoch": 0.15606936416184972,
"grad_norm": 12.431914329528809,
"learning_rate": 7.500000000000001e-05,
"loss": 1.8061,
"step": 27
},
{
"epoch": 0.16184971098265896,
"grad_norm": 12.050883293151855,
"learning_rate": 7.308743066175172e-05,
"loss": 1.6819,
"step": 28
},
{
"epoch": 0.1676300578034682,
"grad_norm": 14.271095275878906,
"learning_rate": 7.113091308703498e-05,
"loss": 1.4357,
"step": 29
},
{
"epoch": 0.17341040462427745,
"grad_norm": 12.987905502319336,
"learning_rate": 6.91341716182545e-05,
"loss": 1.1009,
"step": 30
},
{
"epoch": 0.1791907514450867,
"grad_norm": 16.536724090576172,
"learning_rate": 6.710100716628344e-05,
"loss": 0.7876,
"step": 31
},
{
"epoch": 0.18497109826589594,
"grad_norm": 15.47735595703125,
"learning_rate": 6.503528997521366e-05,
"loss": 0.6424,
"step": 32
},
{
"epoch": 0.1907514450867052,
"grad_norm": 19.779516220092773,
"learning_rate": 6.294095225512603e-05,
"loss": 1.0814,
"step": 33
},
{
"epoch": 0.19653179190751446,
"grad_norm": 9.861597061157227,
"learning_rate": 6.0821980696905146e-05,
"loss": 0.4469,
"step": 34
},
{
"epoch": 0.2023121387283237,
"grad_norm": 15.623589515686035,
"learning_rate": 5.868240888334653e-05,
"loss": 0.7972,
"step": 35
},
{
"epoch": 0.20809248554913296,
"grad_norm": 17.796567916870117,
"learning_rate": 5.6526309611002594e-05,
"loss": 1.3914,
"step": 36
},
{
"epoch": 0.2138728323699422,
"grad_norm": 8.64621639251709,
"learning_rate": 5.435778713738292e-05,
"loss": 0.2915,
"step": 37
},
{
"epoch": 0.21965317919075145,
"grad_norm": 15.916255950927734,
"learning_rate": 5.218096936826681e-05,
"loss": 0.7056,
"step": 38
},
{
"epoch": 0.2254335260115607,
"grad_norm": 9.389236450195312,
"learning_rate": 5e-05,
"loss": 0.5004,
"step": 39
},
{
"epoch": 0.23121387283236994,
"grad_norm": 11.300865173339844,
"learning_rate": 4.781903063173321e-05,
"loss": 0.4527,
"step": 40
},
{
"epoch": 0.23699421965317918,
"grad_norm": 15.568708419799805,
"learning_rate": 4.564221286261709e-05,
"loss": 0.7751,
"step": 41
},
{
"epoch": 0.24277456647398843,
"grad_norm": 15.084271430969238,
"learning_rate": 4.347369038899744e-05,
"loss": 0.652,
"step": 42
},
{
"epoch": 0.24855491329479767,
"grad_norm": 13.203545570373535,
"learning_rate": 4.131759111665349e-05,
"loss": 0.5794,
"step": 43
},
{
"epoch": 0.2543352601156069,
"grad_norm": 25.20144271850586,
"learning_rate": 3.917801930309486e-05,
"loss": 6.1163,
"step": 44
},
{
"epoch": 0.26011560693641617,
"grad_norm": 27.02972984313965,
"learning_rate": 3.705904774487396e-05,
"loss": 5.3051,
"step": 45
},
{
"epoch": 0.2658959537572254,
"grad_norm": 20.16910171508789,
"learning_rate": 3.4964710024786354e-05,
"loss": 4.9375,
"step": 46
},
{
"epoch": 0.27167630057803466,
"grad_norm": 16.455686569213867,
"learning_rate": 3.289899283371657e-05,
"loss": 4.2718,
"step": 47
},
{
"epoch": 0.2774566473988439,
"grad_norm": 18.511323928833008,
"learning_rate": 3.086582838174551e-05,
"loss": 3.8113,
"step": 48
},
{
"epoch": 0.2832369942196532,
"grad_norm": 15.772967338562012,
"learning_rate": 2.886908691296504e-05,
"loss": 3.2568,
"step": 49
},
{
"epoch": 0.28901734104046245,
"grad_norm": 17.237407684326172,
"learning_rate": 2.6912569338248315e-05,
"loss": 4.7211,
"step": 50
},
{
"epoch": 0.28901734104046245,
"eval_loss": 2.5243701934814453,
"eval_runtime": 36.4442,
"eval_samples_per_second": 4.006,
"eval_steps_per_second": 2.003,
"step": 50
},
{
"epoch": 0.2947976878612717,
"grad_norm": 15.533300399780273,
"learning_rate": 2.500000000000001e-05,
"loss": 4.235,
"step": 51
},
{
"epoch": 0.30057803468208094,
"grad_norm": 17.215396881103516,
"learning_rate": 2.3135019582658802e-05,
"loss": 4.012,
"step": 52
},
{
"epoch": 0.3063583815028902,
"grad_norm": 18.76911163330078,
"learning_rate": 2.132117818244771e-05,
"loss": 4.2551,
"step": 53
},
{
"epoch": 0.31213872832369943,
"grad_norm": 22.53728675842285,
"learning_rate": 1.9561928549563968e-05,
"loss": 4.7717,
"step": 54
},
{
"epoch": 0.3179190751445087,
"grad_norm": 20.796886444091797,
"learning_rate": 1.7860619515673033e-05,
"loss": 4.5363,
"step": 55
},
{
"epoch": 0.3236994219653179,
"grad_norm": 21.427318572998047,
"learning_rate": 1.622048961921699e-05,
"loss": 3.6933,
"step": 56
},
{
"epoch": 0.32947976878612717,
"grad_norm": 18.019439697265625,
"learning_rate": 1.4644660940672627e-05,
"loss": 4.5195,
"step": 57
},
{
"epoch": 0.3352601156069364,
"grad_norm": 20.15680503845215,
"learning_rate": 1.3136133159493802e-05,
"loss": 4.5915,
"step": 58
},
{
"epoch": 0.34104046242774566,
"grad_norm": 16.63870620727539,
"learning_rate": 1.1697777844051105e-05,
"loss": 3.874,
"step": 59
},
{
"epoch": 0.3468208092485549,
"grad_norm": 16.888946533203125,
"learning_rate": 1.0332332985438248e-05,
"loss": 3.5552,
"step": 60
},
{
"epoch": 0.35260115606936415,
"grad_norm": 12.754011154174805,
"learning_rate": 9.042397785550405e-06,
"loss": 3.1729,
"step": 61
},
{
"epoch": 0.3583815028901734,
"grad_norm": 15.254498481750488,
"learning_rate": 7.830427709355725e-06,
"loss": 3.6215,
"step": 62
},
{
"epoch": 0.36416184971098264,
"grad_norm": 14.381755828857422,
"learning_rate": 6.698729810778065e-06,
"loss": 1.9965,
"step": 63
},
{
"epoch": 0.3699421965317919,
"grad_norm": 10.36881160736084,
"learning_rate": 5.649458341088915e-06,
"loss": 0.8034,
"step": 64
},
{
"epoch": 0.37572254335260113,
"grad_norm": 8.4619140625,
"learning_rate": 4.684610648167503e-06,
"loss": 0.9042,
"step": 65
},
{
"epoch": 0.3815028901734104,
"grad_norm": 12.096026420593262,
"learning_rate": 3.8060233744356633e-06,
"loss": 0.5585,
"step": 66
},
{
"epoch": 0.3872832369942196,
"grad_norm": 8.332564353942871,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.6868,
"step": 67
},
{
"epoch": 0.3930635838150289,
"grad_norm": 12.302162170410156,
"learning_rate": 2.314152462588659e-06,
"loss": 0.9655,
"step": 68
},
{
"epoch": 0.3988439306358382,
"grad_norm": 10.966118812561035,
"learning_rate": 1.70370868554659e-06,
"loss": 0.6503,
"step": 69
},
{
"epoch": 0.4046242774566474,
"grad_norm": 6.788025379180908,
"learning_rate": 1.1851996440033319e-06,
"loss": 0.4754,
"step": 70
},
{
"epoch": 0.41040462427745666,
"grad_norm": 18.9864559173584,
"learning_rate": 7.596123493895991e-07,
"loss": 0.7405,
"step": 71
},
{
"epoch": 0.4161849710982659,
"grad_norm": 10.223641395568848,
"learning_rate": 4.277569313094809e-07,
"loss": 0.5099,
"step": 72
},
{
"epoch": 0.42196531791907516,
"grad_norm": 8.91974925994873,
"learning_rate": 1.9026509541272275e-07,
"loss": 0.581,
"step": 73
},
{
"epoch": 0.4277456647398844,
"grad_norm": 12.053977966308594,
"learning_rate": 4.7588920907110094e-08,
"loss": 1.0495,
"step": 74
},
{
"epoch": 0.43352601156069365,
"grad_norm": 9.938634872436523,
"learning_rate": 0.0,
"loss": 0.6133,
"step": 75
},
{
"epoch": 0.43352601156069365,
"eval_loss": 1.9379535913467407,
"eval_runtime": 36.0904,
"eval_samples_per_second": 4.045,
"eval_steps_per_second": 2.023,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.7232841445002445e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}