{
"best_metric": 0.9278689622879028,
"best_model_checkpoint": "data/Gemma-2-2B_task-1_180-samples_config-1_full_auto/checkpoint-102",
"epoch": 13.0,
"eval_steps": 500,
"global_step": 221,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.058823529411764705,
"grad_norm": 1.211376428604126,
"learning_rate": 1.1764705882352942e-06,
"loss": 2.2666,
"step": 1
},
{
"epoch": 0.11764705882352941,
"grad_norm": 1.2106879949569702,
"learning_rate": 2.3529411764705885e-06,
"loss": 2.3462,
"step": 2
},
{
"epoch": 0.23529411764705882,
"grad_norm": 1.2014598846435547,
"learning_rate": 4.705882352941177e-06,
"loss": 2.2134,
"step": 4
},
{
"epoch": 0.35294117647058826,
"grad_norm": 1.1927666664123535,
"learning_rate": 7.058823529411765e-06,
"loss": 2.2377,
"step": 6
},
{
"epoch": 0.47058823529411764,
"grad_norm": 1.3306472301483154,
"learning_rate": 9.411764705882354e-06,
"loss": 2.249,
"step": 8
},
{
"epoch": 0.5882352941176471,
"grad_norm": 1.2041209936141968,
"learning_rate": 1.1764705882352942e-05,
"loss": 2.2388,
"step": 10
},
{
"epoch": 0.7058823529411765,
"grad_norm": 1.2755601406097412,
"learning_rate": 1.411764705882353e-05,
"loss": 2.2887,
"step": 12
},
{
"epoch": 0.8235294117647058,
"grad_norm": 1.32211172580719,
"learning_rate": 1.647058823529412e-05,
"loss": 2.2792,
"step": 14
},
{
"epoch": 0.9411764705882353,
"grad_norm": 1.220287561416626,
"learning_rate": 1.8823529411764708e-05,
"loss": 2.1329,
"step": 16
},
{
"epoch": 1.0,
"eval_loss": 2.07915997505188,
"eval_runtime": 3.2915,
"eval_samples_per_second": 10.937,
"eval_steps_per_second": 10.937,
"step": 17
},
{
"epoch": 1.0588235294117647,
"grad_norm": 1.1516729593276978,
"learning_rate": 2.1176470588235296e-05,
"loss": 2.0807,
"step": 18
},
{
"epoch": 1.1764705882352942,
"grad_norm": 1.1192710399627686,
"learning_rate": 2.3529411764705884e-05,
"loss": 1.9824,
"step": 20
},
{
"epoch": 1.2941176470588236,
"grad_norm": 1.0268539190292358,
"learning_rate": 2.5882352941176475e-05,
"loss": 1.9392,
"step": 22
},
{
"epoch": 1.4117647058823528,
"grad_norm": 0.977662980556488,
"learning_rate": 2.823529411764706e-05,
"loss": 1.8069,
"step": 24
},
{
"epoch": 1.5294117647058822,
"grad_norm": 0.9665217995643616,
"learning_rate": 3.058823529411765e-05,
"loss": 1.7689,
"step": 26
},
{
"epoch": 1.6470588235294117,
"grad_norm": 0.9652373790740967,
"learning_rate": 3.294117647058824e-05,
"loss": 1.6125,
"step": 28
},
{
"epoch": 1.7647058823529411,
"grad_norm": 0.8798425197601318,
"learning_rate": 3.529411764705883e-05,
"loss": 1.5888,
"step": 30
},
{
"epoch": 1.8823529411764706,
"grad_norm": 0.903175950050354,
"learning_rate": 3.7647058823529415e-05,
"loss": 1.4489,
"step": 32
},
{
"epoch": 2.0,
"grad_norm": 0.8608154058456421,
"learning_rate": 4e-05,
"loss": 1.45,
"step": 34
},
{
"epoch": 2.0,
"eval_loss": 1.3698946237564087,
"eval_runtime": 3.2792,
"eval_samples_per_second": 10.978,
"eval_steps_per_second": 10.978,
"step": 34
},
{
"epoch": 2.1176470588235294,
"grad_norm": 0.8925697207450867,
"learning_rate": 4.235294117647059e-05,
"loss": 1.3552,
"step": 36
},
{
"epoch": 2.235294117647059,
"grad_norm": 0.8442366123199463,
"learning_rate": 4.470588235294118e-05,
"loss": 1.2553,
"step": 38
},
{
"epoch": 2.3529411764705883,
"grad_norm": 0.8634824752807617,
"learning_rate": 4.705882352941177e-05,
"loss": 1.1473,
"step": 40
},
{
"epoch": 2.4705882352941178,
"grad_norm": 0.7777335047721863,
"learning_rate": 4.9411764705882355e-05,
"loss": 1.1648,
"step": 42
},
{
"epoch": 2.588235294117647,
"grad_norm": 0.6794209480285645,
"learning_rate": 5.176470588235295e-05,
"loss": 1.092,
"step": 44
},
{
"epoch": 2.7058823529411766,
"grad_norm": 0.6069318056106567,
"learning_rate": 5.411764705882353e-05,
"loss": 1.1496,
"step": 46
},
{
"epoch": 2.8235294117647056,
"grad_norm": 0.5978594422340393,
"learning_rate": 5.647058823529412e-05,
"loss": 1.0745,
"step": 48
},
{
"epoch": 2.9411764705882355,
"grad_norm": 0.6143906116485596,
"learning_rate": 5.882352941176471e-05,
"loss": 1.025,
"step": 50
},
{
"epoch": 3.0,
"eval_loss": 1.0321526527404785,
"eval_runtime": 3.2435,
"eval_samples_per_second": 11.099,
"eval_steps_per_second": 11.099,
"step": 51
},
{
"epoch": 3.0588235294117645,
"grad_norm": 0.5873621702194214,
"learning_rate": 6.11764705882353e-05,
"loss": 0.943,
"step": 52
},
{
"epoch": 3.176470588235294,
"grad_norm": 0.5609561800956726,
"learning_rate": 6.352941176470588e-05,
"loss": 0.9986,
"step": 54
},
{
"epoch": 3.2941176470588234,
"grad_norm": 0.5394548177719116,
"learning_rate": 6.588235294117648e-05,
"loss": 0.9848,
"step": 56
},
{
"epoch": 3.411764705882353,
"grad_norm": 0.5527498722076416,
"learning_rate": 6.823529411764707e-05,
"loss": 1.0181,
"step": 58
},
{
"epoch": 3.5294117647058822,
"grad_norm": 0.5643818974494934,
"learning_rate": 7.058823529411765e-05,
"loss": 0.9044,
"step": 60
},
{
"epoch": 3.6470588235294117,
"grad_norm": 0.5956369042396545,
"learning_rate": 7.294117647058823e-05,
"loss": 0.9518,
"step": 62
},
{
"epoch": 3.764705882352941,
"grad_norm": 0.566049337387085,
"learning_rate": 7.529411764705883e-05,
"loss": 0.9283,
"step": 64
},
{
"epoch": 3.8823529411764706,
"grad_norm": 0.5462525486946106,
"learning_rate": 7.764705882352942e-05,
"loss": 0.9133,
"step": 66
},
{
"epoch": 4.0,
"grad_norm": 0.6023619174957275,
"learning_rate": 8e-05,
"loss": 0.8935,
"step": 68
},
{
"epoch": 4.0,
"eval_loss": 0.961121141910553,
"eval_runtime": 3.2502,
"eval_samples_per_second": 11.076,
"eval_steps_per_second": 11.076,
"step": 68
},
{
"epoch": 4.117647058823529,
"grad_norm": 0.558571457862854,
"learning_rate": 8.23529411764706e-05,
"loss": 0.8811,
"step": 70
},
{
"epoch": 4.235294117647059,
"grad_norm": 0.6055217981338501,
"learning_rate": 8.470588235294118e-05,
"loss": 0.8486,
"step": 72
},
{
"epoch": 4.352941176470588,
"grad_norm": 0.5949888229370117,
"learning_rate": 8.705882352941177e-05,
"loss": 0.8664,
"step": 74
},
{
"epoch": 4.470588235294118,
"grad_norm": 0.6656898260116577,
"learning_rate": 8.941176470588236e-05,
"loss": 0.8458,
"step": 76
},
{
"epoch": 4.588235294117647,
"grad_norm": 0.6678957939147949,
"learning_rate": 9.176470588235295e-05,
"loss": 0.8839,
"step": 78
},
{
"epoch": 4.705882352941177,
"grad_norm": 0.6404339671134949,
"learning_rate": 9.411764705882353e-05,
"loss": 0.8121,
"step": 80
},
{
"epoch": 4.823529411764706,
"grad_norm": 0.642274796962738,
"learning_rate": 9.647058823529412e-05,
"loss": 0.8434,
"step": 82
},
{
"epoch": 4.9411764705882355,
"grad_norm": 0.7844101786613464,
"learning_rate": 9.882352941176471e-05,
"loss": 0.827,
"step": 84
},
{
"epoch": 5.0,
"eval_loss": 0.933869481086731,
"eval_runtime": 3.1753,
"eval_samples_per_second": 11.337,
"eval_steps_per_second": 11.337,
"step": 85
},
{
"epoch": 5.0588235294117645,
"grad_norm": 0.6803512573242188,
"learning_rate": 9.99995783847866e-05,
"loss": 0.7698,
"step": 86
},
{
"epoch": 5.176470588235294,
"grad_norm": 0.6696349382400513,
"learning_rate": 9.999620550574153e-05,
"loss": 0.787,
"step": 88
},
{
"epoch": 5.294117647058823,
"grad_norm": 0.6686673164367676,
"learning_rate": 9.998945997517956e-05,
"loss": 0.7287,
"step": 90
},
{
"epoch": 5.411764705882353,
"grad_norm": 0.8087121844291687,
"learning_rate": 9.997934224814173e-05,
"loss": 0.7254,
"step": 92
},
{
"epoch": 5.529411764705882,
"grad_norm": 0.9504140019416809,
"learning_rate": 9.996585300715116e-05,
"loss": 0.7064,
"step": 94
},
{
"epoch": 5.647058823529412,
"grad_norm": 0.8831130266189575,
"learning_rate": 9.994899316216708e-05,
"loss": 0.6844,
"step": 96
},
{
"epoch": 5.764705882352941,
"grad_norm": 0.8389394283294678,
"learning_rate": 9.992876385052345e-05,
"loss": 0.7467,
"step": 98
},
{
"epoch": 5.882352941176471,
"grad_norm": 0.861285924911499,
"learning_rate": 9.990516643685222e-05,
"loss": 0.773,
"step": 100
},
{
"epoch": 6.0,
"grad_norm": 0.8316490650177002,
"learning_rate": 9.987820251299122e-05,
"loss": 0.7407,
"step": 102
},
{
"epoch": 6.0,
"eval_loss": 0.9278689622879028,
"eval_runtime": 3.2054,
"eval_samples_per_second": 11.231,
"eval_steps_per_second": 11.231,
"step": 102
},
{
"epoch": 6.117647058823529,
"grad_norm": 0.8588041067123413,
"learning_rate": 9.984787389787688e-05,
"loss": 0.5792,
"step": 104
},
{
"epoch": 6.235294117647059,
"grad_norm": 0.874444305896759,
"learning_rate": 9.981418263742148e-05,
"loss": 0.6258,
"step": 106
},
{
"epoch": 6.352941176470588,
"grad_norm": 0.8826048374176025,
"learning_rate": 9.977713100437509e-05,
"loss": 0.5959,
"step": 108
},
{
"epoch": 6.470588235294118,
"grad_norm": 1.0741201639175415,
"learning_rate": 9.973672149817232e-05,
"loss": 0.599,
"step": 110
},
{
"epoch": 6.588235294117647,
"grad_norm": 2.03265643119812,
"learning_rate": 9.96929568447637e-05,
"loss": 0.5829,
"step": 112
},
{
"epoch": 6.705882352941177,
"grad_norm": 1.414567470550537,
"learning_rate": 9.964583999643174e-05,
"loss": 0.6267,
"step": 114
},
{
"epoch": 6.823529411764706,
"grad_norm": 1.147159218788147,
"learning_rate": 9.95953741315919e-05,
"loss": 0.5487,
"step": 116
},
{
"epoch": 6.9411764705882355,
"grad_norm": 1.2041115760803223,
"learning_rate": 9.954156265457801e-05,
"loss": 0.5958,
"step": 118
},
{
"epoch": 7.0,
"eval_loss": 0.979455292224884,
"eval_runtime": 3.241,
"eval_samples_per_second": 11.108,
"eval_steps_per_second": 11.108,
"step": 119
},
{
"epoch": 7.0588235294117645,
"grad_norm": 1.0014081001281738,
"learning_rate": 9.948440919541278e-05,
"loss": 0.5794,
"step": 120
},
{
"epoch": 7.176470588235294,
"grad_norm": 1.009001612663269,
"learning_rate": 9.942391760956277e-05,
"loss": 0.4625,
"step": 122
},
{
"epoch": 7.294117647058823,
"grad_norm": 1.1101174354553223,
"learning_rate": 9.936009197767845e-05,
"loss": 0.4484,
"step": 124
},
{
"epoch": 7.411764705882353,
"grad_norm": 1.410061240196228,
"learning_rate": 9.929293660531888e-05,
"loss": 0.4385,
"step": 126
},
{
"epoch": 7.529411764705882,
"grad_norm": 1.6077678203582764,
"learning_rate": 9.922245602266118e-05,
"loss": 0.4205,
"step": 128
},
{
"epoch": 7.647058823529412,
"grad_norm": 1.6185195446014404,
"learning_rate": 9.91486549841951e-05,
"loss": 0.4509,
"step": 130
},
{
"epoch": 7.764705882352941,
"grad_norm": 1.506198763847351,
"learning_rate": 9.90715384684021e-05,
"loss": 0.434,
"step": 132
},
{
"epoch": 7.882352941176471,
"grad_norm": 1.2955230474472046,
"learning_rate": 9.899111167741966e-05,
"loss": 0.4812,
"step": 134
},
{
"epoch": 8.0,
"grad_norm": 1.3530545234680176,
"learning_rate": 9.890738003669029e-05,
"loss": 0.4738,
"step": 136
},
{
"epoch": 8.0,
"eval_loss": 1.0904909372329712,
"eval_runtime": 3.2248,
"eval_samples_per_second": 11.163,
"eval_steps_per_second": 11.163,
"step": 136
},
{
"epoch": 8.117647058823529,
"grad_norm": 1.145385980606079,
"learning_rate": 9.882034919459555e-05,
"loss": 0.3233,
"step": 138
},
{
"epoch": 8.235294117647058,
"grad_norm": 1.1618925333023071,
"learning_rate": 9.873002502207503e-05,
"loss": 0.3232,
"step": 140
},
{
"epoch": 8.352941176470589,
"grad_norm": 1.4919087886810303,
"learning_rate": 9.863641361223024e-05,
"loss": 0.2922,
"step": 142
},
{
"epoch": 8.470588235294118,
"grad_norm": 1.4797271490097046,
"learning_rate": 9.853952127991372e-05,
"loss": 0.3519,
"step": 144
},
{
"epoch": 8.588235294117647,
"grad_norm": 2.053419351577759,
"learning_rate": 9.843935456130295e-05,
"loss": 0.3247,
"step": 146
},
{
"epoch": 8.705882352941176,
"grad_norm": 1.6330363750457764,
"learning_rate": 9.833592021345937e-05,
"loss": 0.2855,
"step": 148
},
{
"epoch": 8.823529411764707,
"grad_norm": 1.7503317594528198,
"learning_rate": 9.822922521387276e-05,
"loss": 0.353,
"step": 150
},
{
"epoch": 8.941176470588236,
"grad_norm": 1.9878220558166504,
"learning_rate": 9.811927675999036e-05,
"loss": 0.3328,
"step": 152
},
{
"epoch": 9.0,
"eval_loss": 1.3366488218307495,
"eval_runtime": 3.1728,
"eval_samples_per_second": 11.346,
"eval_steps_per_second": 11.346,
"step": 153
},
{
"epoch": 9.058823529411764,
"grad_norm": 1.2611256837844849,
"learning_rate": 9.800608226873142e-05,
"loss": 0.2561,
"step": 154
},
{
"epoch": 9.176470588235293,
"grad_norm": 1.37074875831604,
"learning_rate": 9.788964937598689e-05,
"loss": 0.2339,
"step": 156
},
{
"epoch": 9.294117647058824,
"grad_norm": 1.5084260702133179,
"learning_rate": 9.776998593610428e-05,
"loss": 0.2086,
"step": 158
},
{
"epoch": 9.411764705882353,
"grad_norm": 1.6777526140213013,
"learning_rate": 9.764710002135784e-05,
"loss": 0.231,
"step": 160
},
{
"epoch": 9.529411764705882,
"grad_norm": 1.6174005270004272,
"learning_rate": 9.752099992140399e-05,
"loss": 0.1978,
"step": 162
},
{
"epoch": 9.647058823529411,
"grad_norm": 1.7943024635314941,
"learning_rate": 9.739169414272217e-05,
"loss": 0.2341,
"step": 164
},
{
"epoch": 9.764705882352942,
"grad_norm": 1.7131166458129883,
"learning_rate": 9.725919140804099e-05,
"loss": 0.2301,
"step": 166
},
{
"epoch": 9.882352941176471,
"grad_norm": 1.6622118949890137,
"learning_rate": 9.71235006557497e-05,
"loss": 0.2313,
"step": 168
},
{
"epoch": 10.0,
"grad_norm": 1.804017186164856,
"learning_rate": 9.698463103929542e-05,
"loss": 0.2352,
"step": 170
},
{
"epoch": 10.0,
"eval_loss": 1.5624709129333496,
"eval_runtime": 3.2479,
"eval_samples_per_second": 11.084,
"eval_steps_per_second": 11.084,
"step": 170
},
{
"epoch": 10.117647058823529,
"grad_norm": 1.4273288249969482,
"learning_rate": 9.684259192656553e-05,
"loss": 0.1517,
"step": 172
},
{
"epoch": 10.235294117647058,
"grad_norm": 1.3310012817382812,
"learning_rate": 9.669739289925577e-05,
"loss": 0.1501,
"step": 174
},
{
"epoch": 10.352941176470589,
"grad_norm": 1.6097581386566162,
"learning_rate": 9.654904375222385e-05,
"loss": 0.1573,
"step": 176
},
{
"epoch": 10.470588235294118,
"grad_norm": 1.6645879745483398,
"learning_rate": 9.639755449282875e-05,
"loss": 0.1506,
"step": 178
},
{
"epoch": 10.588235294117647,
"grad_norm": 1.8366305828094482,
"learning_rate": 9.62429353402556e-05,
"loss": 0.1654,
"step": 180
},
{
"epoch": 10.705882352941176,
"grad_norm": 1.6256617307662964,
"learning_rate": 9.608519672482636e-05,
"loss": 0.1921,
"step": 182
},
{
"epoch": 10.823529411764707,
"grad_norm": 1.610995888710022,
"learning_rate": 9.592434928729616e-05,
"loss": 0.167,
"step": 184
},
{
"epoch": 10.941176470588236,
"grad_norm": 1.6675574779510498,
"learning_rate": 9.576040387813552e-05,
"loss": 0.1754,
"step": 186
},
{
"epoch": 11.0,
"eval_loss": 1.6811158657073975,
"eval_runtime": 3.1996,
"eval_samples_per_second": 11.252,
"eval_steps_per_second": 11.252,
"step": 187
},
{
"epoch": 11.058823529411764,
"grad_norm": 1.390575885772705,
"learning_rate": 9.559337155679842e-05,
"loss": 0.1403,
"step": 188
},
{
"epoch": 11.176470588235293,
"grad_norm": 1.263203740119934,
"learning_rate": 9.542326359097619e-05,
"loss": 0.1194,
"step": 190
},
{
"epoch": 11.294117647058824,
"grad_norm": 1.2151466608047485,
"learning_rate": 9.525009145583745e-05,
"loss": 0.1169,
"step": 192
},
{
"epoch": 11.411764705882353,
"grad_norm": 1.5443471670150757,
"learning_rate": 9.507386683325404e-05,
"loss": 0.1249,
"step": 194
},
{
"epoch": 11.529411764705882,
"grad_norm": 1.971842885017395,
"learning_rate": 9.489460161101291e-05,
"loss": 0.1374,
"step": 196
},
{
"epoch": 11.647058823529411,
"grad_norm": 1.6832834482192993,
"learning_rate": 9.471230788201429e-05,
"loss": 0.1198,
"step": 198
},
{
"epoch": 11.764705882352942,
"grad_norm": 1.4485572576522827,
"learning_rate": 9.452699794345581e-05,
"loss": 0.12,
"step": 200
},
{
"epoch": 11.882352941176471,
"grad_norm": 1.4913806915283203,
"learning_rate": 9.43386842960031e-05,
"loss": 0.1121,
"step": 202
},
{
"epoch": 12.0,
"grad_norm": 1.5377891063690186,
"learning_rate": 9.414737964294636e-05,
"loss": 0.1229,
"step": 204
},
{
"epoch": 12.0,
"eval_loss": 1.821463942527771,
"eval_runtime": 3.2352,
"eval_samples_per_second": 11.127,
"eval_steps_per_second": 11.127,
"step": 204
},
{
"epoch": 12.117647058823529,
"grad_norm": 0.9307077527046204,
"learning_rate": 9.395309688934351e-05,
"loss": 0.0877,
"step": 206
},
{
"epoch": 12.235294117647058,
"grad_norm": 1.4314453601837158,
"learning_rate": 9.375584914114963e-05,
"loss": 0.0962,
"step": 208
},
{
"epoch": 12.352941176470589,
"grad_norm": 1.3369566202163696,
"learning_rate": 9.355564970433288e-05,
"loss": 0.1064,
"step": 210
},
{
"epoch": 12.470588235294118,
"grad_norm": 1.500665545463562,
"learning_rate": 9.335251208397684e-05,
"loss": 0.0932,
"step": 212
},
{
"epoch": 12.588235294117647,
"grad_norm": 1.5071280002593994,
"learning_rate": 9.314644998336949e-05,
"loss": 0.0974,
"step": 214
},
{
"epoch": 12.705882352941176,
"grad_norm": 1.623448133468628,
"learning_rate": 9.293747730307889e-05,
"loss": 0.1044,
"step": 216
},
{
"epoch": 12.823529411764707,
"grad_norm": 1.0675303936004639,
"learning_rate": 9.272560814001539e-05,
"loss": 0.0995,
"step": 218
},
{
"epoch": 12.941176470588236,
"grad_norm": 2.572380304336548,
"learning_rate": 9.251085678648072e-05,
"loss": 0.1126,
"step": 220
},
{
"epoch": 13.0,
"eval_loss": 1.8674242496490479,
"eval_runtime": 3.2508,
"eval_samples_per_second": 11.074,
"eval_steps_per_second": 11.074,
"step": 221
},
{
"epoch": 13.0,
"step": 221,
"total_flos": 7837976520294400.0,
"train_loss": 0.7398496248940537,
"train_runtime": 766.0105,
"train_samples_per_second": 8.877,
"train_steps_per_second": 1.11
}
],
"logging_steps": 2,
"max_steps": 850,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7837976520294400.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}