{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.8158640226628895,
"global_step": 300,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 4e-05,
"loss": 1.9088,
"step": 2
},
{
"epoch": 0.02,
"learning_rate": 8e-05,
"loss": 1.9287,
"step": 4
},
{
"epoch": 0.04,
"learning_rate": 9.99989723479183e-05,
"loss": 1.7736,
"step": 6
},
{
"epoch": 0.05,
"learning_rate": 9.999075138471951e-05,
"loss": 1.9095,
"step": 8
},
{
"epoch": 0.06,
"learning_rate": 9.99743108100344e-05,
"loss": 1.918,
"step": 10
},
{
"epoch": 0.07,
"learning_rate": 9.994965332706573e-05,
"loss": 1.8706,
"step": 12
},
{
"epoch": 0.08,
"learning_rate": 9.991678299006205e-05,
"loss": 1.9351,
"step": 14
},
{
"epoch": 0.1,
"learning_rate": 9.987570520365104e-05,
"loss": 1.8793,
"step": 16
},
{
"epoch": 0.11,
"learning_rate": 9.982642672195092e-05,
"loss": 1.8827,
"step": 18
},
{
"epoch": 0.12,
"learning_rate": 9.976895564745991e-05,
"loss": 1.8303,
"step": 20
},
{
"epoch": 0.13,
"learning_rate": 9.970330142972401e-05,
"loss": 1.8379,
"step": 22
},
{
"epoch": 0.15,
"learning_rate": 9.962947486378326e-05,
"loss": 1.8987,
"step": 24
},
{
"epoch": 0.16,
"learning_rate": 9.954748808839674e-05,
"loss": 1.9518,
"step": 26
},
{
"epoch": 0.17,
"learning_rate": 9.945735458404681e-05,
"loss": 1.8511,
"step": 28
},
{
"epoch": 0.18,
"learning_rate": 9.935908917072252e-05,
"loss": 1.9064,
"step": 30
},
{
"epoch": 0.19,
"learning_rate": 9.925270800548285e-05,
"loss": 1.8882,
"step": 32
},
{
"epoch": 0.21,
"learning_rate": 9.91382285798002e-05,
"loss": 1.9368,
"step": 34
},
{
"epoch": 0.22,
"learning_rate": 9.901566971668437e-05,
"loss": 1.8747,
"step": 36
},
{
"epoch": 0.23,
"learning_rate": 9.888505156758759e-05,
"loss": 1.9426,
"step": 38
},
{
"epoch": 0.24,
"learning_rate": 9.874639560909117e-05,
"loss": 1.9316,
"step": 40
},
{
"epoch": 0.25,
"learning_rate": 9.859972463937441e-05,
"loss": 1.867,
"step": 42
},
{
"epoch": 0.27,
"learning_rate": 9.844506277446577e-05,
"loss": 1.8927,
"step": 44
},
{
"epoch": 0.28,
"learning_rate": 9.828243544427796e-05,
"loss": 1.9449,
"step": 46
},
{
"epoch": 0.29,
"learning_rate": 9.811186938842645e-05,
"loss": 1.8571,
"step": 48
},
{
"epoch": 0.3,
"learning_rate": 9.793339265183303e-05,
"loss": 1.8346,
"step": 50
},
{
"epoch": 0.31,
"learning_rate": 9.774703458011453e-05,
"loss": 1.9036,
"step": 52
},
{
"epoch": 0.33,
"learning_rate": 9.755282581475769e-05,
"loss": 1.8838,
"step": 54
},
{
"epoch": 0.34,
"learning_rate": 9.735079828808107e-05,
"loss": 1.959,
"step": 56
},
{
"epoch": 0.35,
"learning_rate": 9.714098521798465e-05,
"loss": 1.9376,
"step": 58
},
{
"epoch": 0.36,
"learning_rate": 9.692342110248802e-05,
"loss": 1.9326,
"step": 60
},
{
"epoch": 0.37,
"learning_rate": 9.669814171405816e-05,
"loss": 1.9344,
"step": 62
},
{
"epoch": 0.39,
"learning_rate": 9.64651840937276e-05,
"loss": 1.9038,
"step": 64
},
{
"epoch": 0.4,
"learning_rate": 9.622458654500409e-05,
"loss": 1.9294,
"step": 66
},
{
"epoch": 0.41,
"learning_rate": 9.597638862757255e-05,
"loss": 1.847,
"step": 68
},
{
"epoch": 0.42,
"learning_rate": 9.572063115079063e-05,
"loss": 1.9167,
"step": 70
},
{
"epoch": 0.44,
"learning_rate": 9.545735616697875e-05,
"loss": 1.8309,
"step": 72
},
{
"epoch": 0.45,
"learning_rate": 9.518660696450568e-05,
"loss": 1.8982,
"step": 74
},
{
"epoch": 0.46,
"learning_rate": 9.490842806067095e-05,
"loss": 1.8734,
"step": 76
},
{
"epoch": 0.47,
"learning_rate": 9.46228651943853e-05,
"loss": 1.8738,
"step": 78
},
{
"epoch": 0.48,
"learning_rate": 9.432996531865002e-05,
"loss": 1.9731,
"step": 80
},
{
"epoch": 0.5,
"learning_rate": 9.40297765928369e-05,
"loss": 1.9105,
"step": 82
},
{
"epoch": 0.51,
"learning_rate": 9.372234837476978e-05,
"loss": 1.8629,
"step": 84
},
{
"epoch": 0.52,
"learning_rate": 9.340773121260893e-05,
"loss": 1.8565,
"step": 86
},
{
"epoch": 0.53,
"learning_rate": 9.308597683653975e-05,
"loss": 1.9962,
"step": 88
},
{
"epoch": 0.54,
"learning_rate": 9.275713815026731e-05,
"loss": 1.8981,
"step": 90
},
{
"epoch": 0.56,
"learning_rate": 9.242126922231763e-05,
"loss": 1.9132,
"step": 92
},
{
"epoch": 0.57,
"learning_rate": 9.207842527714767e-05,
"loss": 1.9159,
"step": 94
},
{
"epoch": 0.58,
"learning_rate": 9.172866268606513e-05,
"loss": 1.9617,
"step": 96
},
{
"epoch": 0.59,
"learning_rate": 9.137203895795983e-05,
"loss": 1.8794,
"step": 98
},
{
"epoch": 0.6,
"learning_rate": 9.10086127298478e-05,
"loss": 2.0122,
"step": 100
},
{
"epoch": 0.62,
"learning_rate": 9.063844375723014e-05,
"loss": 1.8813,
"step": 102
},
{
"epoch": 0.63,
"learning_rate": 9.02615929042678e-05,
"loss": 1.9717,
"step": 104
},
{
"epoch": 0.64,
"learning_rate": 8.987812213377424e-05,
"loss": 1.9022,
"step": 106
},
{
"epoch": 0.65,
"learning_rate": 8.948809449702711e-05,
"loss": 1.9205,
"step": 108
},
{
"epoch": 0.66,
"learning_rate": 8.90915741234015e-05,
"loss": 1.8173,
"step": 110
},
{
"epoch": 0.68,
"learning_rate": 8.868862620982534e-05,
"loss": 2.0031,
"step": 112
},
{
"epoch": 0.69,
"learning_rate": 8.827931701005974e-05,
"loss": 1.9448,
"step": 114
},
{
"epoch": 0.7,
"learning_rate": 8.786371382380528e-05,
"loss": 1.9093,
"step": 116
},
{
"epoch": 0.71,
"learning_rate": 8.744188498563641e-05,
"loss": 1.9093,
"step": 118
},
{
"epoch": 0.73,
"learning_rate": 8.701389985376578e-05,
"loss": 1.8793,
"step": 120
},
{
"epoch": 0.74,
"learning_rate": 8.657982879864007e-05,
"loss": 1.9477,
"step": 122
},
{
"epoch": 0.75,
"learning_rate": 8.613974319136958e-05,
"loss": 1.8808,
"step": 124
},
{
"epoch": 0.76,
"learning_rate": 8.569371539199316e-05,
"loss": 1.9425,
"step": 126
},
{
"epoch": 0.77,
"learning_rate": 8.524181873758059e-05,
"loss": 1.8945,
"step": 128
},
{
"epoch": 0.79,
"learning_rate": 8.478412753017433e-05,
"loss": 1.9579,
"step": 130
},
{
"epoch": 0.8,
"learning_rate": 8.432071702457252e-05,
"loss": 1.9224,
"step": 132
},
{
"epoch": 0.81,
"learning_rate": 8.385166341595548e-05,
"loss": 1.9117,
"step": 134
},
{
"epoch": 0.82,
"learning_rate": 8.33770438273574e-05,
"loss": 1.9455,
"step": 136
},
{
"epoch": 0.83,
"learning_rate": 8.289693629698564e-05,
"loss": 1.8856,
"step": 138
},
{
"epoch": 0.85,
"learning_rate": 8.241141976538943e-05,
"loss": 1.915,
"step": 140
},
{
"epoch": 0.86,
"learning_rate": 8.192057406248028e-05,
"loss": 1.9302,
"step": 142
},
{
"epoch": 0.87,
"learning_rate": 8.142447989440618e-05,
"loss": 1.8647,
"step": 144
},
{
"epoch": 0.88,
"learning_rate": 8.092321883028158e-05,
"loss": 1.8653,
"step": 146
},
{
"epoch": 0.89,
"learning_rate": 8.041687328877567e-05,
"loss": 1.9183,
"step": 148
},
{
"epoch": 0.91,
"learning_rate": 7.990552652456081e-05,
"loss": 1.852,
"step": 150
},
{
"epoch": 0.92,
"learning_rate": 7.938926261462366e-05,
"loss": 1.9445,
"step": 152
},
{
"epoch": 0.93,
"learning_rate": 7.886816644444098e-05,
"loss": 1.8727,
"step": 154
},
{
"epoch": 0.94,
"learning_rate": 7.83423236940225e-05,
"loss": 1.9182,
"step": 156
},
{
"epoch": 0.95,
"learning_rate": 7.781182082382325e-05,
"loss": 1.9708,
"step": 158
},
{
"epoch": 0.97,
"learning_rate": 7.727674506052743e-05,
"loss": 1.8826,
"step": 160
},
{
"epoch": 0.98,
"learning_rate": 7.673718438270648e-05,
"loss": 1.9755,
"step": 162
},
{
"epoch": 0.99,
"learning_rate": 7.619322750635327e-05,
"loss": 1.947,
"step": 164
},
{
"epoch": 1.01,
"learning_rate": 7.564496387029532e-05,
"loss": 2.3347,
"step": 166
},
{
"epoch": 1.02,
"learning_rate": 7.509248362148889e-05,
"loss": 1.9177,
"step": 168
},
{
"epoch": 1.03,
"learning_rate": 7.45358776001969e-05,
"loss": 1.8889,
"step": 170
},
{
"epoch": 1.04,
"learning_rate": 7.39752373250527e-05,
"loss": 1.8581,
"step": 172
},
{
"epoch": 1.05,
"learning_rate": 7.34106549780123e-05,
"loss": 1.8564,
"step": 174
},
{
"epoch": 1.07,
"learning_rate": 7.284222338919758e-05,
"loss": 1.8354,
"step": 176
},
{
"epoch": 1.08,
"learning_rate": 7.227003602163295e-05,
"loss": 1.89,
"step": 178
},
{
"epoch": 1.09,
"learning_rate": 7.169418695587791e-05,
"loss": 1.9877,
"step": 180
},
{
"epoch": 1.1,
"learning_rate": 7.1114770874558e-05,
"loss": 1.7948,
"step": 182
},
{
"epoch": 1.11,
"learning_rate": 7.05318830467969e-05,
"loss": 1.9217,
"step": 184
},
{
"epoch": 1.13,
"learning_rate": 6.99456193125521e-05,
"loss": 1.7795,
"step": 186
},
{
"epoch": 1.14,
"learning_rate": 6.935607606685642e-05,
"loss": 1.9189,
"step": 188
},
{
"epoch": 1.15,
"learning_rate": 6.876335024396872e-05,
"loss": 1.8445,
"step": 190
},
{
"epoch": 1.16,
"learning_rate": 6.816753930143558e-05,
"loss": 1.852,
"step": 192
},
{
"epoch": 1.18,
"learning_rate": 6.756874120406714e-05,
"loss": 1.8622,
"step": 194
},
{
"epoch": 1.19,
"learning_rate": 6.696705440782938e-05,
"loss": 1.9092,
"step": 196
},
{
"epoch": 1.2,
"learning_rate": 6.636257784365584e-05,
"loss": 1.8074,
"step": 198
},
{
"epoch": 1.21,
"learning_rate": 6.575541090118105e-05,
"loss": 1.8295,
"step": 200
},
{
"epoch": 1.22,
"learning_rate": 6.514565341239861e-05,
"loss": 1.9595,
"step": 202
},
{
"epoch": 1.24,
"learning_rate": 6.453340563524669e-05,
"loss": 1.8733,
"step": 204
},
{
"epoch": 1.25,
"learning_rate": 6.391876823712317e-05,
"loss": 1.8277,
"step": 206
},
{
"epoch": 1.26,
"learning_rate": 6.330184227833376e-05,
"loss": 1.914,
"step": 208
},
{
"epoch": 1.27,
"learning_rate": 6.268272919547537e-05,
"loss": 1.8684,
"step": 210
},
{
"epoch": 1.28,
"learning_rate": 6.206153078475763e-05,
"loss": 1.9092,
"step": 212
},
{
"epoch": 1.3,
"learning_rate": 6.143834918526527e-05,
"loss": 1.7996,
"step": 214
},
{
"epoch": 1.31,
"learning_rate": 6.081328686216418e-05,
"loss": 1.8958,
"step": 216
},
{
"epoch": 1.32,
"learning_rate": 6.0186446589853784e-05,
"loss": 1.9597,
"step": 218
},
{
"epoch": 1.33,
"learning_rate": 5.955793143506863e-05,
"loss": 1.9054,
"step": 220
},
{
"epoch": 1.34,
"learning_rate": 5.8927844739931834e-05,
"loss": 1.8064,
"step": 222
},
{
"epoch": 1.36,
"learning_rate": 5.82962901049634e-05,
"loss": 1.9105,
"step": 224
},
{
"epoch": 1.37,
"learning_rate": 5.766337137204579e-05,
"loss": 1.8535,
"step": 226
},
{
"epoch": 1.38,
"learning_rate": 5.7029192607350146e-05,
"loss": 1.8961,
"step": 228
},
{
"epoch": 1.39,
"learning_rate": 5.6393858084225305e-05,
"loss": 1.8679,
"step": 230
},
{
"epoch": 1.4,
"learning_rate": 5.575747226605298e-05,
"loss": 1.8808,
"step": 232
},
{
"epoch": 1.42,
"learning_rate": 5.512013978907157e-05,
"loss": 1.8165,
"step": 234
},
{
"epoch": 1.43,
"learning_rate": 5.448196544517168e-05,
"loss": 1.849,
"step": 236
},
{
"epoch": 1.44,
"learning_rate": 5.384305416466584e-05,
"loss": 1.8585,
"step": 238
},
{
"epoch": 1.45,
"learning_rate": 5.320351099903565e-05,
"loss": 1.8477,
"step": 240
},
{
"epoch": 1.47,
"learning_rate": 5.256344110365896e-05,
"loss": 1.84,
"step": 242
},
{
"epoch": 1.48,
"learning_rate": 5.192294972051992e-05,
"loss": 1.847,
"step": 244
},
{
"epoch": 1.49,
"learning_rate": 5.128214216090478e-05,
"loss": 1.9149,
"step": 246
},
{
"epoch": 1.5,
"learning_rate": 5.064112378808637e-05,
"loss": 1.8937,
"step": 248
},
{
"epoch": 1.51,
"learning_rate": 5e-05,
"loss": 1.8805,
"step": 250
},
{
"epoch": 1.53,
"learning_rate": 4.935887621191364e-05,
"loss": 1.862,
"step": 252
},
{
"epoch": 1.54,
"learning_rate": 4.871785783909523e-05,
"loss": 1.8958,
"step": 254
},
{
"epoch": 1.55,
"learning_rate": 4.807705027948008e-05,
"loss": 1.9492,
"step": 256
},
{
"epoch": 1.56,
"learning_rate": 4.743655889634105e-05,
"loss": 1.918,
"step": 258
},
{
"epoch": 1.57,
"learning_rate": 4.679648900096436e-05,
"loss": 1.9169,
"step": 260
},
{
"epoch": 1.59,
"learning_rate": 4.6156945835334184e-05,
"loss": 1.8927,
"step": 262
},
{
"epoch": 1.6,
"learning_rate": 4.551803455482833e-05,
"loss": 1.9368,
"step": 264
},
{
"epoch": 1.61,
"learning_rate": 4.487986021092844e-05,
"loss": 1.9573,
"step": 266
},
{
"epoch": 1.62,
"learning_rate": 4.424252773394704e-05,
"loss": 1.9401,
"step": 268
},
{
"epoch": 1.63,
"learning_rate": 4.3606141915774693e-05,
"loss": 1.8759,
"step": 270
},
{
"epoch": 1.65,
"learning_rate": 4.297080739264987e-05,
"loss": 1.9353,
"step": 272
},
{
"epoch": 1.66,
"learning_rate": 4.23366286279542e-05,
"loss": 1.8375,
"step": 274
},
{
"epoch": 1.67,
"learning_rate": 4.1703709895036625e-05,
"loss": 1.9202,
"step": 276
},
{
"epoch": 1.68,
"learning_rate": 4.107215526006817e-05,
"loss": 1.9248,
"step": 278
},
{
"epoch": 1.69,
"learning_rate": 4.04420685649314e-05,
"loss": 1.7924,
"step": 280
},
{
"epoch": 1.71,
"learning_rate": 3.981355341014623e-05,
"loss": 1.7682,
"step": 282
},
{
"epoch": 1.72,
"learning_rate": 3.9186713137835826e-05,
"loss": 1.8612,
"step": 284
},
{
"epoch": 1.73,
"learning_rate": 3.856165081473474e-05,
"loss": 1.8759,
"step": 286
},
{
"epoch": 1.74,
"learning_rate": 3.793846921524237e-05,
"loss": 1.875,
"step": 288
},
{
"epoch": 1.76,
"learning_rate": 3.731727080452464e-05,
"loss": 1.8429,
"step": 290
},
{
"epoch": 1.77,
"learning_rate": 3.6698157721666246e-05,
"loss": 1.907,
"step": 292
},
{
"epoch": 1.78,
"learning_rate": 3.608123176287685e-05,
"loss": 1.8561,
"step": 294
},
{
"epoch": 1.79,
"learning_rate": 3.5466594364753326e-05,
"loss": 1.9138,
"step": 296
},
{
"epoch": 1.8,
"learning_rate": 3.48543465876014e-05,
"loss": 1.893,
"step": 298
},
{
"epoch": 1.82,
"learning_rate": 3.424458909881897e-05,
"loss": 1.8505,
"step": 300
}
],
"max_steps": 495,
"num_train_epochs": 3,
"total_flos": 1.4182542633473147e+18,
"trial_name": null,
"trial_params": null
}