DeepDream2045 committed (verified)
Commit 5e1ad32 · 1 parent: 27642a1

Training in progress, step 50, checkpoint
last-checkpoint/adapter_config.json CHANGED
@@ -21,12 +21,12 @@
   "revision": null,
   "target_modules": [
     "v_proj",
-    "q_proj",
-    "gate_proj",
-    "k_proj",
     "down_proj",
+    "up_proj",
+    "q_proj",
     "o_proj",
-    "up_proj"
+    "gate_proj",
+    "k_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:77580874a6cea995a47b3d202895889d9bccee5ec7fe1953fcdade01fedc1c4b
+oid sha256:bafa4cca50aec18233c0193fd3f0088c21986310601de761008a486806d0d9af
 size 503407240
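
The entry above is a Git LFS pointer, so only the sha256 oid changes between checkpoints while the file size stays constant. A minimal sketch for checking that a locally downloaded file matches the recorded oid (the local path is an assumption):

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file in 1 MiB chunks so large checkpoint files fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid taken from the new LFS pointer above; the path assumes the checkpoint
# has been downloaded next to this script.
expected = "bafa4cca50aec18233c0193fd3f0088c21986310601de761008a486806d0d9af"
print(sha256_of("last-checkpoint/adapter_model.safetensors") == expected)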
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:be688fe6e47744c7892449100e45315aa22bfb0afb28dfd076113c6300e8c501
+oid sha256:c20a9def8a1ffc76af75f59b81e726230c4c8bc8dd6257cd31a14cd3f0df8e35
 size 1007201074
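
optimizer.pt holds the serialized optimizer state that lets training resume from step 50. A rough sketch for inspecting it locally; the path is an assumption, and weights_only=False assumes the file was written with torch.save on a full state dict:

import torch

# Load the optimizer state dict saved alongside the checkpoint.
state = torch.load(
    "last-checkpoint/optimizer.pt",
    map_location="cpu",
    weights_only=False,
)
print(state.keys())          # typically dict_keys(['state', 'param_groups'])
print(len(state["state"]))   # number of trainable (adapter) parameters being tracked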
last-checkpoint/trainer_state.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "best_metric": 1.6343824863433838,
+  "best_metric": 1.636163353919983,
   "best_model_checkpoint": "miner_id_24/checkpoint-50",
   "epoch": 0.03311806590495115,
   "eval_steps": 25,
@@ -10,7 +10,7 @@
   "log_history": [
     {
       "epoch": 0.000662361318099023,
-      "grad_norm": 41.63633728027344,
+      "grad_norm": 39.855995178222656,
       "learning_rate": 5e-05,
       "loss": 27.5824,
       "step": 1
@@ -18,368 +18,368 @@
     {
       "epoch": 0.000662361318099023,
       "eval_loss": 2.2696444988250732,
-      "eval_runtime": 431.7537,
-      "eval_samples_per_second": 23.557,
-      "eval_steps_per_second": 2.946,
+      "eval_runtime": 434.2712,
+      "eval_samples_per_second": 23.421,
+      "eval_steps_per_second": 2.929,
       "step": 1
     },
     {
       "epoch": 0.001324722636198046,
-      "grad_norm": 36.75858688354492,
+      "grad_norm": 35.34762191772461,
       "learning_rate": 0.0001,
       "loss": 27.0301,
       "step": 2
     },
     {
       "epoch": 0.001987083954297069,
-      "grad_norm": 42.25046920776367,
+      "grad_norm": 40.128257751464844,
       "learning_rate": 9.989294616193017e-05,
-      "loss": 30.0,
+      "loss": 30.0122,
       "step": 3
     },
     {
       "epoch": 0.002649445272396092,
-      "grad_norm": 36.78057861328125,
+      "grad_norm": 35.508766174316406,
       "learning_rate": 9.957224306869053e-05,
-      "loss": 27.1166,
+      "loss": 27.1283,
       "step": 4
     },
     {
       "epoch": 0.003311806590495115,
-      "grad_norm": 36.5877799987793,
+      "grad_norm": 35.003509521484375,
       "learning_rate": 9.903926402016153e-05,
-      "loss": 30.1777,
+      "loss": 30.1937,
       "step": 5
     },
     {
       "epoch": 0.003974167908594138,
-      "grad_norm": 34.792659759521484,
+      "grad_norm": 33.496620178222656,
       "learning_rate": 9.829629131445342e-05,
-      "loss": 32.1896,
+      "loss": 32.1736,
       "step": 6
     },
     {
       "epoch": 0.004636529226693161,
-      "grad_norm": 28.476728439331055,
+      "grad_norm": 28.025121688842773,
       "learning_rate": 9.73465064747553e-05,
-      "loss": 28.1354,
+      "loss": 28.1705,
       "step": 7
     },
     {
       "epoch": 0.005298890544792184,
-      "grad_norm": 39.693870544433594,
+      "grad_norm": 38.546974182128906,
       "learning_rate": 9.619397662556435e-05,
-      "loss": 31.488,
+      "loss": 31.4793,
       "step": 8
     },
     {
       "epoch": 0.005961251862891207,
-      "grad_norm": 32.581214904785156,
+      "grad_norm": 31.470550537109375,
       "learning_rate": 9.484363707663442e-05,
-      "loss": 29.9008,
+      "loss": 29.978,
       "step": 9
     },
     {
       "epoch": 0.00662361318099023,
-      "grad_norm": 34.23081588745117,
+      "grad_norm": 33.33460235595703,
       "learning_rate": 9.330127018922194e-05,
-      "loss": 29.6987,
+      "loss": 29.724,
       "step": 10
     },
     {
       "epoch": 0.007285974499089253,
-      "grad_norm": 39.92532730102539,
+      "grad_norm": 38.43280792236328,
       "learning_rate": 9.157348061512727e-05,
-      "loss": 33.3046,
+      "loss": 33.3028,
       "step": 11
     },
     {
       "epoch": 0.007948335817188276,
-      "grad_norm": 37.282161712646484,
+      "grad_norm": 35.78132247924805,
       "learning_rate": 8.966766701456177e-05,
-      "loss": 31.1424,
+      "loss": 31.1296,
       "step": 12
     },
     {
       "epoch": 0.0086106971352873,
-      "grad_norm": 36.434452056884766,
+      "grad_norm": 36.88593673706055,
       "learning_rate": 8.759199037394887e-05,
-      "loss": 30.9173,
+      "loss": 30.9662,
       "step": 13
     },
     {
       "epoch": 0.009273058453386322,
-      "grad_norm": 34.44896697998047,
+      "grad_norm": 35.441650390625,
       "learning_rate": 8.535533905932738e-05,
-      "loss": 26.2724,
+      "loss": 26.3844,
       "step": 14
     },
     {
       "epoch": 0.009935419771485345,
-      "grad_norm": 25.074344635009766,
+      "grad_norm": 25.30802345275879,
       "learning_rate": 8.296729075500344e-05,
-      "loss": 26.8365,
+      "loss": 26.8914,
       "step": 15
     },
     {
       "epoch": 0.010597781089584368,
-      "grad_norm": 20.83970069885254,
+      "grad_norm": 20.81322479248047,
       "learning_rate": 8.043807145043604e-05,
-      "loss": 24.8747,
+      "loss": 24.8694,
       "step": 16
     },
     {
       "epoch": 0.011260142407683391,
-      "grad_norm": 20.634695053100586,
+      "grad_norm": 20.53407859802246,
       "learning_rate": 7.777851165098012e-05,
-      "loss": 24.7149,
+      "loss": 24.7313,
       "step": 17
     },
     {
       "epoch": 0.011922503725782414,
-      "grad_norm": 22.36671257019043,
+      "grad_norm": 21.87583351135254,
       "learning_rate": 7.500000000000001e-05,
-      "loss": 25.9939,
+      "loss": 26.0212,
       "step": 18
     },
     {
       "epoch": 0.012584865043881437,
-      "grad_norm": 23.41545867919922,
+      "grad_norm": 22.73112678527832,
       "learning_rate": 7.211443451095007e-05,
-      "loss": 25.4919,
+      "loss": 25.5372,
       "step": 19
     },
     {
       "epoch": 0.01324722636198046,
-      "grad_norm": 23.04401206970215,
+      "grad_norm": 22.921876907348633,
       "learning_rate": 6.91341716182545e-05,
-      "loss": 26.2824,
+      "loss": 26.2909,
       "step": 20
     },
     {
       "epoch": 0.013909587680079483,
-      "grad_norm": 24.668642044067383,
+      "grad_norm": 24.30012321472168,
       "learning_rate": 6.607197326515808e-05,
-      "loss": 27.4812,
+      "loss": 27.5489,
       "step": 21
     },
     {
       "epoch": 0.014571948998178506,
-      "grad_norm": 24.785856246948242,
+      "grad_norm": 24.02613639831543,
       "learning_rate": 6.294095225512603e-05,
-      "loss": 27.3597,
+      "loss": 27.3856,
       "step": 22
     },
     {
       "epoch": 0.01523431031627753,
-      "grad_norm": 28.567190170288086,
+      "grad_norm": 28.35286521911621,
       "learning_rate": 5.9754516100806423e-05,
-      "loss": 27.7361,
+      "loss": 27.7234,
       "step": 23
     },
     {
       "epoch": 0.015896671634376552,
-      "grad_norm": 36.09950256347656,
+      "grad_norm": 33.62429428100586,
       "learning_rate": 5.6526309611002594e-05,
-      "loss": 29.4177,
+      "loss": 29.4387,
       "step": 24
     },
     {
       "epoch": 0.016559032952475575,
-      "grad_norm": 32.96276092529297,
+      "grad_norm": 34.30913162231445,
       "learning_rate": 5.327015646150716e-05,
-      "loss": 29.8551,
+      "loss": 29.8534,
       "step": 25
     },
     {
       "epoch": 0.016559032952475575,
-      "eval_loss": 1.6892707347869873,
-      "eval_runtime": 435.802,
-      "eval_samples_per_second": 23.339,
-      "eval_steps_per_second": 2.919,
+      "eval_loss": 1.6895121335983276,
+      "eval_runtime": 437.1193,
+      "eval_samples_per_second": 23.268,
+      "eval_steps_per_second": 2.91,
       "step": 25
     },
     {
       "epoch": 0.0172213942705746,
-      "grad_norm": 22.903234481811523,
+      "grad_norm": 21.834144592285156,
       "learning_rate": 5e-05,
-      "loss": 22.4765,
+      "loss": 22.4394,
       "step": 26
     },
     {
       "epoch": 0.01788375558867362,
-      "grad_norm": 19.913135528564453,
+      "grad_norm": 19.145488739013672,
       "learning_rate": 4.6729843538492847e-05,
-      "loss": 23.0996,
+      "loss": 23.0711,
       "step": 27
     },
     {
       "epoch": 0.018546116906772644,
-      "grad_norm": 18.672170639038086,
+      "grad_norm": 18.362302780151367,
       "learning_rate": 4.347369038899744e-05,
-      "loss": 23.7646,
+      "loss": 23.7712,
       "step": 28
     },
     {
       "epoch": 0.019208478224871667,
-      "grad_norm": 25.744522094726562,
+      "grad_norm": 26.06848907470703,
       "learning_rate": 4.0245483899193595e-05,
-      "loss": 27.9772,
+      "loss": 27.9575,
       "step": 29
     },
     {
       "epoch": 0.01987083954297069,
-      "grad_norm": 21.418855667114258,
+      "grad_norm": 20.97596549987793,
       "learning_rate": 3.705904774487396e-05,
-      "loss": 25.7398,
+      "loss": 25.7123,
       "step": 30
     },
     {
       "epoch": 0.020533200861069714,
-      "grad_norm": 19.990333557128906,
+      "grad_norm": 19.997718811035156,
       "learning_rate": 3.392802673484193e-05,
-      "loss": 24.2821,
+      "loss": 24.3034,
       "step": 31
     },
     {
       "epoch": 0.021195562179168737,
-      "grad_norm": 22.049428939819336,
+      "grad_norm": 22.800750732421875,
       "learning_rate": 3.086582838174551e-05,
-      "loss": 25.765,
+      "loss": 25.7513,
       "step": 32
     },
     {
       "epoch": 0.02185792349726776,
-      "grad_norm": 22.674144744873047,
+      "grad_norm": 22.728124618530273,
       "learning_rate": 2.7885565489049946e-05,
-      "loss": 26.8537,
+      "loss": 26.8797,
       "step": 33
     },
     {
       "epoch": 0.022520284815366783,
-      "grad_norm": 22.346158981323242,
+      "grad_norm": 22.23642349243164,
       "learning_rate": 2.500000000000001e-05,
-      "loss": 26.8923,
+      "loss": 26.9258,
       "step": 34
     },
     {
       "epoch": 0.023182646133465806,
-      "grad_norm": 22.35358428955078,
+      "grad_norm": 22.032678604125977,
       "learning_rate": 2.2221488349019903e-05,
-      "loss": 25.6358,
+      "loss": 25.6688,
       "step": 35
     },
     {
       "epoch": 0.02384500745156483,
-      "grad_norm": 26.604528427124023,
+      "grad_norm": 26.3785400390625,
       "learning_rate": 1.9561928549563968e-05,
-      "loss": 28.5165,
+      "loss": 28.5324,
       "step": 36
     },
     {
       "epoch": 0.02450736876966385,
-      "grad_norm": 25.8643741607666,
+      "grad_norm": 26.36612892150879,
       "learning_rate": 1.703270924499656e-05,
-      "loss": 31.2324,
+      "loss": 31.2677,
       "step": 37
     },
     {
       "epoch": 0.025169730087762875,
-      "grad_norm": 30.428800582885742,
+      "grad_norm": 30.574783325195312,
       "learning_rate": 1.4644660940672627e-05,
-      "loss": 27.0481,
+      "loss": 27.0322,
       "step": 38
     },
     {
       "epoch": 0.025832091405861898,
-      "grad_norm": 18.89072608947754,
+      "grad_norm": 18.087467193603516,
       "learning_rate": 1.2408009626051137e-05,
-      "loss": 23.8018,
+      "loss": 23.8036,
       "step": 39
     },
     {
       "epoch": 0.02649445272396092,
-      "grad_norm": 24.802570343017578,
+      "grad_norm": 23.013254165649414,
       "learning_rate": 1.0332332985438248e-05,
-      "loss": 26.2798,
+      "loss": 26.3198,
       "step": 40
     },
     {
       "epoch": 0.027156814042059944,
-      "grad_norm": 17.563922882080078,
+      "grad_norm": 16.78318977355957,
       "learning_rate": 8.426519384872733e-06,
-      "loss": 22.905,
+      "loss": 22.9086,
       "step": 41
     },
     {
       "epoch": 0.027819175360158967,
-      "grad_norm": 17.73468780517578,
+      "grad_norm": 17.115629196166992,
       "learning_rate": 6.698729810778065e-06,
-      "loss": 25.4188,
+      "loss": 25.436,
       "step": 42
     },
     {
       "epoch": 0.02848153667825799,
-      "grad_norm": 18.167245864868164,
+      "grad_norm": 17.63496208190918,
       "learning_rate": 5.156362923365588e-06,
-      "loss": 25.1034,
+      "loss": 25.0821,
       "step": 43
     },
     {
       "epoch": 0.029143897996357013,
-      "grad_norm": 22.048166275024414,
+      "grad_norm": 21.715238571166992,
       "learning_rate": 3.8060233744356633e-06,
-      "loss": 28.0479,
+      "loss": 28.0717,
       "step": 44
     },
     {
       "epoch": 0.029806259314456036,
-      "grad_norm": 17.95345115661621,
+      "grad_norm": 17.863527297973633,
       "learning_rate": 2.653493525244721e-06,
-      "loss": 26.0678,
+      "loss": 26.0547,
       "step": 45
     },
     {
       "epoch": 0.03046862063255506,
-      "grad_norm": 21.010440826416016,
+      "grad_norm": 20.718631744384766,
       "learning_rate": 1.70370868554659e-06,
-      "loss": 25.4916,
+      "loss": 25.5254,
       "step": 46
     },
     {
       "epoch": 0.031130981950654082,
-      "grad_norm": 21.977310180664062,
+      "grad_norm": 21.940893173217773,
       "learning_rate": 9.607359798384785e-07,
-      "loss": 25.0578,
+      "loss": 25.1054,
       "step": 47
     },
     {
       "epoch": 0.031793343268753105,
-      "grad_norm": 23.666597366333008,
+      "grad_norm": 23.446805953979492,
       "learning_rate": 4.277569313094809e-07,
-      "loss": 26.1003,
+      "loss": 26.1938,
       "step": 48
     },
     {
       "epoch": 0.03245570458685213,
-      "grad_norm": 23.877300262451172,
+      "grad_norm": 23.616628646850586,
       "learning_rate": 1.0705383806982606e-07,
-      "loss": 27.8739,
+      "loss": 27.9034,
       "step": 49
     },
     {
       "epoch": 0.03311806590495115,
-      "grad_norm": 28.445064544677734,
+      "grad_norm": 28.035184860229492,
       "learning_rate": 0.0,
-      "loss": 28.7613,
+      "loss": 28.7963,
       "step": 50
     },
     {
       "epoch": 0.03311806590495115,
-      "eval_loss": 1.6343824863433838,
-      "eval_runtime": 431.8551,
-      "eval_samples_per_second": 23.552,
-      "eval_steps_per_second": 2.945,
+      "eval_loss": 1.636163353919983,
+      "eval_runtime": 435.9234,
+      "eval_samples_per_second": 23.332,
+      "eval_steps_per_second": 2.918,
       "step": 50
     }
   ],
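
trainer_state.json is plain JSON, so the updated metrics above (best_metric, per-step loss/grad_norm, and the eval_* entries) can be inspected directly. A small sketch, assuming the checkpoint has been downloaded locally:

import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"])             # 1.636163353919983 in this commit
print(state["best_model_checkpoint"])   # "miner_id_24/checkpoint-50"

# Pull out just the evaluation records (entries that carry an "eval_loss" key).
evals = [e for e in state["log_history"] if "eval_loss" in e]
for e in evals:
    print(e["step"], e["eval_loss"])    # eval_loss drops from ~2.27 at step 1 to ~1.64 at step 50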
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ab0c519457372ad6a80ee0dfc254474dd57398cb585e6950ec0029bb6e3d976d
+oid sha256:e4ec30c2f040ab99c8c9780866cf93b033c1f7cafeb2492f21c48e1b70ba5d96
 size 6776
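
training_args.bin stores the pickled TrainingArguments used for this run. A sketch for loading it; the path and the need for weights_only=False (required for unpickling non-tensor objects on recent PyTorch versions) are assumptions:

import torch

# Unpickle the TrainingArguments saved with this checkpoint.
args = torch.load(
    "last-checkpoint/training_args.bin",
    map_location="cpu",
    weights_only=False,
)
print(type(args).__name__)   # e.g. TrainingArguments
print(args.learning_rate)    # the peak LR in trainer_state.json's schedule is 0.0001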