Cafet committed on
Commit
3d66cce
1 Parent(s): 9a326e6

Upload 8 files

Files changed (5)
  1. model.safetensors +1 -1
  2. optimizer.pt +3 -0
  3. rng_state.pth +3 -0
  4. scheduler.pt +3 -0
  5. trainer_state.json +342 -0
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:962170d60c4eb94eec88ef9d64f38214dac535d2797e3ab0a002adda4de5e759
+oid sha256:7236b7d224d5aa3553d0c13a8e81d91b7a8d56ec4d855f3268926f531b43b991
 size 2422974460
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fb614f6258219a1a8afa5beb50209824c4ccfbb81d01c3e5f92fc1199c026d8
+size 4846409706
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f7c0e26e544562a785563534b37a6a9d9cad5b67dcb89ac3dae1cdb8bfa9a9b
+size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:665c895d17e43975c6948ab4f9c42311a2215bbd4abf94b595fda0aac39265bd
+size 1064
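Each of the binary files above is checked in as a Git LFS pointer (version / oid sha256:<hash> / size <bytes>) rather than as the blob itself. A minimal sketch, assuming a local pointer file in the standard format shown above and the blob fetched by `git lfs pull` (the file names here are only illustrative), for checking that a downloaded blob matches its pointer:

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    # A pointer file is a few "key value" lines: version, oid, size.
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].split(":", 1)[1]  # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])
    digest = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as f:
        # Hash in 1 MiB chunks so multi-GB files (e.g. optimizer.pt) are not read into memory at once.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# Illustrative paths: the pointer as stored in git, the blob as fetched locally.
print(verify_blob("scheduler.pt.pointer", "scheduler.pt"))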
trainer_state.json ADDED
@@ -0,0 +1,342 @@
+{
+  "best_metric": 0.04373383894562721,
+  "best_model_checkpoint": "w2v-bert-final-v2/checkpoint-10000",
+  "epoch": 7.716049382716049,
+  "eval_steps": 1000,
+  "global_step": 10000,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.23148148148148148,
+      "grad_norm": 7.229776859283447,
+      "learning_rate": 7.425e-06,
+      "loss": 3.4047,
+      "step": 300
+    },
+    {
+      "epoch": 0.46296296296296297,
+      "grad_norm": 11.000567436218262,
+      "learning_rate": 1.4925e-05,
+      "loss": 0.8648,
+      "step": 600
+    },
+    {
+      "epoch": 0.6944444444444444,
+      "grad_norm": 11.643492698669434,
+      "learning_rate": 2.2400000000000002e-05,
+      "loss": 0.6263,
+      "step": 900
+    },
+    {
+      "epoch": 0.7716049382716049,
+      "eval_loss": 0.5938597321510315,
+      "eval_runtime": 49.2872,
+      "eval_samples_per_second": 38.408,
+      "eval_steps_per_second": 4.809,
+      "eval_wer": 0.509299543180335,
+      "step": 1000
+    },
+    {
+      "epoch": 0.9259259259259259,
+      "grad_norm": 16.209674835205078,
+      "learning_rate": 2.9900000000000002e-05,
+      "loss": 0.5512,
+      "step": 1200
+    },
+    {
+      "epoch": 1.1574074074074074,
+      "grad_norm": 2.7776882648468018,
+      "learning_rate": 3.74e-05,
+      "loss": 0.4844,
+      "step": 1500
+    },
+    {
+      "epoch": 1.3888888888888888,
+      "grad_norm": 5.179060459136963,
+      "learning_rate": 4.4875e-05,
+      "loss": 0.4641,
+      "step": 1800
+    },
+    {
+      "epoch": 1.5432098765432098,
+      "eval_loss": 0.5058629512786865,
+      "eval_runtime": 49.9367,
+      "eval_samples_per_second": 37.908,
+      "eval_steps_per_second": 4.746,
+      "eval_wer": 0.45061996954535566,
+      "step": 2000
+    },
+    {
+      "epoch": 1.6203703703703702,
+      "grad_norm": 2.7534990310668945,
+      "learning_rate": 4.943236137667304e-05,
+      "loss": 0.4629,
+      "step": 2100
+    },
+    {
+      "epoch": 1.8518518518518519,
+      "grad_norm": 3.1153225898742676,
+      "learning_rate": 4.763981835564054e-05,
+      "loss": 0.4393,
+      "step": 2400
+    },
+    {
+      "epoch": 2.0833333333333335,
+      "grad_norm": 2.560495376586914,
+      "learning_rate": 4.584727533460803e-05,
+      "loss": 0.3782,
+      "step": 2700
+    },
+    {
+      "epoch": 2.314814814814815,
+      "grad_norm": 2.7176785469055176,
+      "learning_rate": 4.4054732313575525e-05,
+      "loss": 0.3054,
+      "step": 3000
+    },
+    {
+      "epoch": 2.314814814814815,
+      "eval_loss": 0.34695935249328613,
+      "eval_runtime": 49.4937,
+      "eval_samples_per_second": 38.247,
+      "eval_steps_per_second": 4.788,
+      "eval_wer": 0.3308135740700457,
+      "step": 3000
+    },
+    {
+      "epoch": 2.5462962962962963,
+      "grad_norm": 2.319594383239746,
+      "learning_rate": 4.226816443594647e-05,
+      "loss": 0.3079,
+      "step": 3300
+    },
+    {
+      "epoch": 2.7777777777777777,
+      "grad_norm": 2.165224552154541,
+      "learning_rate": 4.047562141491396e-05,
+      "loss": 0.301,
+      "step": 3600
+    },
+    {
+      "epoch": 3.009259259259259,
+      "grad_norm": 3.609168529510498,
+      "learning_rate": 3.8683078393881456e-05,
+      "loss": 0.2837,
+      "step": 3900
+    },
+    {
+      "epoch": 3.0864197530864197,
+      "eval_loss": 0.2659013867378235,
+      "eval_runtime": 50.1986,
+      "eval_samples_per_second": 37.71,
+      "eval_steps_per_second": 4.721,
+      "eval_wer": 0.2583206438981945,
+      "step": 4000
+    },
+    {
+      "epoch": 3.240740740740741,
+      "grad_norm": 1.6436126232147217,
+      "learning_rate": 3.689053537284895e-05,
+      "loss": 0.2191,
+      "step": 4200
+    },
+    {
+      "epoch": 3.4722222222222223,
+      "grad_norm": 1.704137921333313,
+      "learning_rate": 3.5103967495219884e-05,
+      "loss": 0.2103,
+      "step": 4500
+    },
+    {
+      "epoch": 3.7037037037037037,
+      "grad_norm": 6.373330593109131,
+      "learning_rate": 3.331142447418738e-05,
+      "loss": 0.2174,
+      "step": 4800
+    },
+    {
+      "epoch": 3.8580246913580245,
+      "eval_loss": 0.19009661674499512,
+      "eval_runtime": 49.6613,
+      "eval_samples_per_second": 38.118,
+      "eval_steps_per_second": 4.772,
+      "eval_wer": 0.195072873613226,
+      "step": 5000
+    },
+    {
+      "epoch": 3.935185185185185,
+      "grad_norm": 2.4362969398498535,
+      "learning_rate": 3.151888145315488e-05,
+      "loss": 0.1939,
+      "step": 5100
+    },
+    {
+      "epoch": 4.166666666666667,
+      "grad_norm": 1.4299020767211914,
+      "learning_rate": 2.9726338432122373e-05,
+      "loss": 0.1666,
+      "step": 5400
+    },
+    {
+      "epoch": 4.398148148148148,
+      "grad_norm": 1.3548126220703125,
+      "learning_rate": 2.793379541108987e-05,
+      "loss": 0.1569,
+      "step": 5700
+    },
+    {
+      "epoch": 4.62962962962963,
+      "grad_norm": 1.5247106552124023,
+      "learning_rate": 2.6141252390057363e-05,
+      "loss": 0.152,
+      "step": 6000
+    },
+    {
+      "epoch": 4.62962962962963,
+      "eval_loss": 0.15165844559669495,
+      "eval_runtime": 49.8658,
+      "eval_samples_per_second": 37.962,
+      "eval_steps_per_second": 4.753,
+      "eval_wer": 0.1540678703502284,
+      "step": 6000
+    },
+    {
+      "epoch": 4.861111111111111,
+      "grad_norm": 3.228041410446167,
+      "learning_rate": 2.434870936902486e-05,
+      "loss": 0.1481,
+      "step": 6300
+    },
+    {
+      "epoch": 5.092592592592593,
+      "grad_norm": 1.5059906244277954,
+      "learning_rate": 2.2562141491395794e-05,
+      "loss": 0.1256,
+      "step": 6600
+    },
+    {
+      "epoch": 5.324074074074074,
+      "grad_norm": 3.538102626800537,
+      "learning_rate": 2.0769598470363287e-05,
+      "loss": 0.1089,
+      "step": 6900
+    },
+    {
+      "epoch": 5.401234567901234,
+      "eval_loss": 0.11081259697675705,
+      "eval_runtime": 49.8031,
+      "eval_samples_per_second": 38.01,
+      "eval_steps_per_second": 4.759,
+      "eval_wer": 0.11404176636937133,
+      "step": 7000
+    },
+    {
+      "epoch": 5.555555555555555,
+      "grad_norm": 0.6531468033790588,
+      "learning_rate": 1.8977055449330787e-05,
+      "loss": 0.1022,
+      "step": 7200
+    },
+    {
+      "epoch": 5.787037037037037,
+      "grad_norm": 1.7070139646530151,
+      "learning_rate": 1.718451242829828e-05,
+      "loss": 0.1022,
+      "step": 7500
+    },
+    {
+      "epoch": 6.018518518518518,
+      "grad_norm": 1.612425446510315,
+      "learning_rate": 1.5397944550669215e-05,
+      "loss": 0.0986,
+      "step": 7800
+    },
+    {
+      "epoch": 6.172839506172839,
+      "eval_loss": 0.08366883546113968,
+      "eval_runtime": 51.7773,
+      "eval_samples_per_second": 36.56,
+      "eval_steps_per_second": 4.577,
+      "eval_wer": 0.08902545138133565,
+      "step": 8000
+    },
+    {
+      "epoch": 6.25,
+      "grad_norm": 0.7614215612411499,
+      "learning_rate": 1.3605401529636713e-05,
+      "loss": 0.0766,
+      "step": 8100
+    },
+    {
+      "epoch": 6.481481481481482,
+      "grad_norm": 2.542365550994873,
+      "learning_rate": 1.181883365200765e-05,
+      "loss": 0.0758,
+      "step": 8400
+    },
+    {
+      "epoch": 6.712962962962963,
+      "grad_norm": 1.319675326347351,
+      "learning_rate": 1.0026290630975144e-05,
+      "loss": 0.0726,
+      "step": 8700
+    },
+    {
+      "epoch": 6.944444444444445,
+      "grad_norm": 0.8109455704689026,
+      "learning_rate": 8.233747609942639e-06,
+      "loss": 0.0648,
+      "step": 9000
+    },
+    {
+      "epoch": 6.944444444444445,
+      "eval_loss": 0.05808680132031441,
+      "eval_runtime": 51.2984,
+      "eval_samples_per_second": 36.902,
+      "eval_steps_per_second": 4.62,
+      "eval_wer": 0.06031107243854688,
+      "step": 9000
+    },
+    {
+      "epoch": 7.175925925925926,
+      "grad_norm": 1.2560392618179321,
+      "learning_rate": 6.441204588910134e-06,
+      "loss": 0.051,
+      "step": 9300
+    },
+    {
+      "epoch": 7.407407407407407,
+      "grad_norm": 3.7403156757354736,
+      "learning_rate": 4.654636711281071e-06,
+      "loss": 0.0473,
+      "step": 9600
+    },
+    {
+      "epoch": 7.638888888888889,
+      "grad_norm": 0.7289965748786926,
+      "learning_rate": 2.862093690248566e-06,
+      "loss": 0.0499,
+      "step": 9900
+    },
+    {
+      "epoch": 7.716049382716049,
+      "eval_loss": 0.04373383894562721,
+      "eval_runtime": 50.2864,
+      "eval_samples_per_second": 37.644,
+      "eval_steps_per_second": 4.713,
+      "eval_wer": 0.04606264955405699,
+      "step": 10000
+    }
+  ],
+  "logging_steps": 300,
+  "max_steps": 10368,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 8,
+  "save_steps": 1000,
+  "total_flos": 2.075992650724199e+19,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
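The trainer_state.json added above records the full evaluation trajectory of this checkpoint (eval WER falling from roughly 0.51 at step 1000 to about 0.046 at step 10000, with best_metric holding the final eval_loss). A minimal sketch, assuming only a local copy of this file, for pulling that curve out of log_history:

import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the evaluation entries; training-loss entries have no "eval_wer" key.
eval_points = [(e["step"], e["eval_loss"], e["eval_wer"])
               for e in state["log_history"] if "eval_wer" in e]

for step, loss, wer in eval_points:
    print(f"step {step:>6}  eval_loss {loss:.4f}  WER {wer:.4f}")

print("best_metric (final eval_loss):", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])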