csikasote committed
Commit 036f4b1 (1 parent: e64a637)

End of training

README.md CHANGED
@@ -3,6 +3,9 @@ library_name: transformers
  license: cc-by-nc-4.0
  base_model: facebook/mms-1b-all
  tags:
+ - automatic-speech-recognition
+ - genbed
+ - mms
  - generated_from_trainer
  metrics:
  - wer
@@ -16,10 +19,10 @@ should probably proofread and complete it, then remove this comment. -->
 
  # mms-1b-nyagen-combined-model
 
- This model is a fine-tuned version of [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all) on an unknown dataset.
+ This model is a fine-tuned version of [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all) on the GENBED - BEM dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.1767
- - Wer: 0.2447
+ - Loss: 0.1727
+ - Wer: 0.2465
 
  ## Model description
 
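For reference, a minimal transcription sketch for this checkpoint (not part of the commit; it assumes the repo id `csikasote/mms-1b-nyagen-combined-model`, a 16 kHz mono input file `sample.wav`, and that the shipped Bemba adapter is selected with `target_lang="bem"`):

```python
# Minimal ASR inference sketch; the repo id and audio path below are assumptions.
import torch
import librosa
from transformers import AutoProcessor, Wav2Vec2ForCTC

model_id = "csikasote/mms-1b-nyagen-combined-model"  # assumed repo id for this commit

processor = AutoProcessor.from_pretrained(model_id)
# target_lang="bem" selects the Bemba adapter (adapter.bem.safetensors);
# ignore_mismatched_sizes=True allows the fine-tuned CTC head to differ from the base model's.
model = Wav2Vec2ForCTC.from_pretrained(model_id, target_lang="bem", ignore_mismatched_sizes=True)

# MMS models expect 16 kHz mono audio.
speech, _ = librosa.load("sample.wav", sr=16_000, mono=True)
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids)[0])
```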
adapter.bem.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39197c05be45f59c244414284b1dcf19a5df6808fea06bf79086d95f45ee8b2a
+ size 8798532
all_results.json ADDED
@@ -0,0 +1,15 @@
+ {
+     "epoch": 3.1377899045020463,
+     "eval_loss": 0.17273877561092377,
+     "eval_runtime": 31.1778,
+     "eval_samples": 344,
+     "eval_samples_per_second": 11.033,
+     "eval_steps_per_second": 2.758,
+     "eval_wer": 0.2464726631393298,
+     "total_flos": 8.649929823066914e+18,
+     "train_loss": 0.5916279270337975,
+     "train_runtime": 2613.7833,
+     "train_samples": 2929,
+     "train_samples_per_second": 33.618,
+     "train_steps_per_second": 8.413
+ }
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 3.1377899045020463,
+     "eval_loss": 0.17273877561092377,
+     "eval_runtime": 31.1778,
+     "eval_samples": 344,
+     "eval_samples_per_second": 11.033,
+     "eval_steps_per_second": 2.758,
+     "eval_wer": 0.2464726631393298
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 3.1377899045020463,
+     "total_flos": 8.649929823066914e+18,
+     "train_loss": 0.5916279270337975,
+     "train_runtime": 2613.7833,
+     "train_samples": 2929,
+     "train_samples_per_second": 33.618,
+     "train_steps_per_second": 8.413
+ }
trainer_state.json ADDED
@@ -0,0 +1,419 @@
+ {
+   "best_metric": 0.1726989597082138,
+   "best_model_checkpoint": "/scratch/skscla001/speech/results/mms-1b-nyagen-combined-model/checkpoint-2000",
+   "epoch": 3.1377899045020463,
+   "eval_steps": 100,
+   "global_step": 2300,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.1364256480218281,
+       "grad_norm": 2.10461163520813,
+       "learning_rate": 0.00029099999999999997,
+       "loss": 6.9978,
+       "step": 100
+     },
+     {
+       "epoch": 0.1364256480218281,
+       "eval_loss": 0.6384420394897461,
+       "eval_runtime": 30.8578,
+       "eval_samples_per_second": 11.148,
+       "eval_steps_per_second": 2.787,
+       "eval_wer": 0.5015432098765432,
+       "step": 100
+     },
+     {
+       "epoch": 0.2728512960436562,
+       "grad_norm": 0.9924084544181824,
+       "learning_rate": 0.0002986843307446322,
+       "loss": 0.482,
+       "step": 200
+     },
+     {
+       "epoch": 0.2728512960436562,
+       "eval_loss": 0.27768996357917786,
+       "eval_runtime": 30.8437,
+       "eval_samples_per_second": 11.153,
+       "eval_steps_per_second": 2.788,
+       "eval_wer": 0.3712522045855379,
+       "step": 200
+     },
+     {
+       "epoch": 0.4092769440654843,
+       "grad_norm": 1.641776442527771,
+       "learning_rate": 0.0002973275468250342,
+       "loss": 0.3907,
+       "step": 300
+     },
+     {
+       "epoch": 0.4092769440654843,
+       "eval_loss": 0.24835941195487976,
+       "eval_runtime": 30.9832,
+       "eval_samples_per_second": 11.103,
+       "eval_steps_per_second": 2.776,
+       "eval_wer": 0.3481040564373898,
+       "step": 300
+     },
+     {
+       "epoch": 0.5457025920873124,
+       "grad_norm": 1.1754199266433716,
+       "learning_rate": 0.00029597076290543625,
+       "loss": 0.3782,
+       "step": 400
+     },
+     {
+       "epoch": 0.5457025920873124,
+       "eval_loss": 0.22902728617191315,
+       "eval_runtime": 30.7597,
+       "eval_samples_per_second": 11.183,
+       "eval_steps_per_second": 2.796,
+       "eval_wer": 0.32319223985890655,
+       "step": 400
+     },
+     {
+       "epoch": 0.6821282401091405,
+       "grad_norm": 1.491380214691162,
+       "learning_rate": 0.00029460027409776153,
+       "loss": 0.3316,
+       "step": 500
+     },
+     {
+       "epoch": 0.6821282401091405,
+       "eval_loss": 0.222237691283226,
+       "eval_runtime": 31.0639,
+       "eval_samples_per_second": 11.074,
+       "eval_steps_per_second": 2.768,
+       "eval_wer": 0.3148148148148148,
+       "step": 500
+     },
+     {
+       "epoch": 0.8185538881309686,
+       "grad_norm": 0.943580150604248,
+       "learning_rate": 0.00029322978529008675,
+       "loss": 0.3158,
+       "step": 600
+     },
+     {
+       "epoch": 0.8185538881309686,
+       "eval_loss": 0.2126626819372177,
+       "eval_runtime": 31.0228,
+       "eval_samples_per_second": 11.089,
+       "eval_steps_per_second": 2.772,
+       "eval_wer": 0.30423280423280424,
+       "step": 600
+     },
+     {
+       "epoch": 0.9549795361527967,
+       "grad_norm": 1.18917715549469,
+       "learning_rate": 0.000291859296482412,
+       "loss": 0.3199,
+       "step": 700
+     },
+     {
+       "epoch": 0.9549795361527967,
+       "eval_loss": 0.2105857878923416,
+       "eval_runtime": 30.6684,
+       "eval_samples_per_second": 11.217,
+       "eval_steps_per_second": 2.804,
+       "eval_wer": 0.2932098765432099,
+       "step": 700
+     },
+     {
+       "epoch": 1.0914051841746248,
+       "grad_norm": 1.4716788530349731,
+       "learning_rate": 0.0002904888076747373,
+       "loss": 0.3223,
+       "step": 800
+     },
+     {
+       "epoch": 1.0914051841746248,
+       "eval_loss": 0.20132741332054138,
+       "eval_runtime": 30.7244,
+       "eval_samples_per_second": 11.196,
+       "eval_steps_per_second": 2.799,
+       "eval_wer": 0.2826278659611993,
+       "step": 800
+     },
+     {
+       "epoch": 1.2278308321964528,
+       "grad_norm": 0.600020706653595,
+       "learning_rate": 0.0002891183188670626,
+       "loss": 0.3075,
+       "step": 900
+     },
+     {
+       "epoch": 1.2278308321964528,
+       "eval_loss": 0.19748319685459137,
+       "eval_runtime": 31.1335,
+       "eval_samples_per_second": 11.049,
+       "eval_steps_per_second": 2.762,
+       "eval_wer": 0.2709435626102293,
+       "step": 900
+     },
+     {
+       "epoch": 1.364256480218281,
+       "grad_norm": 0.36852729320526123,
+       "learning_rate": 0.00028774783005938785,
+       "loss": 0.3015,
+       "step": 1000
+     },
+     {
+       "epoch": 1.364256480218281,
+       "eval_loss": 0.1942104995250702,
+       "eval_runtime": 31.1423,
+       "eval_samples_per_second": 11.046,
+       "eval_steps_per_second": 2.762,
+       "eval_wer": 0.2762345679012346,
+       "step": 1000
+     },
+     {
+       "epoch": 1.500682128240109,
+       "grad_norm": 0.969207227230072,
+       "learning_rate": 0.00028639104613978984,
+       "loss": 0.3049,
+       "step": 1100
+     },
+     {
+       "epoch": 1.500682128240109,
+       "eval_loss": 0.18946479260921478,
+       "eval_runtime": 30.9295,
+       "eval_samples_per_second": 11.122,
+       "eval_steps_per_second": 2.781,
+       "eval_wer": 0.27292768959435626,
+       "step": 1100
+     },
+     {
+       "epoch": 1.6371077762619373,
+       "grad_norm": 0.7448552250862122,
+       "learning_rate": 0.00028502055733211506,
+       "loss": 0.3029,
+       "step": 1200
+     },
+     {
+       "epoch": 1.6371077762619373,
+       "eval_loss": 0.1888139843940735,
+       "eval_runtime": 31.0815,
+       "eval_samples_per_second": 11.068,
+       "eval_steps_per_second": 2.767,
+       "eval_wer": 0.2718253968253968,
+       "step": 1200
+     },
+     {
+       "epoch": 1.7735334242837655,
+       "grad_norm": 0.7603126168251038,
+       "learning_rate": 0.0002836500685244404,
+       "loss": 0.2626,
+       "step": 1300
+     },
+     {
+       "epoch": 1.7735334242837655,
+       "eval_loss": 0.1865960955619812,
+       "eval_runtime": 31.1543,
+       "eval_samples_per_second": 11.042,
+       "eval_steps_per_second": 2.76,
+       "eval_wer": 0.2682980599647266,
+       "step": 1300
+     },
+     {
+       "epoch": 1.9099590723055935,
+       "grad_norm": 0.49969810247421265,
+       "learning_rate": 0.0002822795797167656,
+       "loss": 0.2803,
+       "step": 1400
+     },
+     {
+       "epoch": 1.9099590723055935,
+       "eval_loss": 0.18303100764751434,
+       "eval_runtime": 31.2418,
+       "eval_samples_per_second": 11.011,
+       "eval_steps_per_second": 2.753,
+       "eval_wer": 0.26146384479717816,
+       "step": 1400
+     },
+     {
+       "epoch": 2.0463847203274215,
+       "grad_norm": 1.5848807096481323,
+       "learning_rate": 0.0002809090909090909,
+       "loss": 0.2725,
+       "step": 1500
+     },
+     {
+       "epoch": 2.0463847203274215,
+       "eval_loss": 0.1813870072364807,
+       "eval_runtime": 31.0194,
+       "eval_samples_per_second": 11.09,
+       "eval_steps_per_second": 2.772,
+       "eval_wer": 0.26256613756613756,
+       "step": 1500
+     },
+     {
+       "epoch": 2.1828103683492497,
+       "grad_norm": 0.9148170948028564,
+       "learning_rate": 0.00027953860210141616,
+       "loss": 0.2732,
+       "step": 1600
+     },
+     {
+       "epoch": 2.1828103683492497,
+       "eval_loss": 0.1783067137002945,
+       "eval_runtime": 31.1451,
+       "eval_samples_per_second": 11.045,
+       "eval_steps_per_second": 2.761,
+       "eval_wer": 0.2641093474426808,
+       "step": 1600
+     },
+     {
+       "epoch": 2.319236016371078,
+       "grad_norm": 0.7404142618179321,
+       "learning_rate": 0.00027816811329374144,
+       "loss": 0.249,
+       "step": 1700
+     },
+     {
+       "epoch": 2.319236016371078,
+       "eval_loss": 0.1828220933675766,
+       "eval_runtime": 31.1781,
+       "eval_samples_per_second": 11.033,
+       "eval_steps_per_second": 2.758,
+       "eval_wer": 0.25595238095238093,
+       "step": 1700
+     },
+     {
+       "epoch": 2.4556616643929057,
+       "grad_norm": 0.795720100402832,
+       "learning_rate": 0.00027679762448606666,
+       "loss": 0.2423,
+       "step": 1800
+     },
+     {
+       "epoch": 2.4556616643929057,
+       "eval_loss": 0.17622150480747223,
+       "eval_runtime": 31.3032,
+       "eval_samples_per_second": 10.989,
+       "eval_steps_per_second": 2.747,
+       "eval_wer": 0.24801587301587302,
+       "step": 1800
+     },
+     {
+       "epoch": 2.592087312414734,
+       "grad_norm": 2.1790249347686768,
+       "learning_rate": 0.00027542713567839193,
+       "loss": 0.2668,
+       "step": 1900
+     },
+     {
+       "epoch": 2.592087312414734,
+       "eval_loss": 0.1731557846069336,
+       "eval_runtime": 31.2391,
+       "eval_samples_per_second": 11.012,
+       "eval_steps_per_second": 2.753,
+       "eval_wer": 0.24581128747795414,
+       "step": 1900
+     },
+     {
+       "epoch": 2.728512960436562,
+       "grad_norm": 1.353415608406067,
+       "learning_rate": 0.0002740566468707172,
+       "loss": 0.2653,
+       "step": 2000
+     },
+     {
+       "epoch": 2.728512960436562,
+       "eval_loss": 0.1726989597082138,
+       "eval_runtime": 30.843,
+       "eval_samples_per_second": 11.153,
+       "eval_steps_per_second": 2.788,
+       "eval_wer": 0.24603174603174602,
+       "step": 2000
+     },
+     {
+       "epoch": 2.8649386084583903,
+       "grad_norm": 0.9100846648216248,
+       "learning_rate": 0.00027268615806304243,
+       "loss": 0.2614,
+       "step": 2100
+     },
+     {
+       "epoch": 2.8649386084583903,
+       "eval_loss": 0.1748967319726944,
+       "eval_runtime": 31.2508,
+       "eval_samples_per_second": 11.008,
+       "eval_steps_per_second": 2.752,
+       "eval_wer": 0.2533068783068783,
+       "step": 2100
+     },
+     {
+       "epoch": 3.001364256480218,
+       "grad_norm": 0.705278217792511,
+       "learning_rate": 0.0002713156692553677,
+       "loss": 0.2474,
+       "step": 2200
+     },
+     {
+       "epoch": 3.001364256480218,
+       "eval_loss": 0.1732555478811264,
+       "eval_runtime": 31.2961,
+       "eval_samples_per_second": 10.992,
+       "eval_steps_per_second": 2.748,
+       "eval_wer": 0.24382716049382716,
+       "step": 2200
+     },
+     {
+       "epoch": 3.1377899045020463,
+       "grad_norm": 1.5550167560577393,
+       "learning_rate": 0.000269945180447693,
+       "loss": 0.2317,
+       "step": 2300
+     },
+     {
+       "epoch": 3.1377899045020463,
+       "eval_loss": 0.17674298584461212,
+       "eval_runtime": 31.0854,
+       "eval_samples_per_second": 11.066,
+       "eval_steps_per_second": 2.767,
+       "eval_wer": 0.2447089947089947,
+       "step": 2300
+     },
+     {
+       "epoch": 3.1377899045020463,
+       "step": 2300,
+       "total_flos": 8.649929823066914e+18,
+       "train_loss": 0.5916279270337975,
+       "train_runtime": 2613.7833,
+       "train_samples_per_second": 33.618,
+       "train_steps_per_second": 8.413
+     }
+   ],
+   "logging_steps": 100,
+   "max_steps": 21990,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 30,
+   "save_steps": 400,
+   "stateful_callbacks": {
+     "EarlyStoppingCallback": {
+       "args": {
+         "early_stopping_patience": 3,
+         "early_stopping_threshold": 0.0
+       },
+       "attributes": {
+         "early_stopping_patience_counter": 0
+       }
+     },
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 8.649929823066914e+18,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
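The trainer state above implies the broad training setup (evaluate and log every 100 steps, save every 400, batch size 4, 30 epochs, early stopping with patience 3). A hedged sketch of a matching configuration follows; values recorded in `trainer_state.json` are used directly, while everything else (output directory, peak learning rate, warmup) is an assumption:

```python
# Sketch of a Trainer configuration consistent with trainer_state.json.
# Anything not recorded in the state file (output_dir, learning_rate, warmup) is an assumption.
from transformers import TrainingArguments, EarlyStoppingCallback

training_args = TrainingArguments(
    output_dir="mms-1b-nyagen-combined-model",  # assumed
    per_device_train_batch_size=4,              # "train_batch_size": 4
    num_train_epochs=30,                        # "num_train_epochs": 30
    learning_rate=3e-4,                         # assumed peak LR, inferred from the logged schedule
    eval_strategy="steps",                      # named evaluation_strategy in older transformers releases
    eval_steps=100,                             # "eval_steps": 100
    logging_steps=100,                          # "logging_steps": 100
    save_steps=400,                             # "save_steps": 400
    load_best_model_at_end=True,                # needed so early stopping tracks the best checkpoint
)

# "early_stopping_patience": 3 in the state file; passed to Trainer via callbacks=[early_stopping].
early_stopping = EarlyStoppingCallback(early_stopping_patience=3)
```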