khalidey committed
Commit 2fea496
1 parent: 27e72c1

Delete checkpoint-2000

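This commit removes the intermediate checkpoint-2000 directory and its contents from the repository. For context only, the sketch below shows one way such a folder can be deleted with the huggingface_hub client; it assumes a recent huggingface_hub version, the repo_id is a placeholder, and the actual commit may just as well have been made through the web UI or plain git.

```python
# Hypothetical sketch: deleting a checkpoint folder from a Hub repo with huggingface_hub.
# The repo_id is a placeholder; this is not necessarily how this commit was produced.
from huggingface_hub import HfApi

api = HfApi()  # relies on a cached login token (e.g. from `huggingface-cli login`)
api.delete_folder(
    path_in_repo="checkpoint-2000",
    repo_id="khalidey/whisper-small-finetuned",  # placeholder repository id
    commit_message="Delete checkpoint-2000",
)
```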
checkpoint-2000/config.json DELETED
@@ -1,42 +0,0 @@
- {
-   "_name_or_path": "openai/whisper-small",
-   "activation_dropout": 0.0,
-   "activation_function": "gelu",
-   "architectures": [
-     "WhisperForConditionalGeneration"
-   ],
-   "attention_dropout": 0.0,
-   "begin_suppress_tokens": [
-     220,
-     50257
-   ],
-   "bos_token_id": 50257,
-   "d_model": 768,
-   "decoder_attention_heads": 12,
-   "decoder_ffn_dim": 3072,
-   "decoder_layerdrop": 0.0,
-   "decoder_layers": 12,
-   "decoder_start_token_id": 50258,
-   "dropout": 0.0,
-   "encoder_attention_heads": 12,
-   "encoder_ffn_dim": 3072,
-   "encoder_layerdrop": 0.0,
-   "encoder_layers": 12,
-   "eos_token_id": 50257,
-   "forced_decoder_ids": null,
-   "init_std": 0.02,
-   "is_encoder_decoder": true,
-   "max_length": 448,
-   "max_source_positions": 1500,
-   "max_target_positions": 448,
-   "model_type": "whisper",
-   "num_hidden_layers": 12,
-   "num_mel_bins": 80,
-   "pad_token_id": 50257,
-   "scale_embedding": false,
-   "suppress_tokens": [],
-   "torch_dtype": "float32",
-   "transformers_version": "4.26.0.dev0",
-   "use_cache": true,
-   "vocab_size": 51865
- }
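The deleted config.json matches the stock openai/whisper-small architecture (12 encoder and 12 decoder layers, d_model 768, vocab_size 51865). A minimal sketch of how such a file maps onto the transformers API; since the checkpoint path no longer exists, the equivalent config is fetched from the base model on the Hub:

```python
# Sketch: the deleted config.json describes the standard whisper-small architecture.
# Fetch the equivalent config from the Hub and rebuild a randomly initialised model;
# the fine-tuned weights themselves lived in the deleted pytorch_model.bin.
from transformers import WhisperConfig, WhisperForConditionalGeneration

config = WhisperConfig.from_pretrained("openai/whisper-small")
print(config.d_model, config.encoder_layers, config.vocab_size)  # 768 12 51865

model = WhisperForConditionalGeneration(config)  # architecture only, no trained weights
```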
checkpoint-2000/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:62df7a0e1a4a3e28ddd03c1fb05e5f874e1f99fe7ce6590e2b995534939fcc4a
- size 1934158597
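The large binaries in this checkpoint are tracked with Git LFS, so the diffs above and below show only the three-line pointer file (spec version, SHA-256 object id, size in bytes), not the tensors themselves. A small sketch of parsing such a pointer with the standard library, assuming the pointer file were still present locally:

```python
# Sketch: reading a Git LFS pointer file (the path is hypothetical; this commit deletes it).
with open("checkpoint-2000/optimizer.pt") as f:
    fields = dict(line.split(" ", 1) for line in f.read().strip().splitlines())

print(fields["oid"])   # sha256:62df7a0e...
print(fields["size"])  # 1934158597 -> roughly 1.9 GB of optimizer state
```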
checkpoint-2000/preprocessor_config.json DELETED
The diff for this file is too large to render.
 
checkpoint-2000/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:42f6c9c8ce011a3cd64f73e554ffad7a7eeccb1ab10f81ab3ab01e2fc88da522
- size 967099139
checkpoint-2000/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7bce841fb4dbbf6bd41053f16c84251d18e5bc5818d9dff667509985dcc60684
- size 14439
checkpoint-2000/scaler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:f1107989d62f000d9dde8f5b09650bc15f7856c9608cf33df0efca161d5c02bd
- size 559
checkpoint-2000/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8c99784789da5872bcf0411dd2748247fdf0e8c4cf1ae88c6e57854569056aff
- size 623
checkpoint-2000/trainer_state.json DELETED
@@ -1,514 +0,0 @@
- {
-   "best_metric": 20.332876233497352,
-   "best_model_checkpoint": "/content/gdrive/MyDrive/Whisper_Checkpoints/checkpoint-2000",
-   "epoch": 2.5873221216041395,
-   "global_step": 2000,
-   "is_hyper_param_search": false,
-   "is_local_process_zero": true,
-   "is_world_process_zero": true,
-   "log_history": [
-     {
-       "epoch": 0.03,
-       "learning_rate": 4.2000000000000006e-07,
-       "loss": 4.2747,
-       "step": 25
-     },
-     {
-       "epoch": 0.06,
-       "learning_rate": 9.200000000000001e-07,
-       "loss": 3.4782,
-       "step": 50
-     },
-     {
-       "epoch": 0.1,
-       "learning_rate": 1.42e-06,
-       "loss": 2.4424,
-       "step": 75
-     },
-     {
-       "epoch": 0.13,
-       "learning_rate": 1.9200000000000003e-06,
-       "loss": 1.4339,
-       "step": 100
-     },
-     {
-       "epoch": 0.16,
-       "learning_rate": 2.42e-06,
-       "loss": 1.1695,
-       "step": 125
-     },
-     {
-       "epoch": 0.19,
-       "learning_rate": 2.92e-06,
-       "loss": 1.0811,
-       "step": 150
-     },
-     {
-       "epoch": 0.23,
-       "learning_rate": 3.4200000000000007e-06,
-       "loss": 0.9162,
-       "step": 175
-     },
-     {
-       "epoch": 0.26,
-       "learning_rate": 3.920000000000001e-06,
-       "loss": 0.8637,
-       "step": 200
-     },
-     {
-       "epoch": 0.29,
-       "learning_rate": 4.42e-06,
-       "loss": 0.7423,
-       "step": 225
-     },
-     {
-       "epoch": 0.32,
-       "learning_rate": 4.92e-06,
-       "loss": 0.6393,
-       "step": 250
-     },
-     {
-       "epoch": 0.36,
-       "learning_rate": 5.420000000000001e-06,
-       "loss": 0.4363,
-       "step": 275
-     },
-     {
-       "epoch": 0.39,
-       "learning_rate": 5.92e-06,
-       "loss": 0.3485,
-       "step": 300
-     },
-     {
-       "epoch": 0.42,
-       "learning_rate": 6.42e-06,
-       "loss": 0.2951,
-       "step": 325
-     },
-     {
-       "epoch": 0.45,
-       "learning_rate": 6.92e-06,
-       "loss": 0.3216,
-       "step": 350
-     },
-     {
-       "epoch": 0.49,
-       "learning_rate": 7.420000000000001e-06,
-       "loss": 0.2954,
-       "step": 375
-     },
-     {
-       "epoch": 0.52,
-       "learning_rate": 7.92e-06,
-       "loss": 0.317,
-       "step": 400
-     },
-     {
-       "epoch": 0.55,
-       "learning_rate": 8.42e-06,
-       "loss": 0.3122,
-       "step": 425
-     },
-     {
-       "epoch": 0.58,
-       "learning_rate": 8.920000000000001e-06,
-       "loss": 0.3339,
-       "step": 450
-     },
-     {
-       "epoch": 0.61,
-       "learning_rate": 9.42e-06,
-       "loss": 0.2815,
-       "step": 475
-     },
-     {
-       "epoch": 0.65,
-       "learning_rate": 9.920000000000002e-06,
-       "loss": 0.3144,
-       "step": 500
-     },
-     {
-       "epoch": 0.68,
-       "learning_rate": 9.940000000000001e-06,
-       "loss": 0.2778,
-       "step": 525
-     },
-     {
-       "epoch": 0.71,
-       "learning_rate": 9.86857142857143e-06,
-       "loss": 0.3041,
-       "step": 550
-     },
-     {
-       "epoch": 0.74,
-       "learning_rate": 9.797142857142858e-06,
-       "loss": 0.3016,
-       "step": 575
-     },
-     {
-       "epoch": 0.78,
-       "learning_rate": 9.725714285714287e-06,
-       "loss": 0.2984,
-       "step": 600
-     },
-     {
-       "epoch": 0.81,
-       "learning_rate": 9.654285714285716e-06,
-       "loss": 0.2963,
-       "step": 625
-     },
-     {
-       "epoch": 0.84,
-       "learning_rate": 9.582857142857143e-06,
-       "loss": 0.2958,
-       "step": 650
-     },
-     {
-       "epoch": 0.87,
-       "learning_rate": 9.511428571428572e-06,
-       "loss": 0.2896,
-       "step": 675
-     },
-     {
-       "epoch": 0.91,
-       "learning_rate": 9.440000000000001e-06,
-       "loss": 0.2663,
-       "step": 700
-     },
-     {
-       "epoch": 0.94,
-       "learning_rate": 9.368571428571428e-06,
-       "loss": 0.2612,
-       "step": 725
-     },
-     {
-       "epoch": 0.97,
-       "learning_rate": 9.297142857142857e-06,
-       "loss": 0.2912,
-       "step": 750
-     },
-     {
-       "epoch": 1.0,
-       "learning_rate": 9.225714285714286e-06,
-       "loss": 0.2776,
-       "step": 775
-     },
-     {
-       "epoch": 1.03,
-       "learning_rate": 9.154285714285715e-06,
-       "loss": 0.1359,
-       "step": 800
-     },
-     {
-       "epoch": 1.07,
-       "learning_rate": 9.082857142857143e-06,
-       "loss": 0.1632,
-       "step": 825
-     },
-     {
-       "epoch": 1.1,
-       "learning_rate": 9.011428571428572e-06,
-       "loss": 0.1571,
-       "step": 850
-     },
-     {
-       "epoch": 1.13,
-       "learning_rate": 8.94e-06,
-       "loss": 0.137,
-       "step": 875
-     },
-     {
-       "epoch": 1.16,
-       "learning_rate": 8.86857142857143e-06,
-       "loss": 0.1373,
-       "step": 900
-     },
-     {
-       "epoch": 1.2,
-       "learning_rate": 8.797142857142857e-06,
-       "loss": 0.145,
-       "step": 925
-     },
-     {
-       "epoch": 1.23,
-       "learning_rate": 8.725714285714286e-06,
-       "loss": 0.1627,
-       "step": 950
-     },
-     {
-       "epoch": 1.26,
-       "learning_rate": 8.654285714285715e-06,
-       "loss": 0.1561,
-       "step": 975
-     },
-     {
-       "epoch": 1.29,
-       "learning_rate": 8.582857142857144e-06,
-       "loss": 0.1321,
-       "step": 1000
-     },
-     {
-       "epoch": 1.29,
-       "eval_loss": 0.2980383336544037,
-       "eval_runtime": 1963.1004,
-       "eval_samples_per_second": 2.582,
-       "eval_steps_per_second": 0.323,
-       "eval_wer": 21.55629050038988,
-       "step": 1000
-     },
-     {
-       "epoch": 1.33,
-       "learning_rate": 8.511428571428571e-06,
-       "loss": 0.1477,
-       "step": 1025
-     },
-     {
-       "epoch": 1.36,
-       "learning_rate": 8.44e-06,
-       "loss": 0.1378,
-       "step": 1050
-     },
-     {
-       "epoch": 1.39,
-       "learning_rate": 8.36857142857143e-06,
-       "loss": 0.1354,
-       "step": 1075
-     },
-     {
-       "epoch": 1.42,
-       "learning_rate": 8.297142857142859e-06,
-       "loss": 0.1352,
-       "step": 1100
-     },
-     {
-       "epoch": 1.46,
-       "learning_rate": 8.225714285714288e-06,
-       "loss": 0.1456,
-       "step": 1125
-     },
-     {
-       "epoch": 1.49,
-       "learning_rate": 8.154285714285715e-06,
-       "loss": 0.1386,
-       "step": 1150
-     },
-     {
-       "epoch": 1.52,
-       "learning_rate": 8.082857142857144e-06,
-       "loss": 0.1366,
-       "step": 1175
-     },
-     {
-       "epoch": 1.55,
-       "learning_rate": 8.011428571428573e-06,
-       "loss": 0.1509,
-       "step": 1200
-     },
-     {
-       "epoch": 1.58,
-       "learning_rate": 7.94e-06,
-       "loss": 0.136,
-       "step": 1225
-     },
-     {
-       "epoch": 1.62,
-       "learning_rate": 7.86857142857143e-06,
-       "loss": 0.1488,
-       "step": 1250
-     },
-     {
-       "epoch": 1.65,
-       "learning_rate": 7.797142857142858e-06,
-       "loss": 0.1662,
-       "step": 1275
-     },
-     {
-       "epoch": 1.68,
-       "learning_rate": 7.725714285714286e-06,
-       "loss": 0.1505,
-       "step": 1300
-     },
-     {
-       "epoch": 1.71,
-       "learning_rate": 7.654285714285715e-06,
-       "loss": 0.1431,
-       "step": 1325
-     },
-     {
-       "epoch": 1.75,
-       "learning_rate": 7.5828571428571444e-06,
-       "loss": 0.1263,
-       "step": 1350
-     },
-     {
-       "epoch": 1.78,
-       "learning_rate": 7.511428571428572e-06,
-       "loss": 0.132,
-       "step": 1375
-     },
-     {
-       "epoch": 1.81,
-       "learning_rate": 7.440000000000001e-06,
-       "loss": 0.1429,
-       "step": 1400
-     },
-     {
-       "epoch": 1.84,
-       "learning_rate": 7.36857142857143e-06,
-       "loss": 0.1398,
-       "step": 1425
-     },
-     {
-       "epoch": 1.88,
-       "learning_rate": 7.297142857142858e-06,
-       "loss": 0.1467,
-       "step": 1450
-     },
-     {
-       "epoch": 1.91,
-       "learning_rate": 7.225714285714286e-06,
-       "loss": 0.146,
-       "step": 1475
-     },
-     {
-       "epoch": 1.94,
-       "learning_rate": 7.154285714285715e-06,
-       "loss": 0.1318,
-       "step": 1500
-     },
-     {
-       "epoch": 1.97,
-       "learning_rate": 7.082857142857143e-06,
-       "loss": 0.1365,
-       "step": 1525
-     },
-     {
-       "epoch": 2.01,
-       "learning_rate": 7.011428571428572e-06,
-       "loss": 0.1236,
-       "step": 1550
-     },
-     {
-       "epoch": 2.04,
-       "learning_rate": 6.9400000000000005e-06,
-       "loss": 0.049,
-       "step": 1575
-     },
-     {
-       "epoch": 2.07,
-       "learning_rate": 6.868571428571429e-06,
-       "loss": 0.0512,
-       "step": 1600
-     },
-     {
-       "epoch": 2.1,
-       "learning_rate": 6.797142857142858e-06,
-       "loss": 0.0499,
-       "step": 1625
-     },
-     {
-       "epoch": 2.13,
-       "learning_rate": 6.725714285714287e-06,
-       "loss": 0.0477,
-       "step": 1650
-     },
-     {
-       "epoch": 2.17,
-       "learning_rate": 6.654285714285716e-06,
-       "loss": 0.0539,
-       "step": 1675
-     },
-     {
-       "epoch": 2.2,
-       "learning_rate": 6.582857142857143e-06,
-       "loss": 0.0502,
-       "step": 1700
-     },
-     {
-       "epoch": 2.23,
-       "learning_rate": 6.511428571428572e-06,
-       "loss": 0.0572,
-       "step": 1725
-     },
-     {
-       "epoch": 2.26,
-       "learning_rate": 6.440000000000001e-06,
-       "loss": 0.0555,
-       "step": 1750
-     },
-     {
-       "epoch": 2.3,
-       "learning_rate": 6.368571428571429e-06,
-       "loss": 0.0568,
-       "step": 1775
-     },
-     {
-       "epoch": 2.33,
-       "learning_rate": 6.297142857142857e-06,
-       "loss": 0.053,
-       "step": 1800
-     },
-     {
-       "epoch": 2.36,
-       "learning_rate": 6.225714285714286e-06,
-       "loss": 0.0501,
-       "step": 1825
-     },
-     {
-       "epoch": 2.39,
-       "learning_rate": 6.1542857142857145e-06,
-       "loss": 0.0521,
-       "step": 1850
-     },
-     {
-       "epoch": 2.43,
-       "learning_rate": 6.0828571428571435e-06,
-       "loss": 0.0518,
-       "step": 1875
-     },
-     {
-       "epoch": 2.46,
-       "learning_rate": 6.011428571428572e-06,
-       "loss": 0.0537,
-       "step": 1900
-     },
-     {
-       "epoch": 2.49,
-       "learning_rate": 5.94e-06,
-       "loss": 0.0554,
-       "step": 1925
-     },
-     {
-       "epoch": 2.52,
-       "learning_rate": 5.868571428571429e-06,
-       "loss": 0.0515,
-       "step": 1950
-     },
-     {
-       "epoch": 2.55,
-       "learning_rate": 5.797142857142858e-06,
-       "loss": 0.0548,
-       "step": 1975
-     },
-     {
-       "epoch": 2.59,
-       "learning_rate": 5.725714285714287e-06,
-       "loss": 0.052,
-       "step": 2000
-     },
-     {
-       "epoch": 2.59,
-       "eval_loss": 0.2855091094970703,
-       "eval_runtime": 1971.7129,
-       "eval_samples_per_second": 2.571,
-       "eval_steps_per_second": 0.322,
-       "eval_wer": 20.332876233497352,
-       "step": 2000
-     }
-   ],
-   "max_steps": 4000,
-   "num_train_epochs": 6,
-   "total_flos": 9.23011544383488e+18,
-   "trial_name": null,
-   "trial_params": null
- }
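The deleted trainer_state.json recorded the training trajectory: eval WER improved from 21.56 at step 1000 to 20.33 at step 2000, which is why checkpoint-2000 was the best_model_checkpoint at the time it was saved. A minimal sketch of pulling those numbers back out of such a file with the standard library (the path is hypothetical, since this commit removes the file):

```python
# Sketch: inspecting a Hugging Face Trainer state file with the standard library.
import json

with open("checkpoint-2000/trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"])            # 20.332876233497352 (eval WER)
print(state["best_model_checkpoint"])  # .../Whisper_Checkpoints/checkpoint-2000

# Keep only the evaluation entries from the step-by-step log history.
evals = [e for e in state["log_history"] if "eval_wer" in e]
for e in evals:
    print(e["step"], round(e["eval_wer"], 2), round(e["eval_loss"], 4))
```

With the checkpoint directory intact, training could also have been resumed with trainer.train(resume_from_checkpoint=...) instead of restarting from step 0; after this deletion only the files kept elsewhere in the repository remain.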
checkpoint-2000/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e7e84bbacb78e09cd8ee7d524f70614635cbf5f954a28de4f50bb8ea71406a63
- size 3567