Seosnaps committed on
Commit
495986d
1 Parent(s): 19d2b52

Training in progress, step 2000, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8a3db2ff016b328cd68cc8f1cb89a07c1b47135a2e494934f6dde9ce800d2348
+ oid sha256:511578a0d482675e3ff17b4d731393ce3c283abd8d16f8a85a3f15d02a56e39f
  size 966995080
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6730363fb76f2edd6821e506a2abd45c7cc27ded1e1efd4c997f1c7b96767b08
+ oid sha256:d6ed9b6da13b19371de47f3a2b44715717ead8b5eeed3a91c863518d2a661a22
  size 1925070764
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b7686bf6430a5fadcfa927b0a151b31a4e397fd869a789b35ecb4e699dd607b3
+ oid sha256:811d52f8a469bde4a4138f47efe6e2b676d318980b3df9610e6fcc5abea00325
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0054bae4a1765c1151c5d499ec353895197f5e92e18df41e313afd3470bb8693
+ oid sha256:f3ea9d13baff2282d300ceb3c3984a3388d1450303ffc8640c73967fa3325903
  size 1064
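
Each of the four checkpoint files above is stored as a Git LFS pointer (version, oid, size) rather than the binary payload, which is why the diffs only swap the sha256 oid while the recorded size stays the same. Below is a minimal sketch of checking a fetched binary against its pointer; it assumes the real files have been pulled with `git lfs pull`, and parse_lfs_pointer / verify_against_pointer are illustrative helper names, not git-lfs or huggingface_hub APIs.

import hashlib

# Illustrative helpers (not git-lfs or huggingface_hub APIs). A Git LFS pointer
# is three plain-text lines:
#   version https://git-lfs.github.com/spec/v1
#   oid sha256:<hex digest>
#   size <bytes>
def parse_lfs_pointer(pointer_text):
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    return fields["oid"].split(":", 1)[1], int(fields["size"])

def verify_against_pointer(binary_path, pointer_text):
    expected_oid, expected_size = parse_lfs_pointer(pointer_text)
    digest, size = hashlib.sha256(), 0
    with open(binary_path, "rb") as f:
        # Hash in 1 MiB chunks so large checkpoints are not read into memory at once.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

For example, the fetched last-checkpoint/model.safetensors from this commit should hash to the new oid 511578a0… and come to 966995080 bytes.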
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 81.14581337420962,
- "best_model_checkpoint": "./whisper-small-ha-adam-v4/checkpoint-1000",
- "epoch": 9.554140127388536,
+ "best_metric": 78.86568308105001,
+ "best_model_checkpoint": "./whisper-small-ha-adam-v4/checkpoint-2000",
+ "epoch": 12.738853503184714,
  "eval_steps": 500,
- "global_step": 1500,
+ "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -457,6 +457,156 @@
  "eval_wer": 87.75627514849587,
  "eval_wer_ortho": 89.84375,
  "step": 1500
+ },
+ {
+ "epoch": 9.713375796178344,
+ "grad_norm": 2.170787811279297,
+ "learning_rate": 5e-05,
+ "loss": 0.0327,
+ "step": 1525
+ },
+ {
+ "epoch": 9.872611464968152,
+ "grad_norm": 2.1923575401306152,
+ "learning_rate": 5e-05,
+ "loss": 0.0472,
+ "step": 1550
+ },
+ {
+ "epoch": 10.031847133757962,
+ "grad_norm": 4.220789909362793,
+ "learning_rate": 5e-05,
+ "loss": 0.0463,
+ "step": 1575
+ },
+ {
+ "epoch": 10.19108280254777,
+ "grad_norm": 1.4491336345672607,
+ "learning_rate": 5e-05,
+ "loss": 0.0447,
+ "step": 1600
+ },
+ {
+ "epoch": 10.35031847133758,
+ "grad_norm": 3.3499913215637207,
+ "learning_rate": 5e-05,
+ "loss": 0.043,
+ "step": 1625
+ },
+ {
+ "epoch": 10.509554140127388,
+ "grad_norm": 2.196830987930298,
+ "learning_rate": 5e-05,
+ "loss": 0.035,
+ "step": 1650
+ },
+ {
+ "epoch": 10.668789808917197,
+ "grad_norm": 2.2914416790008545,
+ "learning_rate": 5e-05,
+ "loss": 0.0281,
+ "step": 1675
+ },
+ {
+ "epoch": 10.828025477707007,
+ "grad_norm": 2.437507152557373,
+ "learning_rate": 5e-05,
+ "loss": 0.0431,
+ "step": 1700
+ },
+ {
+ "epoch": 10.987261146496815,
+ "grad_norm": 5.599733352661133,
+ "learning_rate": 5e-05,
+ "loss": 0.0434,
+ "step": 1725
+ },
+ {
+ "epoch": 11.146496815286625,
+ "grad_norm": 3.811133861541748,
+ "learning_rate": 5e-05,
+ "loss": 0.0287,
+ "step": 1750
+ },
+ {
+ "epoch": 11.305732484076433,
+ "grad_norm": 3.298198938369751,
+ "learning_rate": 5e-05,
+ "loss": 0.0477,
+ "step": 1775
+ },
+ {
+ "epoch": 11.464968152866241,
+ "grad_norm": 1.9741543531417847,
+ "learning_rate": 5e-05,
+ "loss": 0.0423,
+ "step": 1800
+ },
+ {
+ "epoch": 11.624203821656051,
+ "grad_norm": 1.3877679109573364,
+ "learning_rate": 5e-05,
+ "loss": 0.033,
+ "step": 1825
+ },
+ {
+ "epoch": 11.78343949044586,
+ "grad_norm": 1.5006356239318848,
+ "learning_rate": 5e-05,
+ "loss": 0.0292,
+ "step": 1850
+ },
+ {
+ "epoch": 11.94267515923567,
+ "grad_norm": 2.8492929935455322,
+ "learning_rate": 5e-05,
+ "loss": 0.0273,
+ "step": 1875
+ },
+ {
+ "epoch": 12.101910828025478,
+ "grad_norm": 3.423665761947632,
+ "learning_rate": 5e-05,
+ "loss": 0.0234,
+ "step": 1900
+ },
+ {
+ "epoch": 12.261146496815286,
+ "grad_norm": 2.3133459091186523,
+ "learning_rate": 5e-05,
+ "loss": 0.0358,
+ "step": 1925
+ },
+ {
+ "epoch": 12.420382165605096,
+ "grad_norm": 3.495283603668213,
+ "learning_rate": 5e-05,
+ "loss": 0.0355,
+ "step": 1950
+ },
+ {
+ "epoch": 12.579617834394904,
+ "grad_norm": 1.5722227096557617,
+ "learning_rate": 5e-05,
+ "loss": 0.039,
+ "step": 1975
+ },
+ {
+ "epoch": 12.738853503184714,
+ "grad_norm": 3.156038522720337,
+ "learning_rate": 5e-05,
+ "loss": 0.0314,
+ "step": 2000
+ },
+ {
+ "epoch": 12.738853503184714,
+ "eval_loss": 2.215003490447998,
+ "eval_runtime": 260.0556,
+ "eval_samples_per_second": 2.538,
+ "eval_steps_per_second": 0.162,
+ "eval_wer": 78.86568308105001,
+ "eval_wer_ortho": 81.0546875,
+ "step": 2000
  }
  ],
  "logging_steps": 25,
@@ -476,7 +626,7 @@
  "attributes": {}
  }
  },
- "total_flos": 6.91566053326848e+18,
+ "total_flos": 9.22088071102464e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null