Silemo committed on
Commit f2e2a42
1 Parent(s): f69217e

Training in progress, step 400, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fe3597c122a2e5567b331b531c1b0cf98c2606dfe5fc98bc664181d12b2b83cd
+oid sha256:12140b3527e8c387ab119cfaeb385ddf7ee08efb2029bcede8b49ffa881bc50e
 size 966995080
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5feb1207165a2472ed0fc4a2352d29febeb73effc6fad6dbd900bc416ac92d15
+oid sha256:69d8260db719def1ed71c3ac4aef34515c81b7554c540c6bf09b8c6ef1774fbf
 size 1925064044
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4fd91acef56149d62ef60f134f6bef8f3143b1426e8731dcb1c5449312d3ea8c
+oid sha256:309d78c0cbac356af547d46b1e70776079ec1d34f37e987fe5340ad766fae6b9
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4f9b9e6716d4845c0461a67674925be51cb5bd879f6bddb03b17fa941754a7de
+oid sha256:09ff4109b85e31dcf014a99ae517a55071a006425bd4bf896b18ef8c2d279509
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 176.42124237382143,
-  "best_model_checkpoint": "./whisper-it/checkpoint-100",
-  "epoch": 0.19083969465648856,
+  "best_metric": 70.1955074875208,
+  "best_model_checkpoint": "./whisper-it/checkpoint-300",
+  "epoch": 0.7633587786259542,
   "eval_steps": 100,
-  "global_step": 100,
+  "global_step": 400,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -40,6 +40,105 @@
       "eval_steps_per_second": 0.095,
       "eval_wer": 176.42124237382143,
       "step": 100
+    },
+    {
+      "epoch": 0.24,
+      "learning_rate": 2.4000000000000003e-06,
+      "loss": 0.9794,
+      "step": 125
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 2.9e-06,
+      "loss": 0.8642,
+      "step": 150
+    },
+    {
+      "epoch": 0.33,
+      "learning_rate": 3.4000000000000005e-06,
+      "loss": 0.8044,
+      "step": 175
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 3.88e-06,
+      "loss": 0.7389,
+      "step": 200
+    },
+    {
+      "epoch": 0.38,
+      "eval_loss": 0.8330782055854797,
+      "eval_runtime": 1755.11,
+      "eval_samples_per_second": 0.855,
+      "eval_steps_per_second": 0.107,
+      "eval_wer": 80.49084858569051,
+      "step": 200
+    },
+    {
+      "epoch": 0.43,
+      "learning_rate": 4.38e-06,
+      "loss": 0.6293,
+      "step": 225
+    },
+    {
+      "epoch": 0.48,
+      "learning_rate": 4.880000000000001e-06,
+      "loss": 0.5066,
+      "step": 250
+    },
+    {
+      "epoch": 0.52,
+      "learning_rate": 5.380000000000001e-06,
+      "loss": 0.3526,
+      "step": 275
+    },
+    {
+      "epoch": 0.57,
+      "learning_rate": 5.8800000000000005e-06,
+      "loss": 0.2951,
+      "step": 300
+    },
+    {
+      "epoch": 0.57,
+      "eval_loss": 0.4260523319244385,
+      "eval_runtime": 1708.1953,
+      "eval_samples_per_second": 0.878,
+      "eval_steps_per_second": 0.11,
+      "eval_wer": 70.1955074875208,
+      "step": 300
+    },
+    {
+      "epoch": 0.62,
+      "learning_rate": 6.380000000000001e-06,
+      "loss": 0.2553,
+      "step": 325
+    },
+    {
+      "epoch": 0.67,
+      "learning_rate": 6.88e-06,
+      "loss": 0.2744,
+      "step": 350
+    },
+    {
+      "epoch": 0.72,
+      "learning_rate": 7.3800000000000005e-06,
+      "loss": 0.279,
+      "step": 375
+    },
+    {
+      "epoch": 0.76,
+      "learning_rate": 7.88e-06,
+      "loss": 0.2703,
+      "step": 400
+    },
+    {
+      "epoch": 0.76,
+      "eval_loss": 0.40512609481811523,
+      "eval_runtime": 1849.1681,
+      "eval_samples_per_second": 0.811,
+      "eval_steps_per_second": 0.102,
+      "eval_wer": 101.62922906267333,
+      "step": 400
     }
   ],
   "logging_steps": 25,
@@ -47,7 +146,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 8,
   "save_steps": 100,
-  "total_flos": 9.23473281024e+17,
+  "total_flos": 3.693893124096e+18,
   "trial_name": null,
   "trial_params": null
 }
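
For context, the trainer_state.json values above (logging every 25 steps, evaluation and checkpointing every 100 steps, 8 training epochs, output directory ./whisper-it, best model tracked by WER) are consistent with a Hugging Face Seq2SeqTrainingArguments configuration along the following lines. This is a sketch reconstructed from the logged state, not the repository's actual training script; the learning rate, warm-up, and batch sizes are assumptions inferred from the logged LR ramp and eval throughput.

# Sketch only: Seq2SeqTrainingArguments consistent with the logged trainer state.
# Values marked "assumption" are not recorded in this commit.
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="./whisper-it",        # matches best_model_checkpoint prefix
    num_train_epochs=8,               # from trainer_state.json
    logging_steps=25,
    eval_steps=100,
    save_steps=100,
    evaluation_strategy="steps",
    save_strategy="steps",
    load_best_model_at_end=True,      # assumption: best_metric/best_model_checkpoint are tracked
    metric_for_best_model="wer",      # assumption: eval_wer is the selection metric
    greater_is_better=False,          # lower WER is better
    learning_rate=1e-5,               # assumption: logged LR ramp suggests linear warm-up toward ~1e-5
    warmup_steps=500,                 # assumption
    per_device_train_batch_size=16,   # assumption
    per_device_eval_batch_size=8,     # assumption (~0.1 eval steps/s at ~0.85 eval samples/s)
    predict_with_generate=True,
    fp16=True,                        # assumption
)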
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0deaaf9b4dc785b95bcfecba732d9f20738cd6d99cb5f2d674c46008c563ff98
-size 4856
+oid sha256:a6afd188b1bb5040b1e1647512623a4ea330124b3938572fb5350dd1ea4ab41d
+size 4792
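
Since every file in this commit sits under last-checkpoint/, the run can be resumed from this step-400 state with the Trainer API. A minimal sketch, assuming trainer is the same Seq2SeqTrainer instance (and arguments) that produced the checkpoint:

# Minimal sketch: resume fine-tuning from the saved step-400 checkpoint.
# The path is whatever local directory holds the files above; model, optimizer,
# scheduler, and RNG state are restored from model.safetensors, optimizer.pt,
# scheduler.pt, and rng_state.pth respectively.
trainer.train(resume_from_checkpoint="last-checkpoint")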