/usr/data/condor/execute/dir_32133/whisper_slu
WhisperSLU(
  (model): PeftModel(
    (base_model): LoraModel(
      (model): WhisperModel(
        (encoder): WhisperEncoder(
          (conv1): Conv1d(80, 768, kernel_size=(3,), stride=(1,), padding=(1,))
          (conv2): Conv1d(768, 768, kernel_size=(3,), stride=(2,), padding=(1,))
          (embed_positions): Embedding(1500, 768)
          (layers): ModuleList(
            (0-11): 12 x WhisperEncoderLayer(
              (self_attn): WhisperAttention(
                (k_proj): Linear(in_features=768, out_features=768, bias=False)
                (v_proj): Linear(in_features=768, out_features=768, bias=True)
                (q_proj): Linear(in_features=768, out_features=768, bias=True)
                (out_proj): Linear(in_features=768, out_features=768, bias=True)
              )
              (self_attn_layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
              (activation_fn): GELUActivation()
              (fc1): Linear(in_features=768, out_features=3072, bias=True)
              (fc2): Linear(in_features=3072, out_features=768, bias=True)
              (final_layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
            )
          )
          (layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
        )
        (decoder): WhisperDecoder(
          (embed_tokens): Embedding(51865, 768, padding_idx=50257)
          (embed_positions): WhisperPositionalEmbedding(448, 768)
          (layers): ModuleList(
            (0-11): 12 x WhisperDecoderLayer(
              (self_attn): WhisperAttention(
                (k_proj): Linear(in_features=768, out_features=768, bias=False)
                (v_proj): lora.Linear(
                  (base_layer): Linear(in_features=768, out_features=768, bias=True)
                  (lora_dropout): ModuleDict(
                    (default): Dropout(p=0.1, inplace=False)
                  )
                  (lora_A): ModuleDict(
                    (default): Linear(in_features=768, out_features=8, bias=False)
                  )
                  (lora_B): ModuleDict(
                    (default): Linear(in_features=8, out_features=768, bias=False)
                  )
                  (lora_embedding_A): ParameterDict()
                  (lora_embedding_B): ParameterDict()
                )
                (q_proj): lora.Linear(
                  (base_layer): Linear(in_features=768, out_features=768, bias=True)
                  (lora_dropout): ModuleDict(
                    (default): Dropout(p=0.1, inplace=False)
                  )
                  (lora_A): ModuleDict(
                    (default): Linear(in_features=768, out_features=8, bias=False)
                  )
                  (lora_B): ModuleDict(
                    (default): Linear(in_features=8, out_features=768, bias=False)
                  )
                  (lora_embedding_A): ParameterDict()
                  (lora_embedding_B): ParameterDict()
                )
                (out_proj): Linear(in_features=768, out_features=768, bias=True)
              )
              (activation_fn): GELUActivation()
              (self_attn_layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
              (encoder_attn): WhisperAttention(
                (k_proj): Linear(in_features=768, out_features=768, bias=False)
                (v_proj): lora.Linear(
                  (base_layer): Linear(in_features=768, out_features=768, bias=True)
                  (lora_dropout): ModuleDict(
                    (default): Dropout(p=0.1, inplace=False)
                  )
                  (lora_A): ModuleDict(
                    (default): Linear(in_features=768, out_features=8, bias=False)
                  )
                  (lora_B): ModuleDict(
                    (default): Linear(in_features=8, out_features=768, bias=False)
                  )
                  (lora_embedding_A): ParameterDict()
                  (lora_embedding_B): ParameterDict()
                )
                (q_proj): lora.Linear(
                  (base_layer): Linear(in_features=768, out_features=768, bias=True)
                  (lora_dropout): ModuleDict(
                    (default): Dropout(p=0.1, inplace=False)
                  )
                  (lora_A): ModuleDict(
                    (default): Linear(in_features=768, out_features=8, bias=False)
                  )
                  (lora_B): ModuleDict(
                    (default): Linear(in_features=8, out_features=768, bias=False)
                  )
                  (lora_embedding_A): ParameterDict()
                  (lora_embedding_B): ParameterDict()
                )
                (out_proj): Linear(in_features=768, out_features=768, bias=True)
              )
              (encoder_attn_layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
              (fc1): Linear(in_features=768, out_features=3072, bias=True)
              (fc2): Linear(in_features=3072, out_features=768, bias=True)
              (final_layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
            )
          )
          (layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
        )
      )
    )
  )
  (proj_out): Linear(in_features=768, out_features=51865, bias=False)
  (classifier): WhisperClassificationHead(
    (embed_positions): WhisperPositionalEmbedding(448, 768)
    (layers): ModuleList(
      (0-1): 2 x WhisperEncoderLayer(
        (self_attn): WhisperAttention(
          (k_proj): Linear(in_features=768, out_features=768, bias=False)
          (v_proj): Linear(in_features=768, out_features=768, bias=True)
          (q_proj): Linear(in_features=768, out_features=768, bias=True)
          (out_proj): Linear(in_features=768, out_features=768, bias=True)
        )
        (self_attn_layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
        (activation_fn): GELUActivation()
        (fc1): Linear(in_features=768, out_features=2048, bias=True)
        (fc2): Linear(in_features=2048, out_features=768, bias=True)
        (final_layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
      )
    )
    (layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
    (out_proj): Linear(in_features=768, out_features=37, bias=True)
    (crf): ConditionalRandomField()
  )
)
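The module tree above shows LoRA adapters with rank 8 and dropout 0.1 injected only into the q_proj and v_proj projections of the decoder's self- and cross-attention, while the encoder projections stay as plain Linear layers. Below is a minimal sketch of how that adapter layout could be reproduced with the peft library; the base checkpoint name, lora_alpha, and the decoder-only regex are assumptions inferred from the printout, not taken from the actual training script, and the custom proj_out / WhisperClassificationHead parts are omitted.

import torch
from transformers import WhisperModel
from peft import LoraConfig, get_peft_model

# whisper-small matches the printed dimensions: d_model=768, 12 encoder and 12 decoder layers
base = WhisperModel.from_pretrained("openai/whisper-small")

lora_cfg = LoraConfig(
    r=8,                 # lora_A: 768 -> 8, lora_B: 8 -> 768 in the printout
    lora_alpha=16,       # assumption: alpha is not visible in the module repr
    lora_dropout=0.1,    # Dropout(p=0.1) in the printout
    # a regex target restricts the adapters to decoder q_proj/v_proj,
    # matching the printout (encoder attention is left untouched)
    target_modules=r".*decoder.*\.(q_proj|v_proj)",
)

model = get_peft_model(base, lora_cfg)
model.print_trainable_parameters()  # only the LoRA A/B matrices are trainable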
{'loss': 0.0467, 'learning_rate': 5e-05, 'epoch': 0.18}
{'loss': 0.0433, 'learning_rate': 4.994863481875841e-05, 'epoch': 0.36}
{'eval_loss': 0.05231842026114464, 'eval_f1_score': 0.6251468860164512, 'eval_label_f1': 0.8319623971797884, 'eval_wer': 0.10434244136543076, 'eval_runtime': 457.2641, 'eval_samples_per_second': 2.187, 'eval_steps_per_second': 0.547, 'epoch': 0.36}
{'loss': 0.0402, 'learning_rate': 4.979475034558115e-05, 'epoch': 0.54}
{'loss': 0.0391, 'learning_rate': 4.9538978924776634e-05, 'epoch': 0.71}
{'eval_loss': 0.050447478890419006, 'eval_f1_score': 0.6206896551724138, 'eval_label_f1': 0.8346394984326019, 'eval_wer': 0.10472946822509482, 'eval_runtime': 457.7184, 'eval_samples_per_second': 2.185, 'eval_steps_per_second': 0.546, 'epoch': 0.71}
{'loss': 0.037, 'learning_rate': 4.9182371575975736e-05, 'epoch': 0.89}
{'loss': 0.0381, 'learning_rate': 4.8726393675266716e-05, 'epoch': 1.07}
{'eval_loss': 0.049576789140701294, 'eval_f1_score': 0.6142467886337096, 'eval_label_f1': 0.8322304398598676, 'eval_wer': 0.10654849446551591, 'eval_runtime': 458.238, 'eval_samples_per_second': 2.182, 'eval_steps_per_second': 0.546, 'epoch': 1.07}
{'loss': 0.037, 'learning_rate': 4.817291893365055e-05, 'epoch': 1.25}
{'loss': 0.0374, 'learning_rate': 4.752422169756048e-05, 'epoch': 1.43}
{'eval_loss': 0.04838084056973457, 'eval_f1_score': 0.6158081006685017, 'eval_label_f1': 0.8360204482894219, 'eval_wer': 0.10705162938307919, 'eval_runtime': 456.7741, 'eval_samples_per_second': 2.189, 'eval_steps_per_second': 0.547, 'epoch': 1.43}
{'loss': 0.0375, 'learning_rate': 4.678296760308474e-05, 'epoch': 1.61}
{'loss': 0.0374, 'learning_rate': 4.595220262229601e-05, 'epoch': 1.79}
{'eval_loss': 0.047435563057661057, 'eval_f1_score': 0.6155063291139241, 'eval_label_f1': 0.8370253164556962, 'eval_wer': 0.10693552132517997, 'eval_runtime': 463.3137, 'eval_samples_per_second': 2.158, 'eval_steps_per_second': 0.54, 'epoch': 1.79}
{'loss': 0.0379, 'learning_rate': 4.503534054669892e-05, 'epoch': 1.97}
{'loss': 0.0342, 'learning_rate': 4.4036148959228365e-05, 'epoch': 2.14}
{'eval_loss': 0.04737536609172821, 'eval_f1_score': 0.6118110236220473, 'eval_label_f1': 0.8362204724409448, 'eval_wer': 0.1077095750445081, 'eval_runtime': 458.8739, 'eval_samples_per_second': 2.179, 'eval_steps_per_second': 0.545, 'epoch': 2.14}
{'loss': 0.0345, 'learning_rate': 4.2958733752443195e-05, 'epoch': 2.32}
{'loss': 0.0362, 'learning_rate': 4.180752225653292e-05, 'epoch': 2.5}
{'eval_loss': 0.04683598503470421, 'eval_f1_score': 0.6138147566718996, 'eval_label_f1': 0.8375196232339089, 'eval_wer': 0.10790308847434012, 'eval_runtime': 458.5195, 'eval_samples_per_second': 2.181, 'eval_steps_per_second': 0.545, 'epoch': 2.5}
{'loss': 0.0343, 'learning_rate': 4.058724504646834e-05, 'epoch': 2.68}
{'loss': 0.0351, 'learning_rate': 3.9302916503054246e-05, 'epoch': 2.86}
{'eval_loss': 0.04606114700436592, 'eval_f1_score': 0.6101960784313726, 'eval_label_f1': 0.836078431372549, 'eval_wer': 0.10821270996207137, 'eval_runtime': 460.3927, 'eval_samples_per_second': 2.172, 'eval_steps_per_second': 0.543, 'epoch': 2.86}
{'loss': 0.0355, 'learning_rate': 3.7959814207763135e-05, 'epoch': 3.04}
{'loss': 0.0339, 'learning_rate': 3.656345725602089e-05, 'epoch': 3.22}
{'eval_loss': 0.046606115996837616, 'eval_f1_score': 0.611111111111111, 'eval_label_f1': 0.838810641627543, 'eval_wer': 0.10794179116030653, 'eval_runtime': 460.841, 'eval_samples_per_second': 2.17, 'eval_steps_per_second': 0.542, 'epoch': 3.22}
{'loss': 0.0333, 'learning_rate': 3.5119583578059846e-05, 'epoch': 3.4}
{'loss': 0.0323, 'learning_rate': 3.363412636053269e-05, 'epoch': 3.57}
{'eval_loss': 0.04674151912331581, 'eval_f1_score': 0.6168371361132967, 'eval_label_f1': 0.8418568056648308, 'eval_wer': 0.10879325025156746, 'eval_runtime': 460.8468, 'eval_samples_per_second': 2.17, 'eval_steps_per_second': 0.542, 'epoch': 3.57}
{'loss': 0.0332, 'learning_rate': 3.211318966577581e-05, 'epoch': 3.75}
{'loss': 0.0338, 'learning_rate': 3.056302334890786e-05, 'epoch': 3.93}
{'eval_loss': 0.0457298718392849, 'eval_f1_score': 0.6093443266588143, 'eval_label_f1': 0.8425598743619945, 'eval_wer': 0.10856103413576902, 'eval_runtime': 456.4605, 'eval_samples_per_second': 2.191, 'eval_steps_per_second': 0.548, 'epoch': 3.93}
{'loss': 0.0333, 'learning_rate': 2.8989997375834482e-05, 'epoch': 4.11}
{'loss': 0.032, 'learning_rate': 2.7400575647692046e-05, 'epoch': 4.29}
{'eval_loss': 0.045234858989715576, 'eval_f1_score': 0.6090404440919905, 'eval_label_f1': 0.8398096748612214, 'eval_wer': 0.10852233144980261, 'eval_runtime': 457.189, 'eval_samples_per_second': 2.187, 'eval_steps_per_second': 0.547, 'epoch': 4.29}
{'loss': 0.0335, 'learning_rate': 2.5801289439291388e-05, 'epoch': 4.47}
{'loss': 0.0307, 'learning_rate': 2.419871056070862e-05, 'epoch': 4.65}
{'eval_loss': 0.04505770280957222, 'eval_f1_score': 0.6139315230224321, 'eval_label_f1': 0.8421881149153877, 'eval_wer': 0.10859973682173543, 'eval_runtime': 456.5336, 'eval_samples_per_second': 2.19, 'eval_steps_per_second': 0.548, 'epoch': 4.65}
{'loss': 0.0324, 'learning_rate': 2.2599424352307957e-05, 'epoch': 4.83}
{'loss': 0.0321, 'learning_rate': 2.1010002624165527e-05, 'epoch': 5.0}
{'eval_loss': 0.04516015574336052, 'eval_f1_score': 0.6115702479338844, 'eval_label_f1': 0.8398268398268398, 'eval_wer': 0.10829011533400418, 'eval_runtime': 459.143, 'eval_samples_per_second': 2.178, 'eval_steps_per_second': 0.544, 'epoch': 5.0}
{'loss': 0.0303, 'learning_rate': 1.9436976651092144e-05, 'epoch': 5.18}
{'loss': 0.0313, 'learning_rate': 1.7886810334224192e-05, 'epoch': 5.36}
{'eval_loss': 0.044775452464818954, 'eval_f1_score': 0.6116352201257862, 'eval_label_f1': 0.8404088050314464, 'eval_wer': 0.10921897979719793, 'eval_runtime': 458.1981, 'eval_samples_per_second': 2.182, 'eval_steps_per_second': 0.546, 'epoch': 5.36}
{'loss': 0.0321, 'learning_rate': 1.6365873639467315e-05, 'epoch': 5.54}
{'loss': 0.0309, 'learning_rate': 1.4880416421940155e-05, 'epoch': 5.72}
{'eval_loss': 0.04491139575839043, 'eval_f1_score': 0.6109148017275227, 'eval_label_f1': 0.8402041617589321, 'eval_wer': 0.10832881801997059, 'eval_runtime': 457.2993, 'eval_samples_per_second': 2.187, 'eval_steps_per_second': 0.547, 'epoch': 5.72}
{'loss': 0.0318, 'learning_rate': 1.3436542743979125e-05, 'epoch': 5.9}
{'loss': 0.0305, 'learning_rate': 1.2040185792236874e-05, 'epoch': 6.08}
{'eval_loss': 0.0448303148150444, 'eval_f1_score': 0.6085590891244601, 'eval_label_f1': 0.8402041617589321, 'eval_wer': 0.10825141264803777, 'eval_runtime': 459.7209, 'eval_samples_per_second': 2.175, 'eval_steps_per_second': 0.544, 'epoch': 6.08}
{'loss': 0.0302, 'learning_rate': 1.0697083496945765e-05, 'epoch': 6.26}
{'loss': 0.0301, 'learning_rate': 9.412754953531663e-06, 'epoch': 6.43}
{'eval_loss': 0.044706329703330994, 'eval_f1_score': 0.6115702479338844, 'eval_label_f1': 0.8374655647382919, 'eval_wer': 0.10813530459013855, 'eval_runtime': 459.3177, 'eval_samples_per_second': 2.177, 'eval_steps_per_second': 0.544, 'epoch': 6.43}
{'loss': 0.0304, 'learning_rate': 8.192477743467078e-06, 'epoch': 6.61}
{'loss': 0.03, 'learning_rate': 7.041266247556813e-06, 'epoch': 6.79}
{'eval_loss': 0.04461517930030823, 'eval_f1_score': 0.6103434662455587, 'eval_label_f1': 0.8401105408606395, 'eval_wer': 0.10867714219366824, 'eval_runtime': 456.0173, 'eval_samples_per_second': 2.193, 'eval_steps_per_second': 0.548, 'epoch': 6.79}
{'loss': 0.0314, 'learning_rate': 5.9638510407716394e-06, 'epoch': 6.97}
{'loss': 0.0302, 'learning_rate': 4.9646594533010875e-06, 'epoch': 7.15}
{'eval_loss': 0.04448845237493515, 'eval_f1_score': 0.6120047077285209, 'eval_label_f1': 0.8387602981561396, 'eval_wer': 0.108367520705937, 'eval_runtime': 457.6573, 'eval_samples_per_second': 2.185, 'eval_steps_per_second': 0.546, 'epoch': 7.15}
{'loss': 0.0309, 'learning_rate': 4.047797377703985e-06, 'epoch': 7.33}
{'loss': 0.0294, 'learning_rate': 3.217032396915265e-06, 'epoch': 7.51}
{'eval_loss': 0.044213637709617615, 'eval_f1_score': 0.6132075471698112, 'eval_label_f1': 0.8396226415094339, 'eval_wer': 0.10863843950770183, 'eval_runtime': 455.3785, 'eval_samples_per_second': 2.196, 'eval_steps_per_second': 0.549, 'epoch': 7.51}
{'loss': 0.0296, 'learning_rate': 2.475778302439524e-06, 'epoch': 7.69}
{'loss': 0.03, 'learning_rate': 1.827081066349459e-06, 'epoch': 7.86}
{'eval_loss': 0.04440901055932045, 'eval_f1_score': 0.6111547525530244, 'eval_label_f1': 0.8381775333857032, 'eval_wer': 0.10879325025156746, 'eval_runtime': 458.0063, 'eval_samples_per_second': 2.183, 'eval_steps_per_second': 0.546, 'epoch': 7.86}
{'loss': 0.0301, 'learning_rate': 1.273606324733284e-06, 'epoch': 8.04}
{'loss': 0.03, 'learning_rate': 8.176284240242638e-07, 'epoch': 8.22}
{'eval_loss': 0.04447592794895172, 'eval_f1_score': 0.6109148017275227, 'eval_label_f1': 0.8370632116215156, 'eval_wer': 0.10871584487963465, 'eval_runtime': 458.4151, 'eval_samples_per_second': 2.181, 'eval_steps_per_second': 0.545, 'epoch': 8.22}
{'loss': 0.0307, 'learning_rate': 4.6102107522336403e-07, 'epoch': 8.4}
{'loss': 0.0296, 'learning_rate': 2.052496544188487e-07, 'epoch': 8.58}
{'eval_loss': 0.04443991929292679, 'eval_f1_score': 0.6117000392618768, 'eval_label_f1': 0.8378484491558698, 'eval_wer': 0.1084062233919034, 'eval_runtime': 455.2323, 'eval_samples_per_second': 2.197, 'eval_steps_per_second': 0.549, 'epoch': 8.58}
{'loss': 0.0288, 'learning_rate': 5.136518124159162e-08, 'epoch': 8.76}
{'loss': 0.0297, 'learning_rate': 0.0, 'epoch': 8.94}
{'eval_loss': 0.04443608224391937, 'eval_f1_score': 0.6098231827111984, 'eval_label_f1': 0.8369351669941061, 'eval_wer': 0.10856103413576902, 'eval_runtime': 456.0761, 'eval_samples_per_second': 2.193, 'eval_steps_per_second': 0.548, 'epoch': 8.94}
{'train_runtime': 46949.8286, 'train_samples_per_second': 13.632, 'train_steps_per_second': 0.106, 'train_loss': 0.03340700387954712, 'epoch': 8.94}
***** train metrics *****
  epoch                    =        8.94
  train_loss               =      0.0334
  train_runtime            = 13:02:29.82
  train_samples_per_second =      13.632
  train_steps_per_second   =       0.106
{'eval_loss': 0.04443608224391937, 'eval_f1_score': 0.6098231827111984, 'eval_label_f1': 0.8369351669941061, 'eval_wer': 0.10856103413576902, 'eval_runtime': 457.7019, 'eval_samples_per_second': 2.185, 'eval_steps_per_second': 0.546, 'epoch': 8.94}
***** eval metrics *****
  epoch                   =       8.94
  eval_f1_score           =     0.6098
  eval_label_f1           =     0.8369
  eval_loss               =     0.0444
  eval_runtime            = 0:07:37.70
  eval_samples            =       1000
  eval_samples_per_second =      2.185
  eval_steps_per_second   =      0.546
  eval_wer                =     0.1086
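For comparing checkpoints, the {...} lines above are valid Python dict literals and can be read back directly. A small sketch, assuming the log was saved to a file named train.log (hypothetical name):

import ast

def parse_log(path):
    # Split the dict-literal lines of the trainer log into train and eval records.
    train, evals = [], []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line.startswith("{") and line.endswith("}"):
                rec = ast.literal_eval(line)
                (evals if "eval_loss" in rec else train).append(rec)
    return train, evals

train_recs, eval_recs = parse_log("train.log")  # hypothetical filename
best = min(eval_recs, key=lambda r: r["eval_loss"])
print(f"best eval_loss {best['eval_loss']:.4f} at epoch {best['epoch']} "
      f"(WER {best['eval_wer']:.4f}, f1 {best['eval_f1_score']:.4f})")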