sampras343
committed on
Commit
•
b39543a
1
Parent(s):
fa2168c
first commit
Browse files- README.md +5 -0
- config.json +117 -0
- nncf_config.json +36 -0
- ov_model.bin +3 -0
- ov_model.xml +0 -0
- preprocessor_config.json +36 -0
- wav2vec.py +93 -0
README.md
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[anton-l/wav2vec2-base-ft-keyword-spotting](https://huggingface.co/anton-l/wav2vec2-base-ft-keyword-spotting) model quantized with [Optimum OpenVINO](https://github.com/dkurt/optimum-openvino/).
|
2 |
+
|
3 |
+
| Accuracy on eval (baseline) | Accuracy on eval (quantized) |
|
4 |
+
|-----------------------------|----------------------------------------|
|
5 |
+
| 0.9828 | 0.9553 (-0.0274) |
|
config.json
ADDED
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_name_or_path": "facebook/wav2vec2-base",
|
3 |
+
"activation_dropout": 0.0,
|
4 |
+
"apply_spec_augment": true,
|
5 |
+
"architectures": [
|
6 |
+
"Wav2Vec2ForSequenceClassification"
|
7 |
+
],
|
8 |
+
"attention_dropout": 0.1,
|
9 |
+
"bos_token_id": 1,
|
10 |
+
"classifier_proj_size": 256,
|
11 |
+
"codevector_dim": 256,
|
12 |
+
"contrastive_logits_temperature": 0.1,
|
13 |
+
"conv_bias": false,
|
14 |
+
"conv_dim": [
|
15 |
+
512,
|
16 |
+
512,
|
17 |
+
512,
|
18 |
+
512,
|
19 |
+
512,
|
20 |
+
512,
|
21 |
+
512
|
22 |
+
],
|
23 |
+
"conv_kernel": [
|
24 |
+
10,
|
25 |
+
3,
|
26 |
+
3,
|
27 |
+
3,
|
28 |
+
3,
|
29 |
+
2,
|
30 |
+
2
|
31 |
+
],
|
32 |
+
"conv_stride": [
|
33 |
+
5,
|
34 |
+
2,
|
35 |
+
2,
|
36 |
+
2,
|
37 |
+
2,
|
38 |
+
2,
|
39 |
+
2
|
40 |
+
],
|
41 |
+
"ctc_loss_reduction": "sum",
|
42 |
+
"ctc_zero_infinity": false,
|
43 |
+
"diversity_loss_weight": 0.1,
|
44 |
+
"do_stable_layer_norm": false,
|
45 |
+
"eos_token_id": 2,
|
46 |
+
"feat_extract_activation": "gelu",
|
47 |
+
"feat_extract_norm": "group",
|
48 |
+
"feat_proj_dropout": 0.1,
|
49 |
+
"feat_quantizer_dropout": 0.0,
|
50 |
+
"final_dropout": 0.0,
|
51 |
+
"finetuning_task": "audio-classification",
|
52 |
+
"freeze_feat_extract_train": true,
|
53 |
+
"hidden_act": "gelu",
|
54 |
+
"hidden_dropout": 0.1,
|
55 |
+
"hidden_size": 768,
|
56 |
+
"id2label": {
|
57 |
+
"0": "yes",
|
58 |
+
"1": "no",
|
59 |
+
"10": "_silence_",
|
60 |
+
"11": "_unknown_",
|
61 |
+
"2": "up",
|
62 |
+
"3": "down",
|
63 |
+
"4": "left",
|
64 |
+
"5": "right",
|
65 |
+
"6": "on",
|
66 |
+
"7": "off",
|
67 |
+
"8": "stop",
|
68 |
+
"9": "go"
|
69 |
+
},
|
70 |
+
"initializer_range": 0.02,
|
71 |
+
"intermediate_size": 3072,
|
72 |
+
"label2id": {
|
73 |
+
"_silence_": "10",
|
74 |
+
"_unknown_": "11",
|
75 |
+
"down": "3",
|
76 |
+
"go": "9",
|
77 |
+
"left": "4",
|
78 |
+
"no": "1",
|
79 |
+
"off": "7",
|
80 |
+
"on": "6",
|
81 |
+
"right": "5",
|
82 |
+
"stop": "8",
|
83 |
+
"up": "2",
|
84 |
+
"yes": "0"
|
85 |
+
},
|
86 |
+
"layer_norm_eps": 1e-05,
|
87 |
+
"layerdrop": 0.05,
|
88 |
+
"mask_channel_length": 10,
|
89 |
+
"mask_channel_min_space": 1,
|
90 |
+
"mask_channel_other": 0.0,
|
91 |
+
"mask_channel_prob": 0.0,
|
92 |
+
"mask_channel_selection": "static",
|
93 |
+
"mask_feature_length": 10,
|
94 |
+
"mask_feature_prob": 0.0,
|
95 |
+
"mask_time_length": 10,
|
96 |
+
"mask_time_min_space": 1,
|
97 |
+
"mask_time_other": 0.0,
|
98 |
+
"mask_time_prob": 0.05,
|
99 |
+
"mask_time_selection": "static",
|
100 |
+
"model_type": "wav2vec2",
|
101 |
+
"no_mask_channel_overlap": false,
|
102 |
+
"no_mask_time_overlap": false,
|
103 |
+
"num_attention_heads": 12,
|
104 |
+
"num_codevector_groups": 2,
|
105 |
+
"num_codevectors_per_group": 320,
|
106 |
+
"num_conv_pos_embedding_groups": 16,
|
107 |
+
"num_conv_pos_embeddings": 128,
|
108 |
+
"num_feat_extract_layers": 7,
|
109 |
+
"num_hidden_layers": 12,
|
110 |
+
"num_negatives": 100,
|
111 |
+
"pad_token_id": 0,
|
112 |
+
"proj_codevector_dim": 256,
|
113 |
+
"torch_dtype": "float32",
|
114 |
+
"transformers_version": "4.12.0.dev0",
|
115 |
+
"use_weighted_layer_sum": false,
|
116 |
+
"vocab_size": 32
|
117 |
+
}
|
nncf_config.json
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"input_info": [
|
3 |
+
{
|
4 |
+
"sample_size": [1, 16000],
|
5 |
+
"type": "float"
|
6 |
+
}
|
7 |
+
],
|
8 |
+
"compression": {
|
9 |
+
"algorithm": "quantization",
|
10 |
+
"initializer": {
|
11 |
+
"range": {
|
12 |
+
"num_init_samples": 32,
|
13 |
+
"type": "percentile",
|
14 |
+
"params":
|
15 |
+
{
|
16 |
+
"min_percentile": 0.01,
|
17 |
+
"max_percentile": 99.99
|
18 |
+
}
|
19 |
+
},
|
20 |
+
|
21 |
+
"batchnorm_adaptation": {
|
22 |
+
"num_bn_adaptation_samples": 200
|
23 |
+
}
|
24 |
+
},
|
25 |
+
"activations":
|
26 |
+
{
|
27 |
+
"mode": "asymmetric"
|
28 |
+
},
|
29 |
+
"weights":
|
30 |
+
{
|
31 |
+
"mode": "symmetric",
|
32 |
+
"signed": true,
|
33 |
+
"per_channel": true
|
34 |
+
}
|
35 |
+
}
|
36 |
+
}
|
ov_model.bin
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:229e84a458770e5bf4de7dcdcd5e0d55f8bac42287e5c8d2304c123eb08dcb90
|
3 |
+
size 95305472
|
ov_model.xml
ADDED
The diff for this file is too large to render.
See raw diff
|
|
preprocessor_config.json
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"input_info": [
|
3 |
+
{
|
4 |
+
"sample_size": [1, 16000],
|
5 |
+
"type": "float"
|
6 |
+
}
|
7 |
+
],
|
8 |
+
"compression": {
|
9 |
+
"algorithm": "quantization",
|
10 |
+
"initializer": {
|
11 |
+
"range": {
|
12 |
+
"num_init_samples": 32,
|
13 |
+
"type": "percentile",
|
14 |
+
"params":
|
15 |
+
{
|
16 |
+
"min_percentile": 0.01,
|
17 |
+
"max_percentile": 99.99
|
18 |
+
}
|
19 |
+
},
|
20 |
+
|
21 |
+
"batchnorm_adaptation": {
|
22 |
+
"num_bn_adaptation_samples": 200
|
23 |
+
}
|
24 |
+
},
|
25 |
+
"activations":
|
26 |
+
{
|
27 |
+
"mode": "asymmetric"
|
28 |
+
},
|
29 |
+
"weights":
|
30 |
+
{
|
31 |
+
"mode": "symmetric",
|
32 |
+
"signed": true,
|
33 |
+
"per_channel": true
|
34 |
+
}
|
35 |
+
}
|
36 |
+
}
|
wav2vec.py
ADDED
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
|
2 |
+
"""
|
3 |
+
Copyright (C) 2021-2022 Intel Corporation
|
4 |
+
|
5 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
+
you may not use this file except in compliance with the License.
|
7 |
+
You may obtain a copy of the License at
|
8 |
+
|
9 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
|
11 |
+
Unless required by applicable law or agreed to in writing, software
|
12 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
+
See the License for the specific language governing permissions and
|
15 |
+
limitations under the License.
|
16 |
+
"""
|
17 |
+
|
18 |
+
from argparse import ArgumentParser, SUPPRESS
|
19 |
+
from itertools import groupby
|
20 |
+
import json
|
21 |
+
import logging as log
|
22 |
+
from pathlib import Path
|
23 |
+
from time import perf_counter
|
24 |
+
import sys
|
25 |
+
|
26 |
+
import numpy as np
|
27 |
+
import wave
|
28 |
+
|
29 |
+
from openvino.inference_engine import IECore
|
30 |
+
|
31 |
+
ie = IECore()
|
32 |
+
|
33 |
+
|
34 |
+
|
35 |
+
class Wav2Vec:
    """Greedy-CTC wrapper around an OpenVINO wav2vec2 IR model.

    Reads the IR network once at construction; `infer` runs it on a
    normalized audio tensor and `decode` turns the resulting logits into
    a lowercase transcription.
    """

    # CTC vocabulary: the index in this list is the class index in the
    # model's per-frame logits.
    alphabet = [
        "<pad>", "<s>", "</s>", "<unk>", "|",
        "e", "t", "a", "o", "n", "i", "h", "s", "r", "d", "l", "u",
        "m", "w", "c", "f", "g", "y", "p", "b", "v", "k", "'", "x", "j", "q", "z"]
    words_delimiter = '|'
    pad_token = '<pad>'

    def __init__(self,
                 model_xml="/home/intel/Documents/ASR/wav2vec2-base-ft-keyword-spotting-int8/ov_model.xml",
                 model_bin="/home/intel/Documents/ASR/wav2vec2-base-ft-keyword-spotting-int8/ov_model.bin"):
        """Read the IR network.

        The defaults are the original hard-coded paths so existing
        `Wav2Vec()` callers keep working; pass explicit paths to load a
        different model.
        """
        self.nnet = ie.read_network(model_xml, model_bin)
        # Executable network is created lazily and cached (the original
        # re-loaded the model onto the device on every infer() call).
        self._exec_net = None

    @staticmethod
    def preprocess(sound):
        """Zero-mean / unit-variance normalize; epsilon guards std == 0."""
        return (sound - np.mean(sound)) / (np.std(sound) + 1e-15)

    def infer(self, audio):
        """Run the network on `audio`; return the raw output-blob dict."""
        if self._exec_net is None:
            self._exec_net = ie.load_network(self.nnet, "CPU")
        return self._exec_net.infer({"input_values": audio})

    def decode(self, logits):
        """Greedy CTC decode: per-frame argmax, collapse repeats, drop pads.

        Returns the lowercase transcription with '|' rendered as spaces.
        """
        token_ids = np.squeeze(np.argmax(logits, -1))
        # BUG FIX: the original indexed self.decoding_vocab, which is never
        # defined anywhere in the class -- the lookup table is `alphabet`.
        tokens = [self.alphabet[idx] for idx in token_ids]
        tokens = [group[0] for group in groupby(tokens)]
        tokens = [t for t in tokens if t != self.pad_token]
        text = ''.join(' ' if t == self.words_delimiter else t for t in tokens)
        return text.strip().lower()

    def reshape(self, audio):
        """Reshape the network input to match `audio`'s shape."""
        self.nnet.reshape({next(iter(self.nnet.input_info)): audio.shape})
        # Shape changed: any previously loaded executable network is stale.
        self._exec_net = None
|
67 |
+
|
68 |
+
def main(wav_path="/home/intel/Documents/ASR/applications.ai.conversational-ai.asr-grpc-security/client_sample_examples/python/audio_data_samples/how_are_you_doing.wav"):
    """Transcribe one WAV file with the quantized wav2vec2 model.

    Loads a 16 kHz, mono, 16-bit linear-PCM WAV, runs preprocessing,
    reshape and inference, then prints the transcription and the
    end-to-end latency in milliseconds.

    wav_path defaults to the original hard-coded sample so a bare
    `main()` call behaves as before; pass a path to transcribe another
    file. Raises ValueError for unsupported WAV formats.
    """
    model = Wav2Vec()
    start_time = perf_counter()
    with wave.open(wav_path, 'rb') as wave_read:
        channel_num, sample_width, sampling_rate, pcm_length, compression_type, _ = wave_read.getparams()
        # Validate with real exceptions instead of `assert`, which is
        # silently stripped when Python runs with -O.
        if sample_width != 2:
            raise ValueError("Only 16-bit WAV PCM supported")
        if compression_type != 'NONE':
            raise ValueError("Only linear PCM WAV files supported")
        if channel_num != 1:
            raise ValueError("Only mono WAV PCM supported")
        if sampling_rate != 16000:
            raise ValueError("Only 16 KHz audio supported")
        # Scale int16 samples to [-1, 1] floats, shaped (1, num_samples).
        audio = np.frombuffer(wave_read.readframes(pcm_length * channel_num),
                              dtype=np.int16).reshape((1, pcm_length))
        audio = audio.astype(float) / np.iinfo(np.int16).max

    normalized_audio = model.preprocess(audio)
    model.reshape(normalized_audio)
    character_probs = model.infer(normalized_audio)
    # NOTE(review): "3761" looks like an auto-generated IR output-node
    # name; confirm it matches the exported ov_model.xml rather than
    # leaving it hard-coded.
    transcription = model.decode(character_probs["3761"])
    total_latency = (perf_counter() - start_time) * 1e3
    print(transcription)
    print(total_latency)
|
91 |
+
|
92 |
+
if __name__ == '__main__':
|
93 |
+
sys.exit(main() or 0)
|