joydeep bhattacharjee committed
Commit: b97106e
Parent(s): 861b010

odia model version1

Browse files:
- README.md +96 -0
- config.json +76 -0
- preprocessor_config.json +8 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- training_args.bin +3 -0
- vocab.json +1 -0
README.md
ADDED
@@ -0,0 +1,96 @@
---
language: or
datasets:
- common_voice
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: Joydeep Bhattacharjee XLSR Wav2Vec2 Large 53 Odia
  results:
  - task:
      name: Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: Common Voice or
      type: common_voice
      args: or
    metrics:
    - name: Test WER
      type: wer
      value: 55.07
---
# Wav2Vec2-Large-XLSR-53-Odia

Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Odia using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.

When using this model, make sure that your speech input is sampled at 16 kHz.
## Usage

The model can be used directly (without a language model) as follows:

```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

test_dataset = load_dataset("common_voice", "or", split="test[:2%]")

processor = Wav2Vec2Processor.from_pretrained("infinitejoy/Wav2Vec2-Large-XLSR-53-Odia")
model = Wav2Vec2ForCTC.from_pretrained("infinitejoy/Wav2Vec2-Large-XLSR-53-Odia")

resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the dataset:
# read each audio file as an array, resampled from 48 kHz to 16 kHz.
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)

print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
## Evaluation

The model can be evaluated as follows on the Odia test data of Common Voice.

```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re

test_dataset = load_dataset("common_voice", "or", split="test")
wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("infinitejoy/Wav2Vec2-Large-XLSR-53-Odia")
model = Wav2Vec2ForCTC.from_pretrained("infinitejoy/Wav2Vec2-Large-XLSR-53-Odia")
model.to("cuda")

chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�\।\–]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the dataset:
# normalize the transcripts and read each audio file as a 16 kHz array.
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub('’ ', ' ', batch["sentence"])
    batch["sentence"] = re.sub(' ‘', ' ', batch["sentence"])
    batch["sentence"] = re.sub('’|‘', "'", batch["sentence"])
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Run batched inference and collect the predicted transcriptions.
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:.2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```
**Test Result**: 55.07 %

## Training

The Common Voice `train` and `validation` datasets were used for training.
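The training script itself is not part of this commit; as a minimal sketch, the combined split named above can be loaded with the same dataset id and language config used in the snippets earlier:

```python
from datasets import load_dataset

# Sketch only: load the combined train+validation split used for fine-tuning.
train_dataset = load_dataset("common_voice", "or", split="train+validation")
print(train_dataset)
```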
config.json
ADDED
@@ -0,0 +1,76 @@
{
  "_name_or_path": "facebook/wav2vec2-large-xlsr-53",
  "activation_dropout": 0.0,
  "apply_spec_augment": true,
  "architectures": [
    "Wav2Vec2ForCTC"
  ],
  "attention_dropout": 0.1,
  "bos_token_id": 1,
  "conv_bias": true,
  "conv_dim": [
    512,
    512,
    512,
    512,
    512,
    512,
    512
  ],
  "conv_kernel": [
    10,
    3,
    3,
    3,
    3,
    2,
    2
  ],
  "conv_stride": [
    5,
    2,
    2,
    2,
    2,
    2,
    2
  ],
  "ctc_loss_reduction": "mean",
  "ctc_zero_infinity": false,
  "do_stable_layer_norm": true,
  "eos_token_id": 2,
  "feat_extract_activation": "gelu",
  "feat_extract_dropout": 0.0,
  "feat_extract_norm": "layer",
  "feat_proj_dropout": 0.0,
  "final_dropout": 0.0,
  "gradient_checkpointing": true,
  "hidden_act": "gelu",
  "hidden_dropout": 0.1,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "layer_norm_eps": 1e-05,
  "layerdrop": 0.1,
  "mask_channel_length": 10,
  "mask_channel_min_space": 1,
  "mask_channel_other": 0.0,
  "mask_channel_prob": 0.0,
  "mask_channel_selection": "static",
  "mask_feature_length": 10,
  "mask_feature_prob": 0.0,
  "mask_time_length": 10,
  "mask_time_min_space": 1,
  "mask_time_other": 0.0,
  "mask_time_prob": 0.05,
  "mask_time_selection": "static",
  "model_type": "wav2vec2",
  "num_attention_heads": 16,
  "num_conv_pos_embedding_groups": 16,
  "num_conv_pos_embeddings": 128,
  "num_feat_extract_layers": 7,
  "num_hidden_layers": 24,
  "pad_token_id": 64,
  "transformers_version": "4.4.0",
  "vocab_size": 65
}
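The values above pin down the architecture: 24 transformer layers, 1024-dimensional hidden states, and a 65-entry CTC vocabulary (matching vocab.json below). A minimal sketch of inspecting them programmatically, assuming the repo id used in the README:

```python
from transformers import Wav2Vec2Config

# Load this config from the hub and print a few architecture fields.
config = Wav2Vec2Config.from_pretrained("infinitejoy/Wav2Vec2-Large-XLSR-53-Odia")
print(config.num_hidden_layers)  # 24
print(config.hidden_size)        # 1024
print(config.vocab_size)         # 65
```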
preprocessor_config.json
ADDED
@@ -0,0 +1,8 @@
{
  "do_normalize": true,
  "feature_size": 1,
  "padding_side": "right",
  "padding_value": 0.0,
  "return_attention_mask": true,
  "sampling_rate": 16000
}
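This file drives the feature extractor: mono float input (feature_size 1), waveform normalization, right padding with 0.0, 16 kHz sampling, and an attention mask for padded batches. A minimal usage sketch, assuming the same repo id:

```python
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

# The extractor normalizes a raw 16 kHz waveform and returns model-ready arrays.
fe = Wav2Vec2FeatureExtractor.from_pretrained("infinitejoy/Wav2Vec2-Large-XLSR-53-Odia")
features = fe(np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="np")
print(features.input_values.shape)  # (1, 16000)
```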
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c5ddd5eceb0130b831e8311aaf078a1fc117ccd2f2ad3a9484ecd83d88677d9d
size 1262200343
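This is a Git LFS pointer rather than the weights themselves; the actual ~1.2 GB binary is fetched separately on download. A hedged sketch for checking a downloaded copy against the digest recorded above:

```python
import hashlib

# Hash the downloaded weights in 1 MiB chunks and compare with the pointer's sha256.
h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
print(h.hexdigest() == "c5ddd5eceb0130b831e8311aaf078a1fc117ccd2f2ad3a9484ecd83d88677d9d")
```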
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]"}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "s"}
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:66a33ee1941159861a963e66dcefea9a4964f9c3b896d89f17124cdbfc4077db
size 2351
vocab.json
ADDED
@@ -0,0 +1 @@
{"ଁ": 0, "ଉ": 1, "ଏ": 2, "ୋ": 3, "ଟ": 4, "ଥ": 5, "ଷ": 6, "ବ": 7, "ଇ": 8, "ନ": 9, "ଅ": 10, "ଭ": 11, "ା": 12, "ର": 13, "ଜ": 14, "ଧ": 15, "ୂ": 16, "ଦ": 17, "ଵ": 18, "ଙ": 19, "ଲ": 20, "’": 21, "ୁ": 22, "ଊ": 23, "ଗ": 25, "ଫ": 26, "ଓ": 27, "ଝ": 28, "୍": 29, "'": 30, "ଈ": 31, "ପ": 32, "|": 33, "ି": 34, "ୃ": 35, "କ": 36, "ଞ": 37, "ଣ": 38, "ସ": 39, "ୟ": 40, "଼": 41, "ୀ": 42, "ୌ": 43, "ଖ": 44, "ଃ": 45, "ଶ": 46, "ମ": 47, "ଂ": 48, "ଳ": 49, "ୈ": 50, "ଠ": 51, "ଆ": 52, "ତ": 53, "ଢ": 54, "ଯ": 55, "ହ": 56, "ଡ": 57, "ଛ": 58, "େ": 59, "ଚ": 60, "ୱ": 61, "ଘ": 62, "s": 24, "[UNK]": 63, "[PAD]": 64}
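The vocabulary maps each Odia character, plus "[UNK]" and "[PAD]", to a CTC label id; together with tokenizer_config.json and special_tokens_map.json above it defines the tokenizer. Note that tokenizer_config.json sets word_delimiter_token to "s" (id 24) rather than the conventional "|" (id 33), which affects how word boundaries are decoded. A minimal loading sketch, assuming the same repo id:

```python
from transformers import Wav2Vec2CTCTokenizer

# Load the CTC tokenizer defined by vocab.json and the token config files above.
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("infinitejoy/Wav2Vec2-Large-XLSR-53-Odia")
print(tokenizer.vocab_size)                      # 65
print(tokenizer.pad_token, tokenizer.unk_token)  # [PAD] [UNK]
```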