Commit b29b819 · Update README.md
Parent(s): f373715
README.md CHANGED
@@ -23,7 +23,7 @@ model-index:
 metrics:
 - name: Test WER
   type: wer
-  value:
+  value: 45.28
 ---
 
 # Wav2Vec2-Large-XLSR-53-Georgian
@@ -53,15 +53,15 @@ resampler = lambda sampling_rate, y: librosa.resample(y.numpy().squeeze(), sampl
 # Preprocessing the datasets.
 # We need to read the audio files as arrays
 def speech_file_to_array_fn(batch):
-
-
-
+\tspeech_array, sampling_rate = torchaudio.load(batch["path"])
+\tbatch["speech"] = resampler(sampling_rate, speech_array).squeeze()
+\treturn batch
 
 test_dataset = test_dataset.map(speech_file_to_array_fn)
 inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
 
 with torch.no_grad():
-
+\tlogits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
 
 predicted_ids = torch.argmax(logits, dim=-1)
 
@@ -90,7 +90,7 @@ processor = Wav2Vec2Processor.from_pretrained("xsway/wav2vec2-large-xlsr-georgia
 model = Wav2Vec2ForCTC.from_pretrained("xsway/wav2vec2-large-xlsr-georgian")
 model.to("cuda")
 
-chars_to_ignore_regex = '[
+chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“]'
 resampler = lambda sampling_rate, y: librosa.resample(y.numpy().squeeze(), sampling_rate, 16_000)
 
 # Preprocessing the datasets.
@@ -120,7 +120,7 @@ result = test_dataset.map(evaluate, batched=True, batch_size=8)
 print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
 ```
 
-**Test Result**:
+**Test Result**: 45.28 %
 
 
 ## Training
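Taken together, the hunks above fill in the placeholders in the card's usage example: the body of `speech_file_to_array_fn`, the forward pass under `torch.no_grad()`, the `chars_to_ignore_regex` used for evaluation, and the reported test WER of 45.28. A minimal, self-contained sketch of the inference flow those hunks complete is shown below. The `load_dataset("common_voice", "ka", ...)` call and the final `batch_decode` print are assumptions drawn from the standard XLSR evaluation template rather than lines visible in this diff, and `librosa.resample` is written with keyword arguments, which newer librosa releases require (the card uses the older positional form).

```python
import torch
import torchaudio
import librosa
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# Assumed dataset: Common Voice Georgian test split (not shown in this diff).
test_dataset = load_dataset("common_voice", "ka", split="test[:2%]")

processor = Wav2Vec2Processor.from_pretrained("xsway/wav2vec2-large-xlsr-georgian")
model = Wav2Vec2ForCTC.from_pretrained("xsway/wav2vec2-large-xlsr-georgian")

# Resample each clip to the 16 kHz rate the model expects.
# (Keyword arguments are required by librosa >= 0.10; the card uses positional arguments.)
resampler = lambda sampling_rate, y: librosa.resample(
    y.numpy().squeeze(), orig_sr=sampling_rate, target_sr=16_000
)

def speech_file_to_array_fn(batch):
    # Read the audio file and store a 16 kHz float array under "speech".
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(sampling_rate, speech_array).squeeze()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
```

The evaluation hunk follows the same pattern: `chars_to_ignore_regex` strips punctuation from the references, an `evaluate` function is mapped over the dataset in batches of 8, and the predictions are scored against the `sentence` column with the `wer` metric to produce the 45.28 % test result recorded in the metadata.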