|
import librosa |
|
import torch |
|
from transformers import WhisperProcessor, WhisperForConditionalGeneration |
|
|
|
|
|
|
|
|
|
# Hugging Face model id of the pretrained Whisper checkpoint used for
# speech-to-text. Loaded once at import time (downloads weights on first run).
checkpoint = "openai/whisper-base"

# Feature extractor + tokenizer pair matching the checkpoint.
processor = WhisperProcessor.from_pretrained(checkpoint)

# Seq2seq Whisper model used by A2T.predict for generation.
model = WhisperForConditionalGeneration.from_pretrained(checkpoint)



# Maximum length of audio, in seconds, that A2T will transcribe
# (input is truncated to this many seconds at 16 kHz).
LIMIT = 90
|
|
|
class A2T:
    """Audio-to-text transcriber backed by the module-level Whisper model.

    Wraps a microphone/audio object and turns it into an English transcription
    via the globally loaded ``processor`` and ``model``.
    """

    def __init__(self, mic):
        # `mic` is expected to expose raw samples and a `frame_rate`
        # attribute (e.g. a pydub AudioSegment) — TODO confirm with caller.
        self.mic = mic

    def __preprocess(self, audio, frame_rate, limit_s=None):
        """Normalize raw int16 samples to mono float audio at 16 kHz.

        Parameters
        ----------
        audio : array-like of int16 samples (1-D mono or 2-D multi-channel).
        frame_rate : int, sampling rate of `audio` in Hz.
        limit_s : int or None, maximum duration to keep in seconds;
            defaults to the module-level ``LIMIT``.

        Returns
        -------
        torch.Tensor (1-D float) on success, or ``None`` if any step fails.
        """
        try:
            # int16 full scale is 32768 — the original divided by 32678,
            # a digit-transposition typo that mis-scaled every sample.
            audio = audio / 32768.0

            # Collapse multi-channel audio to a single mono channel.
            if len(audio.shape) > 1:
                audio = librosa.to_mono(audio.T)

            # Whisper expects 16 kHz input; resample anything else.
            if frame_rate != 16_000:
                audio = librosa.resample(audio, orig_sr=frame_rate, target_sr=16_000)

            # Truncate to bound generation time.
            limit = LIMIT if limit_s is None else limit_s
            audio = audio[:16_000 * limit]

            return torch.tensor(audio)
        except Exception as e:
            # Best-effort boundary: report and signal failure with None.
            print("Error", e)
            return None

    def predict(self):
        """Transcribe ``self.mic`` to English text.

        Returns the transcription string, or a human-readable message when
        no audio was provided or an error occurred.
        """
        # Original read `this.mic` — a NameError (Python has no `this`),
        # so predict() crashed unconditionally. Fixed to `self.mic`.
        if self.mic is not None:
            audio = self.mic
            frame_rate = audio.frame_rate
        else:
            return "please provide audio"

        try:
            forced_decoder_ids = processor.get_decoder_prompt_ids(
                language="english", task="transcribe"
            )

            audio = self.__preprocess(audio=audio, frame_rate=frame_rate)
            if audio is None:
                # Preprocessing failed (it already printed the error);
                # don't feed None into the processor.
                return "Oops some kinda error"

            inputs = processor(audio=audio, sampling_rate=16000, return_tensors="pt")
            predicted_ids = model.generate(
                **inputs, max_length=400, forced_decoder_ids=forced_decoder_ids
            )
            transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
            return transcription[0]
        except Exception as e:
            print("Error", e)
            return "Oops some kinda error"
|
|