import numpy as np
import pyarrow as pa
import sounddevice as sd
import whisper
from pynput import keyboard
from pynput.keyboard import Key

from dora import DoraStatus

# Load the Whisper model once at module import so it is reused for every event.
model = whisper.load_model("base")

SAMPLE_RATE = 16000  # Whisper expects 16 kHz mono audio
MAX_DURATION = 15  # maximum recording length in seconds

class Operator:
    """
    Transform speech to text using the OpenAI Whisper model.
    """

    def on_event(
        self,
        dora_event,
        send_output,
    ) -> DoraStatus:
        if dora_event["type"] == "INPUT":
            # Poll the keyboard for up to one second; only record when the
            # up-arrow key is pressed.
            with keyboard.Events() as events:
                event = events.get(1.0)
                if event is not None and event.key == Key.up:
                    # Record up to MAX_DURATION seconds of mono audio from the
                    # default microphone, blocking until the recording finishes.
                    audio_data = sd.rec(
                        int(SAMPLE_RATE * MAX_DURATION),
                        samplerate=SAMPLE_RATE,
                        channels=1,
                        dtype=np.int16,
                        blocking=True,
                    )
                    # Convert int16 samples to float32 in [-1.0, 1.0], the range Whisper expects.
                    audio = audio_data.ravel().astype(np.float32) / 32768.0

                    # Speech to text: pad/trim the clip and transcribe it, then
                    # forward the transcription downstream on the "text" output.
                    audio = whisper.pad_or_trim(audio)
                    result = model.transcribe(audio, language="en")
                    send_output(
                        "text", pa.array([result["text"]]), dora_event["metadata"]
                    )
        return DoraStatus.CONTINUE
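

# The block below is a minimal, hypothetical smoke test and is NOT part of the
# original example: it sketches how the operator could be exercised outside a
# Dora dataflow, assuming a hand-built "INPUT" event dict and a print-based
# stand-in for send_output.
if __name__ == "__main__":
    op = Operator()
    status = op.on_event(
        {"type": "INPUT", "metadata": {}},
        # send_output stand-in: print the output id, transcribed text, and metadata.
        lambda output_id, data, metadata: print(output_id, data.to_pylist(), metadata),
    )
    assert status == DoraStatus.CONTINUE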