# dora-idefics2/operators/whisper_op.py
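"""Push-to-talk speech-to-text node for dora.

Hold right Alt or right Ctrl to record from the default microphone; releasing
right Alt sends the Whisper transcript on the `text_llm` output, while
releasing right Ctrl sends it on `text_policy`.
"""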
import gc  # garbage collection, used to release memory between transcriptions

import numpy as np
import pyarrow as pa
import sounddevice as sd
import torch
import whisper
from dora import Node
from pynput import keyboard
from pynput.keyboard import Events, Key
# Load Whisper once at startup; the "base" checkpoint trades some accuracy for speed.
model = whisper.load_model("base")

SAMPLE_RATE = 16000  # Whisper expects 16 kHz mono audio
MAX_DURATION = 30  # longest recording in seconds (one Whisper window)

audio_data = None  # most recent microphone buffer, if any

node = Node()
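# Key bindings (push-to-talk):
#   press right Alt or right Ctrl -> start recording (up to MAX_DURATION seconds)
#   release right Alt             -> transcribe and emit on `text_llm`
#   release right Ctrl            -> transcribe and emit on `text_policy`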
def transcribe_and_send(output_id, metadata):
    """Stop recording, run Whisper on the captured audio, and emit the transcript."""
    sd.stop()
    if audio_data is None:
        return
    # Convert int16 PCM samples to the float32 range [-1.0, 1.0] Whisper expects.
    audio = audio_data.ravel().astype(np.float32) / 32768.0
    audio = whisper.pad_or_trim(audio)
    result = model.transcribe(audio, language="en")
    node.send_output(output_id, pa.array([result["text"]]), metadata)
    # send_output("led", pa.array([0, 0, 255]))
    # Free host and GPU memory held by the transcription pass.
    gc.collect()
    torch.cuda.empty_cache()


for dora_event in node:
    if dora_event["type"] == "INPUT":
        # Poll the keyboard for up to one second per input event.
        with keyboard.Events() as events:
            event = events.get(1.0)
            if (
                event is not None
                and (event.key == Key.alt_r or event.key == Key.ctrl_r)
                and isinstance(event, Events.Press)
            ):
                # Key pressed: start a non-blocking microphone recording.
                audio_data = sd.rec(
                    int(SAMPLE_RATE * MAX_DURATION),
                    samplerate=SAMPLE_RATE,
                    channels=1,
                    dtype=np.int16,
                    blocking=False,
                )
            elif (
                event is not None
                and event.key == Key.alt_r
                and isinstance(event, Events.Release)
            ):
                # Right Alt released: route the transcript to the LLM.
                transcribe_and_send("text_llm", dora_event["metadata"])
            elif (
                event is not None
                and event.key == Key.ctrl_r
                and isinstance(event, Events.Release)
            ):
                # Right Ctrl released: route the transcript to the policy.
                transcribe_and_send("text_policy", dora_event["metadata"])
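# For reference, a minimal sketch of how this node might be declared in a dora
# dataflow YAML. This is an assumption, not taken from this repo: the node id,
# tick interval, and the exact schema (which varies across dora versions) are
# hypothetical.
#
#   - id: whisper
#     custom:
#       source: operators/whisper_op.py
#       inputs:
#         tick: dora/timer/millis/100
#       outputs:
#         - text_llm
#         - text_policy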