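"""Dora node: push-to-talk speech-to-text.

Hold the right Alt key to record from the default microphone (up to 30
seconds at 16 kHz); on release, the capture is transcribed with OpenAI
Whisper and the text is sent on the node's "text" output.
"""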
import gc  # to free buffers between transcriptions

import numpy as np
import pyarrow as pa
import sounddevice as sd
import torch
import whisper
from dora import Node
from pynput import keyboard
from pynput.keyboard import Key, Events

model = whisper.load_model("base")

SAMPLE_RATE = 16000  # Hz; Whisper expects 16 kHz mono audio
MAX_DURATION = 30  # seconds; matches Whisper's fixed input window

node = Node()

audio_data = None  # holds the in-flight recording; set on key press

for dora_event in node:
    if dora_event["type"] == "INPUT":
        ## Check for a keyboard event, waiting at most one second
        with keyboard.Events() as events:
            event = events.get(1.0)
            if (
                event is not None
                and event.key == Key.alt_r
                and isinstance(event, Events.Press)
            ):

                ## Microphone: start a non-blocking recording
                audio_data = sd.rec(
                    int(SAMPLE_RATE * MAX_DURATION),
                    samplerate=SAMPLE_RATE,
                    channels=1,
                    dtype=np.int16,
                    blocking=False,
                )

            elif (
                event is not None
                and event.key == Key.alt_r
                and isinstance(event, Events.Release)
                and audio_data is not None  # ignore a release with no prior press
            ):
                sd.stop()
                # Convert int16 samples to float32 in [-1.0, 1.0] for Whisper
                audio = audio_data.ravel().astype(np.float32) / 32768.0

                ## Speech to text: pad or trim to Whisper's fixed 30 s window
                audio = whisper.pad_or_trim(audio)
                result = model.transcribe(audio, language="en")
                node.send_output(
                    "text", pa.array([result["text"]]), dora_event["metadata"]
                )
                # node.send_output("led", pa.array([0, 0, 255]), dora_event["metadata"])

                audio_data = None  # drop the buffer so a stray release is ignored
                gc.collect()
                # Only touch the CUDA cache when a GPU is actually present
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()
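
# A minimal dataflow sketch for wiring this node into a dora graph. This is
# an illustrative assumption, not taken from this file: the node id, script
# name, and tick interval are hypothetical, and the exact YAML schema may
# differ between dora versions.
#
#   nodes:
#     - id: whisper
#       path: whisper_node.py
#       inputs:
#         tick: dora/timer/millis/100
#       outputs:
#         - text
#
# The timer input drives the event loop above: each tick arrives as an
# "INPUT" event, which triggers one one-second poll of the keyboard.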