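"""Gradio voice-command demo: records audio from the microphone, passes the
recording to query() for the selected language, and routes the returned command
text to the matching activate_* handler."""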
import gradio as gr
import soundfile as sf

# query() and the activate_* command handlers are defined in these local modules.
from inference import *
from script import *


def preprocess_audio(audio_array):
    """Write the (sample_rate, data) tuple from gr.Audio to a WAV file and return its path."""
    try:
        sample_rate, array = audio_array
        sf.write('audio.wav', array, samplerate=sample_rate, subtype='PCM_16')
        return 'audio.wav'
    except TypeError:
        # gr.Audio passes None before anything has been recorded.
        return None


def interface(Language, Audio_Inp):
    audio_path = preprocess_audio(Audio_Inp)
    if audio_path is None:
        # live=True fires before the microphone has produced any data.
        return None

    if Language == 'Hausa':
        voice_command = query(audio_path, 'ha')
        state = activate_hausa(voice_command)
        return state

    # elif Language == 'English':
    #     voice_command = query(audio_path, 'en')
    #     state = activate_english(voice_command)
    #     return state

    elif Language == 'Yoruba':
        voice_command = query(audio_path, 'yo')
        state = activate_yoruba(voice_command)
        return state

    else:
        return None


demo = gr.Interface(
    fn=interface,
    inputs=[
        gr.Dropdown(['Hausa', 'English', 'Yoruba'],
                    value='Hausa', label='Select Your Preferred Language'),
        gr.Audio(source='microphone', type='numpy'),
    ],
    outputs="text",
    live=True,
)


if __name__ == '__main__':
    demo.launch(share=False)