# Gradio app: audio classification, realtime audio classification,
# speech recognition, and a chatbot Q&A panel, toggled via nav buttons.
# (Header lines here were Hugging Face Spaces page-scrape residue —
# "Spaces:", "Runtime error", commit hashes, and a line-number gutter —
# converted to this comment so the file parses as Python.)
import gradio as gr
def to_audioClassification():
    """Show the audio-classification panel; hide the other three mode panels."""
    panels = (audio_classification, realtime_classification, speech_recognition, chatbot_qa)
    return {panel: gr.Row(visible=panel is audio_classification) for panel in panels}
def to_realtimeAudioClassification():
    """Show the realtime-classification panel; hide the other three mode panels."""
    panels = (audio_classification, realtime_classification, speech_recognition, chatbot_qa)
    return {panel: gr.Row(visible=panel is realtime_classification) for panel in panels}
def to_speechRecognition():
    """Show the speech-recognition panel; hide the other three mode panels."""
    panels = (audio_classification, realtime_classification, speech_recognition, chatbot_qa)
    return {panel: gr.Row(visible=panel is speech_recognition) for panel in panels}
def to_chatbot():
    """Show the chatbot Q&A panel; hide the other three mode panels."""
    panels = (audio_classification, realtime_classification, speech_recognition, chatbot_qa)
    return {panel: gr.Row(visible=panel is chatbot_qa) for panel in panels}
with gr.Blocks() as demo:
    # ---- Global settings, shared across all modes ----
    with gr.Accordion("Settings", open=True):
        language = gr.Radio(["en-us", "pt-br"], label="Language", info="Choose the language to display the classification result and audio", value='en-us', interactive=True)
        # NOTE(review): `language` is never wired as an input to any handler
        # in this file — confirm it is consumed elsewhere or wire it up.

    # ---- Navigation: one button per mode; each click reveals exactly one row ----
    with gr.Row():
        btn0 = gr.Button("Audio Classification", scale=1, size='lg')
        btn1 = gr.Button("Realtime Audio Classification", scale=1,size='lg')
        btn2 = gr.Button("Speech Recognition", scale=1, size='lg')
        # NOTE(review): labeled "Help" but wired below to show the chatbot
        # row — confirm the label is intentional.
        btn3 = gr.Button("Help", scale=1, size='lg')

    # ---- Mode 1: one-shot audio classification (record or upload) ----
    # All mode rows start hidden; the nav handlers toggle visibility.
    with gr.Row(visible=False) as audio_classification:
        with gr.Column(min_width=700):
            with gr.Accordion("Record an Audio", open=True):
                # `source=` / `type="filepath"` is Gradio 3.x API.
                inputRecord = gr.Audio(label="Audio Input", source="microphone", type="filepath")
            with gr.Accordion("Upload a file", open=False):
                inputUpload = gr.Audio(label="Audio Input", source="upload", type="filepath")
            clearBtn = gr.ClearButton([inputRecord, inputUpload])
        with gr.Column(min_width=700):
            output = gr.Label(label="Audio Classification")
            # NOTE(review): no .click handler is attached to this button in
            # this file — presumably wired elsewhere, or dead; verify.
            btn = gr.Button(value="Generate Audio")
            audioOutput = gr.Audio(label="Audio Output", interactive=False)

    # ---- Mode 2: streaming (realtime) classification from the microphone ----
    with gr.Row(visible=False) as realtime_classification:
        with gr.Column(min_width=700):
            # NOTE(review): `input` shadows the `input` builtin, and
            # `output`/`inputRecord` names are reused across mode rows — the
            # Python variables keep only the last binding, though the UI
            # components themselves are unaffected.
            input = gr.Audio(label="Audio Input", source="microphone", type="filepath",streaming=True, every=10)
            historyOutput = gr.Textbox(label="History", interactive=False)
            # historyOutput = gr.Label(label="History")
        with gr.Column(min_width=700):
            output = gr.Label(label="Audio Classification")

    # ---- Mode 3: one-shot speech recognition (record or upload) ----
    with gr.Row(visible=False) as speech_recognition:
        with gr.Column(min_width=700):
            with gr.Accordion("Record an Audio", open=True):
                inputRecord = gr.Audio(label="Audio Input", source="microphone", type="filepath")
            with gr.Accordion("Upload a file", open=False):
                inputUpload = gr.Audio(label="Audio Input", source="upload", type="filepath")
            # NOTE(review): unlike the audio-classification row, this clear
            # button omits `inputUpload` — confirm whether that is intended.
            clearBtn = gr.ClearButton([inputRecord])
        with gr.Column(min_width=700):
            output = gr.Label(label="Transcription")

    # ---- Mode 4: chatbot Q&A ----
    with gr.Row(visible=False) as chatbot_qa:
        chatbot = gr.Chatbot(
            [],
            elem_id="chatbot",
            bubble_full_width=False,
            # avatar_images=(None, "/content/avatar-socialear.png"),
            min_width=2000
        )
        with gr.Row(min_width=2000):
            txt = gr.Textbox(
                scale=4,
                show_label=False,
                placeholder="Enter text and press enter",
                container=False,
                min_width=1000
            )
            # NOTE(review): empty label and no .click handler in this file —
            # presumably wired elsewhere; verify.
            submit = gr.Button(value="", size='sm', scale=1)

    # ---- Nav wiring: each handler returns a {row: gr.Row(visible=...)} dict,
    # so the outputs list must contain all four mode rows. ----
    btn0.click(fn=to_audioClassification, outputs=[audio_classification, realtime_classification, speech_recognition, chatbot_qa])
    btn1.click(fn=to_realtimeAudioClassification, outputs=[audio_classification, realtime_classification, speech_recognition, chatbot_qa])
    btn2.click(fn=to_speechRecognition, outputs=[audio_classification, realtime_classification, speech_recognition, chatbot_qa])
    btn3.click(fn=to_chatbot, outputs=[audio_classification, realtime_classification, speech_recognition, chatbot_qa])
# Script entry point: enable the request queue (required for the streaming
# Audio input above) and start the Gradio server.
# Fix: the original final line ended with a stray " |" (page-scrape gutter
# residue), which made the whole file a SyntaxError.
if __name__ == "__main__":
    demo.queue()
    demo.launch()