import gradio as gr

# Navigation callbacks: each one shows its own section and hides the other
# three by returning visibility updates keyed by the Row containers below.
def to_audioClassification():
    return {
        audio_classification: gr.Row(visible=True),
        realtime_classification: gr.Row(visible=False),
        speech_recognition: gr.Row(visible=False),
        chatbot_qa: gr.Row(visible=False),
    }

def to_realtimeAudioClassification():
    return {
        audio_classification: gr.Row(visible=False),
        realtime_classification: gr.Row(visible=True),
        speech_recognition: gr.Row(visible=False),
        chatbot_qa: gr.Row(visible=False),
    }

def to_speechRecognition():
    return {
        audio_classification: gr.Row(visible=False),
        realtime_classification: gr.Row(visible=False),
        speech_recognition: gr.Row(visible=True),
        chatbot_qa: gr.Row(visible=False),
    }

def to_chatbot():
    return {
        audio_classification: gr.Row(visible=False),
        realtime_classification: gr.Row(visible=False),
        speech_recognition: gr.Row(visible=False),
        chatbot_qa: gr.Row(visible=True),
    }

with gr.Blocks() as demo:
    with gr.Accordion("Settings", open=True):
        language = gr.Radio(
            ["en-us", "pt-br"],
            label="Language",
            info="Choose the language to display the classification result and audio",
            value="en-us",
            interactive=True,
        )

    with gr.Row():
        btn0 = gr.Button("Audio Classification", scale=1, size='lg')
        btn1 = gr.Button("Realtime Audio Classification", scale=1, size='lg')
        btn2 = gr.Button("Speech Recognition", scale=1, size='lg')
        btn3 = gr.Button("Help", scale=1, size='lg')
    with gr.Row(visible=False) as audio_classification:
        with gr.Column(min_width=700):
            with gr.Accordion("Record an Audio", open=True):
                inputRecord = gr.Audio(label="Audio Input", source="microphone", type="filepath")
            with gr.Accordion("Upload a file", open=False):
                inputUpload = gr.Audio(label="Audio Input", source="upload", type="filepath")
            clearBtn = gr.ClearButton([inputRecord, inputUpload])
        with gr.Column(min_width=700):
            output = gr.Label(label="Audio Classification")
            btn = gr.Button(value="Generate Audio")
            audioOutput = gr.Audio(label="Audio Output", interactive=False)
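
        # --- Illustrative wiring, not part of the original file: nothing in
        # the code shown ever populates `output` or `audioOutput`. A minimal
        # hookup could look like the sketch below; `classify_audio` and
        # `text_to_speech` are hypothetical placeholders for the model calls.
        def classify_audio(filepath, lang):
            # A real implementation would run an audio-classification model
            # and return a {label: confidence} dict for the gr.Label.
            return {"placeholder": 1.0}

        def text_to_speech(lang):
            # A real implementation would synthesize the classification
            # result in the chosen language and return an audio filepath.
            return None

        inputRecord.change(fn=classify_audio, inputs=[inputRecord, language], outputs=output)
        inputUpload.change(fn=classify_audio, inputs=[inputUpload, language], outputs=output)
        btn.click(fn=text_to_speech, inputs=[language], outputs=audioOutput)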
    with gr.Row(visible=False) as realtime_classification:
        with gr.Column(min_width=700):
            # Renamed from `input`, which shadowed the Python builtin.
            inputRealtime = gr.Audio(label="Audio Input", source="microphone", type="filepath", streaming=True, every=10)
            historyOutput = gr.Textbox(label="History", interactive=False)
            # historyOutput = gr.Label(label="History")
        with gr.Column(min_width=700):
            output = gr.Label(label="Audio Classification")
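
        # --- Illustrative wiring, not part of the original file: a streaming
        # microphone input is typically consumed with `.stream()`. The
        # `classify_chunk` function is a hypothetical placeholder.
        def classify_chunk(filepath, history):
            # A real implementation would classify the latest audio chunk and
            # append the top label to the running history.
            return {"placeholder": 1.0}, history or ""

        inputRealtime.stream(
            fn=classify_chunk,
            inputs=[inputRealtime, historyOutput],
            outputs=[output, historyOutput],
        )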
    with gr.Row(visible=False) as speech_recognition:
        with gr.Column(min_width=700):
            with gr.Accordion("Record an Audio", open=True):
                inputRecord = gr.Audio(label="Audio Input", source="microphone", type="filepath")
            with gr.Accordion("Upload a file", open=False):
                inputUpload = gr.Audio(label="Audio Input", source="upload", type="filepath")
            # Clear both inputs (the original cleared only the recording).
            clearBtn = gr.ClearButton([inputRecord, inputUpload])
        with gr.Column(min_width=700):
            output = gr.Label(label="Transcription")
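
        # --- Illustrative wiring, not part of the original file: `transcribe`
        # is a hypothetical placeholder for a speech-to-text model call.
        def transcribe(filepath, lang):
            # A real implementation would run an ASR model and return the
            # transcribed text for the gr.Label above.
            return ""

        inputRecord.change(fn=transcribe, inputs=[inputRecord, language], outputs=output)
        inputUpload.change(fn=transcribe, inputs=[inputUpload, language], outputs=output)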
    with gr.Row(visible=False) as chatbot_qa:
        chatbot = gr.Chatbot(
            [],
            elem_id="chatbot",
            bubble_full_width=False,
            # avatar_images=(None, "/content/avatar-socialear.png"),
            min_width=2000,
        )
        with gr.Row(min_width=2000):
            txt = gr.Textbox(
                scale=4,
                show_label=False,
                placeholder="Enter text and press enter",
                container=False,
                min_width=1000,
            )
            submit = gr.Button(value="", size='sm', scale=1)
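
    # --- Illustrative wiring, not part of the original file: `respond` is a
    # hypothetical placeholder for the Q&A backend behind the chatbot.
    def respond(message, history):
        # A real implementation would generate an answer to `message` here.
        history = history + [(message, "(placeholder answer)")]
        return "", history

    txt.submit(fn=respond, inputs=[txt, chatbot], outputs=[txt, chatbot])
    submit.click(fn=respond, inputs=[txt, chatbot], outputs=[txt, chatbot])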
    # Top-bar navigation: route each button to the matching section.
    btn0.click(fn=to_audioClassification, outputs=[audio_classification, realtime_classification, speech_recognition, chatbot_qa])
    btn1.click(fn=to_realtimeAudioClassification, outputs=[audio_classification, realtime_classification, speech_recognition, chatbot_qa])
    btn2.click(fn=to_speechRecognition, outputs=[audio_classification, realtime_classification, speech_recognition, chatbot_qa])
    btn3.click(fn=to_chatbot, outputs=[audio_classification, realtime_classification, speech_recognition, chatbot_qa])

if __name__ == "__main__":
    demo.queue()
    demo.launch()