import gradio as gr from transformers import pipeline from helpers import load_model_file, load_wav_16k_mono_librosa, initialize_text_to_speech_model, load_label_mapping, predict_yamnet, classify, classify_realtime from helpers import interface, interface_realtime, updateHistory, clearHistory, clear, format_dictionary, format_json from helpers import generate_audio, TTS, TTS_ASR, TTS_chatbot, transcribe_speech, transcribe_speech_realtime, transcribe_realtime, translate_enpt from helpers import chatbot_response, add_text history = "" last_answer = "" examples_audio_classification = [ "content/talking-people.mp3", "content/miaow_16k.wav", "content/birds-in-forest-loop.wav", "content/drumming-jungle-music.wav", "content/driving-in-the-rain.wav", "content/city-alert-siren.wav", "content/small-group-applause.wav", "content/angry-male-crowd-ambience.wav", "content/slow-typing-on-a-keyboard.wav", "content/emergency-car-arrival.wav" ] examples_speech_recognition_en = [ "content/speech1-en.wav", "content/speech2-en.wav", "content/speech1-ptbr.wav", "content/speech2-ptbr.wav", "content/speech3-ptbr.wav" ] examples_speech_recognition_ptbr = [ "content/speech1-ptbr.wav", "content/speech2-ptbr.wav", "content/speech3-ptbr.wav", ] examples_chatbot_en = [ ['How does SocialEar assist people with hearing disabilities?'], ['Give me suggestions on how to use SocialEar'], ['How does SocialEar work?'], ['Are SocialEar results accurate?'], ['What accessibility features does SocialEar offer?'], ['Does SocialEar collect personal data?'], ['Can I use SocialEar to identify songs and artists from recorded audio?'], ] examples_chatbot_ptbr = [ ['Como o SocialEar auxilia pessoas com deficiência auditiva?'], ['Dê-me sugestões sobre como usar o SocialEar'], ['Como funciona o SocialEar?'], ['Os resultados do SocialEar são precisos?'], ['Quais recursos de acessibilidade o SocialEar oferece?'], ['O SocialEar coleta dados pessoais?'], ['Posso usar o SocialEar para identificar músicas e artistas de áudio gravado?'], ] def to_audioClassification(): return { audio_classification: gr.Row(visible=True), realtime_classification: gr.Row(visible=False), speech_recognition: gr.Row(visible=False), chatbot_qa: gr.Row(visible=False), } def to_realtimeAudioClassification(): return { audio_classification: gr.Row(visible=False), realtime_classification: gr.Row(visible=True), speech_recognition: gr.Row(visible=False), chatbot_qa: gr.Row(visible=False), } def to_speechRecognition(): return { audio_classification: gr.Row(visible=False), realtime_classification: gr.Row(visible=False), speech_recognition: gr.Row(visible=True), chatbot_qa: gr.Row(visible=False), } def to_chatbot(): return { audio_classification: gr.Row(visible=False), realtime_classification: gr.Row(visible=False), speech_recognition: gr.Row(visible=False), chatbot_qa: gr.Row(visible=True), } with gr.Blocks() as demo: # with gr.Accordion("Settings", open=True): # language = gr.Radio(["en-us", "pt-br"], label="Language", info="Choose the language to display the classification result and audio", value='en-us', interactive=True) with gr.Row(): btn0 = gr.Button("Classificação de áudio", scale=1, icon='content/Audio Classification.png', size='lg') btn1 = gr.Button("Classificação de áudio em tempo real", scale=1, icon='content/Realtime Audio Classification.png', size='lg') btn2 = gr.Button("Reconhecimento de Fala", scale=1, icon='content/Speech Recognition.png', size='lg') btn3 = gr.Button("Ajuda Q&A", scale=1, icon='content/Chatbot.png', size='lg') with gr.Row(visible=False) as audio_classification: with gr.Column(min_width=700): with gr.Accordion("Grave um áudio", open=True): inputRecord = gr.Audio(label="Entrada de áudio", source="microphone", type="filepath") with gr.Accordion("Carregue um arquivo", open=False): inputUpload = gr.Audio(label="Entrada de áudio", source="upload", type="filepath") clearBtn = gr.ClearButton([inputRecord, inputUpload]) with gr.Column(min_width=700): output = gr.Label(label="Classificação de Áudio") btn = gr.Button(value="Gerar áudio") audioOutput = gr.Audio(label="Saída de áudio", interactive=False) inputRecord.stop_recording(interface, [inputRecord, "pt-br"], [output]) inputUpload.upload(interface, [inputUpload, "pt-br"], [output]) btn.click(fn=TTS, inputs=[output, "pt-br"], outputs=audioOutput) examples = gr.Examples(fn=interface, examples=examples_audio_classification, inputs=[inputRecord], outputs=[output], run_on_click=True) with gr.Row(visible=False) as realtime_classification: with gr.Column(min_width=700): input = gr.Audio(label="Entrada de áudio", source="microphone", type="filepath",streaming=True, every=10) historyOutput = gr.Textbox(label="Histórico", interactive=False) # historyOutput = gr.Label(label="History") with gr.Column(min_width=700): output = gr.Label(label="Classificação de Áudio") input.change(interface_realtime, [input, "pt-br"], output) input.change(updateHistory, None, historyOutput) input.start_recording(clearHistory, None, historyOutput) with gr.Row(visible=False) as speech_recognition: with gr.Column(min_width=700): with gr.Accordion("Grave um áudio", open=True): inputRecord = gr.Audio(label="Entrada de áudio", source="microphone", type="filepath") with gr.Accordion("Carregue um arquivo", open=False): inputUpload = gr.Audio(label="Entrada de áudio", source="upload", type="filepath") clearBtn = gr.ClearButton([inputRecord]) with gr.Column(min_width=700): output = gr.Label(label="Transcrição") inputRecord.stop_recording(transcribe_speech, [inputRecord, "pt-br"], [output]) inputUpload.upload(transcribe_speech, [inputUpload, "pt-br"], [output]) # examplesSpeechEn = gr.Examples(fn=transcribe_speech, examples=examples_speech_recognition_en, inputs=[inputRecord], outputs=[output], run_on_click=True, label="Examples") examplesSpeechPtbr = gr.Examples(fn=transcribe_speech, examples=examples_speech_recognition_ptbr, inputs=[inputRecord], outputs=[output], run_on_click=True, label="Portuguese Examples") with gr.Row(visible=False) as chatbot_qa: chatbot = gr.Chatbot( [], elem_id="chatbot", bubble_full_width=False, avatar_images=(None, "content/avatar-socialear.png"), min_width=2000 ) with gr.Row(min_width=2000): txt = gr.Textbox( scale=4, show_label=False, placeholder="Escreva o texto e precione enter", container=False, min_width=1000 ) submit = gr.Button(value="", size='sm', scale=1, icon='content/send-icon.png') inputRecord = gr.Audio(label="Grave uma pergunta", source="microphone", type="filepath", min_width=600) btn = gr.Button(value="Escute a resposta") audioOutput = gr.Audio(interactive=False, min_width=600) txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then( chatbot_response, [chatbot, "pt-br"], chatbot) txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False) submit.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then( chatbot_response, [chatbot, "pt-br"], chatbot).then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False) inputRecord.stop_recording(transcribe_speech, [inputRecord, "pt-br"], [txt]) btn.click(fn=TTS_chatbot, inputs=["pt-br"], outputs=audioOutput) with gr.Row(min_width=2000): # examplesChatbotEn = gr.Examples(examples=examples_chatbot_en, inputs=[txt], label="English Examples") examplesChatbotPtbr = gr.Examples(examples=examples_chatbot_ptbr, inputs=[txt], label="Exemplos") btn0.click(fn=to_audioClassification, outputs=[audio_classification, realtime_classification, speech_recognition, chatbot_qa]) btn1.click(fn=to_realtimeAudioClassification, outputs=[audio_classification, realtime_classification, speech_recognition, chatbot_qa]) btn2.click(fn=to_speechRecognition, outputs=[audio_classification, realtime_classification, speech_recognition, chatbot_qa]) btn3.click(fn=to_chatbot, outputs=[audio_classification, realtime_classification, speech_recognition, chatbot_qa]) if __name__ == "__main__": demo.queue() demo.launch()