"""Gradio UI for a mental-health chatbot: text or microphone input in,
chat history plus a spoken (TTS) reply out."""

import gradio as gr
from gradio import ChatMessage

from main import get_chatbot_response, load_peft_model_and_tokenizer
from utils import audio_to_text, text_to_speech

# NOTE(review): these two values look swapped — 'microsoft/phi-2' is the base
# model and 'bisoye/phi-2-for-mental-health-2' the fine-tuned PEFT adapter.
# Left as-is pending confirmation of load_peft_model_and_tokenizer's signature.
PEFT_MODEL = 'microsoft/phi-2'
BASE_MODEL = 'bisoye/phi-2-for-mental-health-2'

# Loaded once at module import so every request reuses the same model.
tokenizer, model = load_peft_model_and_tokenizer(PEFT_MODEL, BASE_MODEL)


def chat_history(message: str, history: list):
    """Generate a reply to `message` and record the exchange.

    Args:
        message: The user's text question.
        history: The Chatbot component's message list (mutated in place).

    Returns:
        A tuple of (updated history, path to the TTS audio file of the reply).
    """
    response = get_chatbot_response(model, tokenizer, message)
    history.append(ChatMessage(role='user', content=message))
    history.append(ChatMessage(role='assistant', content=response))
    audio_filename = text_to_speech(response)  # convert response to audio
    return history, audio_filename


def respond_to_audio(audio, history: list):
    """Transcribe a recorded question and answer it like a text question.

    Delegates to `chat_history` so the chat log and the audio reply are
    produced exactly as for typed input.

    Args:
        audio: Filepath of the recorded question (Audio type='filepath').
        history: The Chatbot component's message list.

    Returns:
        A tuple of (updated history, path to the TTS audio file of the reply),
        matching the click handler's two outputs.
    """
    text_from_audio = audio_to_text(audio)
    return chat_history(text_from_audio, history)


# NOTE(review): css='custom_css' is inert unless it is valid CSS or a real
# file path — confirm whether a stylesheet file was intended here.
with gr.Blocks(css='custom_css') as demo:
    with gr.Row():
        with gr.Column():
            chatbot = gr.Chatbot(label='Mental Health Chatbot', type='messages')
            input_text = gr.Textbox(label='Enter your question here: ')
            input_audio = gr.Audio(label='Send question as audio: ',
                                   sources='microphone', type='filepath')
            send_audio_button = gr.Button(value='Send Audio')
        with gr.Column():
            output_audio = gr.Audio(label='AI audio response: ',
                                    sources='upload', type='filepath',
                                    interactive=False, autoplay=True)
            clear_button = gr.ClearButton(
                components=[input_text, input_audio, output_audio])

    # Both handlers return (history, audio_path), so both events must declare
    # the same two outputs — the original wired one output for two return
    # values (submit) and two outputs for one return value (click).
    input_text.submit(chat_history,
                      inputs=[input_text, chatbot],
                      outputs=[chatbot, output_audio])
    send_audio_button.click(respond_to_audio,
                            inputs=[input_audio, chatbot],
                            outputs=[chatbot, output_audio])

if __name__ == '__main__':
    demo.launch()