John Langley committed on
Commit
38f8478
1 Parent(s): bc0e3c7

trying things with cpu

Files changed (1)
  1. app.py +8 -8
app.py CHANGED
@@ -38,7 +38,7 @@ from llama_cpp import Llama
 #from TTS.utils.manage import ModelManager
 
 # Local imports
-from utils import get_sentence #, generate_speech_for_sentence, wave_header_chunk
+from utils import get_sentence, wave_header_chunk #, generate_speech_for_sentence,
 
 # Load Whisper ASR model
 print("Loading Whisper ASR")
@@ -136,8 +136,8 @@ with gr.Blocks(title="Voice chat with LLM") as demo:
 
     def generate_speech(chatbot_history, chatbot_voice, initial_greeting=False):
         # Start by yielding an initial empty audio to set up autoplay
-        #yield ("", chatbot_history, wave_header_chunk())
-        yield ("", chatbot_history)
+        yield ("", chatbot_history, wave_header_chunk())
+        #yield ("", chatbot_history)
 
         # Helper function to handle the speech generation and yielding process
         # def handle_speech_generation(sentence, chatbot_history, chatbot_voice):
@@ -159,14 +159,14 @@ with gr.Blocks(title="Voice chat with LLM") as demo:
             # yield from handle_speech_generation(sentence, chatbot_history, chatbot_voice)
 
     txt_msg = txt_box.submit(fn=add_text, inputs=[chatbot, txt_box], outputs=[chatbot, txt_box], queue=False
-                             )#.then(fn=generate_speech, inputs=[chatbot,chatbot_voice], outputs=[sentence, chatbot, audio_playback])
+                             ).then(fn=generate_speech, inputs=[chatbot,chatbot_voice], outputs=[sentence, chatbot, audio_playback])
 
     txt_msg.then(fn=lambda: gr.update(interactive=True), inputs=None, outputs=[txt_box], queue=False)
 
-    #audio_msg = audio_record.stop_recording(fn=add_audio, inputs=[chatbot, audio_record], outputs=[chatbot, txt_box], queue=False
-    #                                        ).then(fn=generate_speech, inputs=[chatbot,chatbot_voice], outputs=[sentence, chatbot, audio_playback])
+    audio_msg = audio_record.stop_recording(fn=add_audio, inputs=[chatbot, audio_record], outputs=[chatbot, txt_box], queue=False
+                                            ).then(fn=generate_speech, inputs=[chatbot,chatbot_voice], outputs=[sentence, chatbot, audio_playback])
 
-    #audio_msg.then(fn=lambda: (gr.update(interactive=True),gr.update(interactive=True,value=None)), inputs=None, outputs=[txt_box, audio_record], queue=False)
+    audio_msg.then(fn=lambda: (gr.update(interactive=True),gr.update(interactive=True,value=None)), inputs=None, outputs=[txt_box, audio_record], queue=False)
 
     FOOTNOTE = """
     This Space demonstrates how to speak to an llm chatbot, based solely on open accessible models.
@@ -179,5 +179,5 @@ with gr.Blocks(title="Voice chat with LLM") as demo:
     - Responses generated by chat model should not be assumed correct or taken serious, as this is a demonstration example only
     - iOS (Iphone/Ipad) devices may not experience voice due to autoplay being disabled on these devices by Vendor"""
     gr.Markdown(FOOTNOTE)
-    demo.load(fn=generate_speech, inputs=[chatbot,chatbot_voice, gr.State(value=True)], outputs=[sentence, chatbot]) #outputs=[sentence, chatbot, audio_playback])
+    demo.load(fn=generate_speech, inputs=[chatbot,chatbot_voice, gr.State(value=True)], outputs=[sentence, chatbot, audio_playback])
 demo.queue().launch(debug=True,share=True)
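The change that matters here is the first yield in generate_speech: it now sends wave_header_chunk() to the audio_playback output so the browser can set up streaming autoplay before any synthesized audio arrives, and the matching import is restored from utils. The helper itself lives in utils.py and is not shown in this diff; the sketch below is a hypothetical reconstruction of what such a helper commonly looks like (the defaults of mono, 16-bit, 24 kHz PCM are assumptions, not taken from this Space):

import io
import wave

def wave_header_chunk(frame_input: bytes = b"", channels: int = 1,
                      sample_width: int = 2, sample_rate: int = 24000) -> bytes:
    # Hypothetical stand-in for utils.wave_header_chunk: emit a minimal WAV
    # container so the client's audio element can start playback immediately.
    buffer = io.BytesIO()
    with wave.open(buffer, "wb") as wav:
        wav.setnchannels(channels)      # mono
        wav.setsampwidth(sample_width)  # 16-bit PCM
        wav.setframerate(sample_rate)   # 24 kHz is typical for TTS output
        wav.writeframes(frame_input)    # usually empty: header only
    buffer.seek(0)
    return buffer.read()

Yielded first, this header primes the streaming audio component (the original comment calls it "an initial empty audio to set up autoplay"); any audio chunks yielded later by generate_speech, once speech generation is re-enabled, would then be appended and played back as they arrive.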