John Langley committed on
Commit
4404242
1 Parent(s): 97c030c

trying things with cpu

Browse files
Files changed (1) hide show
  1. app.py +20 -20
app.py CHANGED
@@ -53,22 +53,23 @@ mistral_llm = Llama(model_path=mistral_model_path,n_gpu_layers=35,max_new_tokens
53
 
54
  # Load XTTS Model
55
  print("Loading XTTS model")
56
- #os.environ["COQUI_TOS_AGREED"] = "1"
57
- #tts_model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
58
- #ModelManager().download_model(tts_model_name)
59
- #tts_model_path = os.path.join(get_user_data_dir("tts"), tts_model_name.replace("/", "--"))
60
- #config = XttsConfig()
61
- #config.load_json(os.path.join(tts_model_path, "config.json"))
62
- #xtts_model = Xtts.init_from_config(config)
63
- #xtts_model.load_checkpoint(
64
- # config,
65
- # checkpoint_path=os.path.join(tts_model_path, "model.pth"),
66
- # vocab_path=os.path.join(tts_model_path, "vocab.json"),
67
- # eval=True,
68
- # use_deepspeed=True,
69
- #)
 
70
  #xtts_model.cuda()
71
- print("UN-Loading XTTS model")
72
 
73
  ###### Set up Gradio Interface ######
74
 
@@ -142,11 +143,10 @@ with gr.Blocks(title="Voice chat with LLM") as demo:
142
  def handle_speech_generation(sentence, chatbot_history, chatbot_voice):
143
  if sentence != "":
144
  print("Processing sentence")
145
- yield (sentence, chatbot_history, None)
146
- # generated_speech = generate_speech_for_sentence(chatbot_history, chatbot_voice, sentence, xtts_model, xtts_supported_languages=config.languages, return_as_byte=True)
147
- # if generated_speech is not None:
148
- # _, audio_dict = generated_speech
149
- # yield (sentence, chatbot_history, audio_dict["value"])
150
 
151
  if initial_greeting:
152
  # Process only the initial greeting if specified
 
53
 
54
  # Load XTTS Model
55
  print("Loading XTTS model")
56
+ os.environ["COQUI_TOS_AGREED"] = "1"
57
+ tts_model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
58
+ ModelManager().download_model(tts_model_name)
59
+ tts_model_path = os.path.join(get_user_data_dir("tts"), tts_model_name.replace("/", "--"))
60
+ config = XttsConfig()
61
+ config.load_json(os.path.join(tts_model_path, "config.json"))
62
+ xtts_model = Xtts.init_from_config(config)
63
+ xtts_model.to("cpu")
64
+ xtts_model.load_checkpoint(
65
+ config,
66
+ checkpoint_path=os.path.join(tts_model_path, "model.pth"),
67
+ vocab_path=os.path.join(tts_model_path, "vocab.json"),
68
+ eval=True,
69
+ use_deepspeed=True,
70
+ )
71
  #xtts_model.cuda()
72
+ #print("UN-Loading XTTS model")
73
 
74
  ###### Set up Gradio Interface ######
75
 
 
143
  def handle_speech_generation(sentence, chatbot_history, chatbot_voice):
144
  if sentence != "":
145
  print("Processing sentence")
146
+ generated_speech = generate_speech_for_sentence(chatbot_history, chatbot_voice, sentence, xtts_model, xtts_supported_languages=config.languages, return_as_byte=True)
147
+ if generated_speech is not None:
148
+ _, audio_dict = generated_speech
149
+ yield (sentence, chatbot_history, audio_dict["value"])
 
150
 
151
  if initial_greeting:
152
  # Process only the initial greeting if specified